extract worker & crawler from server

Nazar Kanaev
2021-03-17 10:36:18 +00:00
parent 682b0c1729
commit dfb32d4ebe
4 changed files with 152 additions and 123 deletions


@@ -3,11 +3,9 @@ package server
 import (
 	"log"
 	"net/http"
-	"runtime"
-	"sync/atomic"
-	"time"
 
 	"github.com/nkanaev/yarr/src/storage"
+	"github.com/nkanaev/yarr/src/worker"
 )
 
 var BasePath string = ""
@@ -15,9 +13,7 @@ var BasePath string = ""
 type Server struct {
 	Addr string
 	db   *storage.Storage
-	feedQueue   chan storage.Feed
-	queueSize   *int32
-	refreshRate chan int64
+	worker *worker.Worker
 	// auth
 	Username string
 	Password string
@@ -27,13 +23,10 @@ type Server struct {
 }
 
 func NewServer(db *storage.Storage, addr string) *Server {
-	queueSize := int32(0)
 	return &Server{
-		db:          db,
-		feedQueue:   make(chan storage.Feed, 3000),
-		queueSize:   &queueSize,
-		Addr:        addr,
-		refreshRate: make(chan int64),
+		db:     db,
+		Addr:   addr,
+		worker: worker.NewWorker(db),
 	}
 }
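
The extracted worker package lives in one of the other changed files and is not part of this diff. A hypothetical sketch of its shape, inferred only from the worker.NewWorker(db) call and the fields removed from Server above:

// Hypothetical reconstruction: the worker package is not shown in this
// excerpt. The field set mirrors exactly what was deleted from Server.
package worker

import "github.com/nkanaev/yarr/src/storage"

type Worker struct {
	db          *storage.Storage
	feedQueue   chan storage.Feed
	queueSize   *int32
	refreshRate chan int64
}

func NewWorker(db *storage.Storage) *Worker {
	queueSize := int32(0)
	return &Worker{
		db:          db,
		feedQueue:   make(chan storage.Feed, 3000),
		queueSize:   &queueSize,
		refreshRate: make(chan int64),
	}
}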
@@ -46,7 +39,7 @@ func (h *Server) GetAddr() string {
 }
 
 func (s *Server) Start() {
-	s.startJobs()
+	s.worker.Start()
 	httpserver := &http.Server{Addr: s.Addr, Handler: s.handler()}
@@ -102,103 +95,6 @@ func (h Server) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 }
 */
 
-func (h *Server) startJobs() {
-	delTicker := time.NewTicker(time.Hour * 24)
-
-	syncSearchChannel := make(chan bool, 10)
-	var syncSearchTimer *time.Timer // TODO: should this be atomic?
-	syncSearch := func() {
-		if syncSearchTimer == nil {
-			syncSearchTimer = time.AfterFunc(time.Second*2, func() {
-				syncSearchChannel <- true
-			})
-		} else {
-			syncSearchTimer.Reset(time.Second * 2)
-		}
-	}
-
-	worker := func() {
-		for {
-			select {
-			case feed := <-h.feedQueue:
-				items, err := listItems(feed, h.db)
-				atomic.AddInt32(h.queueSize, -1)
-				if err != nil {
-					log.Printf("Failed to fetch %s (%d): %s", feed.FeedLink, feed.Id, err)
-					h.db.SetFeedError(feed.Id, err)
-					continue
-				}
-				h.db.CreateItems(items)
-				syncSearch()
-				if !feed.HasIcon {
-					icon, err := findFavicon(feed.Link, feed.FeedLink)
-					if icon != nil {
-						h.db.UpdateFeedIcon(feed.Id, icon)
-					}
-					if err != nil {
-						log.Printf("Failed to search favicon for %s (%s): %s", feed.Link, feed.FeedLink, err)
-					}
-				}
-			case <-delTicker.C:
-				h.db.DeleteOldItems()
-			case <-syncSearchChannel:
-				h.db.SyncSearch()
-			}
-		}
-	}
-
-	num := runtime.NumCPU() - 1
-	if num < 1 {
-		num = 1
-	}
-	for i := 0; i < num; i++ {
-		go worker()
-	}
-
-	go h.db.DeleteOldItems()
-	go h.db.SyncSearch()
-
-	go func() {
-		var refreshTicker *time.Ticker
-		refreshTick := make(<-chan time.Time)
-		for {
-			select {
-			case <-refreshTick:
-				h.fetchAllFeeds()
-			case val := <-h.refreshRate:
-				if refreshTicker != nil {
-					refreshTicker.Stop()
-					if val == 0 {
-						refreshTick = make(<-chan time.Time)
-					}
-				}
-				if val > 0 {
-					refreshTicker = time.NewTicker(time.Duration(val) * time.Minute)
-					refreshTick = refreshTicker.C
-				}
-			}
-		}
-	}()
-
-	refreshRate := h.db.GetSettingsValueInt64("refresh_rate")
-	h.refreshRate <- refreshRate
-	if refreshRate > 0 {
-		h.fetchAllFeeds()
-	}
-}
-
 func (h Server) requiresAuth() bool {
 	return h.Username != "" && h.Password != ""
 }
-
-func (h *Server) fetchAllFeeds() {
-	log.Print("Refreshing all feeds")
-	h.db.ResetFeedErrors()
-	for _, feed := range h.db.ListFeeds() {
-		h.fetchFeed(feed)
-	}
-}
-
-func (h *Server) fetchFeed(feed storage.Feed) {
-	atomic.AddInt32(h.queueSize, 1)
-	h.feedQueue <- feed
-}