Do not crawl existing entry URLs
parent 09785df07f
commit 3b62f904d6

3 changed files with 23 additions and 8 deletions
@@ -70,7 +70,7 @@ func (h *Handler) CreateFeed(userID, categoryID int64, url string, crawler bool)
 		return nil, err
 	}
 
-	feedProcessor := processor.NewFeedProcessor(subscription)
+	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
 	feedProcessor.WithCrawler(crawler)
 	feedProcessor.Process()
 
@@ -162,7 +162,7 @@ func (h *Handler) RefreshFeed(userID, feedID int64) error {
 		return err
 	}
 
-	feedProcessor := processor.NewFeedProcessor(subscription)
+	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
 	feedProcessor.WithScraperRules(originalFeed.ScraperRules)
 	feedProcessor.WithRewriteRules(originalFeed.RewriteRules)
 	feedProcessor.WithCrawler(originalFeed.Crawler)
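Both call sites change the same way: the processor now receives the user ID and the handler's storage handle, so that Process can consult the database before crawling. A condensed sketch of the new wiring, reconstructed from the two hunks above (surrounding code and error handling elided):

	// Sketch only, mirroring the call sites above; not a complete function.
	feedProcessor := processor.NewFeedProcessor(userID, h.store, subscription)
	feedProcessor.WithCrawler(crawler) // only crawling feeds hit the new dedup check
	feedProcessor.Process()
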
@@ -10,10 +10,13 @@ import (
 	"github.com/miniflux/miniflux/reader/rewrite"
 	"github.com/miniflux/miniflux/reader/sanitizer"
 	"github.com/miniflux/miniflux/reader/scraper"
+	"github.com/miniflux/miniflux/storage"
 )
 
 // FeedProcessor handles the processing of feed contents.
 type FeedProcessor struct {
+	userID       int64
+	store        *storage.Storage
 	feed         *model.Feed
 	scraperRules string
 	rewriteRules string
@@ -39,11 +42,15 @@ func (f *FeedProcessor) WithRewriteRules(rules string) {
 func (f *FeedProcessor) Process() {
 	for _, entry := range f.feed.Entries {
 		if f.crawler {
-			content, err := scraper.Fetch(entry.URL, f.scraperRules)
-			if err != nil {
-				logger.Error("[FeedProcessor] %v", err)
+			if f.store.EntryURLExists(f.userID, entry.URL) {
+				logger.Debug(`[FeedProcessor] Do not crawl existing entry URL: "%s"`, entry.URL)
 			} else {
-				entry.Content = content
+				content, err := scraper.Fetch(entry.URL, f.scraperRules)
+				if err != nil {
+					logger.Error("[FeedProcessor] %v", err)
+				} else {
+					entry.Content = content
+				}
 			}
 		}
 
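Because the hunk interleaves deletions and additions, the change is easier to read in its final form. Reconstructed from the added and context lines above (the rewrite and sanitize steps later in this function fall outside the hunk and are elided):

	func (f *FeedProcessor) Process() {
		for _, entry := range f.feed.Entries {
			if f.crawler {
				if f.store.EntryURLExists(f.userID, entry.URL) {
					// Skip the network fetch: this user already has an entry with this URL.
					logger.Debug(`[FeedProcessor] Do not crawl existing entry URL: "%s"`, entry.URL)
				} else {
					content, err := scraper.Fetch(entry.URL, f.scraperRules)
					if err != nil {
						logger.Error("[FeedProcessor] %v", err)
					} else {
						entry.Content = content
					}
				}
			}
			// ... rewrite/sanitize steps elided (outside this hunk) ...
		}
	}
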
@@ -53,6 +60,6 @@ func (f *FeedProcessor) Process() {
 }
 
 // NewFeedProcessor returns a new FeedProcessor.
-func NewFeedProcessor(feed *model.Feed) *FeedProcessor {
-	return &FeedProcessor{feed: feed, crawler: false}
+func NewFeedProcessor(userID int64, store *storage.Storage, feed *model.Feed) *FeedProcessor {
+	return &FeedProcessor{userID: userID, store: store, feed: feed, crawler: false}
 }
@@ -226,3 +226,11 @@ func (s *Storage) MarkAllAsRead(userID int64) error {
 
 	return nil
 }
+
+// EntryURLExists returns true if an entry with this URL already exists.
+func (s *Storage) EntryURLExists(userID int64, entryURL string) bool {
+	var result int
+	query := `SELECT count(*) as c FROM entries WHERE user_id=$1 AND url=$2`
+	s.db.QueryRow(query, userID, entryURL).Scan(&result)
+	return result >= 1
+}
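Two observations on this helper. The Scan error is deliberately or accidentally ignored, so a failed query reads as "URL not stored" and the entry simply gets crawled again: a safe failure mode for a cache-style check. And count(*) tallies every matching row even though only existence matters; an EXISTS query that stops at the first match is a possible alternative. A sketch of that variant using only database/sql (not part of this commit; entryURLExists is a hypothetical name):

	// Hypothetical EXISTS-based variant of the helper above (not part of this commit).
	// Assumes: import "database/sql"; db is an open *sql.DB for the same database.
	func entryURLExists(db *sql.DB, userID int64, entryURL string) bool {
		var exists bool
		query := `SELECT EXISTS (SELECT 1 FROM entries WHERE user_id=$1 AND url=$2)`
		if err := db.QueryRow(query, userID, entryURL).Scan(&exists); err != nil {
			return false // on query failure, fall back to crawling again
		}
		return exists
	}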