// Copyright 2018 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package processor

import (
	"time"

	"miniflux.app/config"
	"miniflux.app/logger"
	"miniflux.app/metric"
	"miniflux.app/model"
	"miniflux.app/reader/rewrite"
	"miniflux.app/reader/sanitizer"
	"miniflux.app/reader/scraper"
	"miniflux.app/storage"
)

// ProcessFeedEntries downloads the original web page for each entry and applies filters.
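//
// A minimal illustrative call site (an assumption, not part of this file): a
// feed refresh job would run this right after parsing new entries, for example:
//
//	feed.Entries = parsedEntries // hypothetical: entries from the feed parser
//	processor.ProcessFeedEntries(store, feed)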
func ProcessFeedEntries(store *storage.Storage, feed *model.Feed) {
	for _, entry := range feed.Entries {
		logger.Debug("[Feed #%d] Processing entry %s", feed.ID, entry.URL)

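		// When the crawler is enabled for this feed, download the original web
		// page instead of keeping only the content provided by the feed.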
		if feed.Crawler {
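			// Scrape only entries that are not already in the database, so
			// pages are not re-downloaded on every feed refresh.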
			if !store.EntryURLExists(feed.ID, entry.URL) {
				startTime := time.Now()
				content, scraperErr := scraper.Fetch(entry.URL, feed.ScraperRules, feed.UserAgent)

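				// Record the scraper request duration when a metrics collector
				// is configured, labelled by success/error status.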
				if config.Opts.HasMetricsCollector() {
					status := "success"
					if scraperErr != nil {
						status = "error"
					}
					metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
				}

				if scraperErr != nil {
					logger.Error(`[Filter] Unable to crawl this entry: %q => %v`, entry.URL, scraperErr)
				} else if content != "" {
					// We replace the entry content only if the scraper doesn't return any error.
					entry.Content = content
				}
			}
		}

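		// Rewrite rules are applied to whatever content the entry ends up
		// with, whether it came from the feed itself or from the scraper.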
		entry.Content = rewrite.Rewriter(entry.URL, entry.Content, feed.RewriteRules)

		// The sanitizer should always run at the end of the process to make sure unsafe HTML is filtered.
		entry.Content = sanitizer.Sanitize(entry.URL, entry.Content)
	}
}

// ProcessEntryWebPage downloads the entry's web page and applies rewrite and sanitizer rules.
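//
// A minimal sketch of the intended use (assumed caller, e.g. a "fetch original
// content" action on a single stored entry; persistence is out of scope here):
//
//	if err := processor.ProcessEntryWebPage(entry); err == nil {
//		// save entry.Content back to the database (storage call omitted)
//	}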
func ProcessEntryWebPage(entry *model.Entry) error {
	startTime := time.Now()
	content, scraperErr := scraper.Fetch(entry.URL, entry.Feed.ScraperRules, entry.Feed.UserAgent)

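	// Same scraper request duration metric as in ProcessFeedEntries.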
	if config.Opts.HasMetricsCollector() {
		status := "success"
		if scraperErr != nil {
			status = "error"
		}
		metric.ScraperRequestDuration.WithLabelValues(status).Observe(time.Since(startTime).Seconds())
	}

	if scraperErr != nil {
		return scraperErr
	}

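	// As in ProcessFeedEntries, rewrite rules run first and the sanitizer runs
	// last so unsafe HTML never reaches the stored content.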
	content = rewrite.Rewriter(entry.URL, content, entry.Feed.RewriteRules)
	content = sanitizer.Sanitize(entry.URL, content)

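	// Keep the previously stored content when the scraped page comes back empty.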
	if content != "" {
		entry.Content = content
	}

	return nil
}