miniflux/reader/scraper/scraper.go

// Copyright 2017 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package scraper

import (
	"errors"
	"fmt"
	"io"
	"strings"

	"github.com/PuerkitoBio/goquery"

	"github.com/miniflux/miniflux/http/client"
	"github.com/miniflux/miniflux/logger"
	"github.com/miniflux/miniflux/reader/readability"
	"github.com/miniflux/miniflux/url"
)

// Fetch downloads a web page and returns relevant contents.
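//
// A minimal usage sketch (the URL is illustrative):
//
//	content, err := scraper.Fetch("https://example.org/article", "")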
func Fetch(websiteURL, rules string) (string, error) {
	clt := client.New(websiteURL)
	response, err := clt.Get()
	if err != nil {
		return "", err
	}

	if response.HasServerFailure() {
		return "", errors.New("scraper: unable to download web page")
	}
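
	// Scraping only makes sense for HTML documents, so reject anything else.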
	if !strings.Contains(response.ContentType, "text/html") {
		return "", fmt.Errorf("scraper: this resource is not an HTML document (%s)", response.ContentType)
	}
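
	// Convert the response body to UTF-8 before parsing it.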
	page, err := response.NormalizeBodyEncoding()
	if err != nil {
		return "", err
	}

	// The entry URL could redirect somewhere else.
	websiteURL = response.EffectiveURL

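	// When the feed has no custom rules, fall back to the predefined
	// per-domain rules bundled with Miniflux.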
if rules == "" {
rules = getPredefinedScraperRules(websiteURL)
}

	var content string
	if rules != "" {
		logger.Debug(`[Scraper] Using rules "%s" for "%s"`, rules, websiteURL)
		content, err = scrapContent(page, rules)
	} else {
		logger.Debug(`[Scraper] Using readability for "%s"`, websiteURL)
		content, err = readability.ExtractContent(page)
	}

	if err != nil {
		return "", err
	}

	return content, nil
}
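
// scrapContent applies the given CSS selector rules to the page and
// concatenates the HTML of every match.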
func scrapContent(page io.Reader, rules string) (string, error) {
	document, err := goquery.NewDocumentFromReader(page)
	if err != nil {
		return "", err
	}

	contents := ""
	document.Find(rules).Each(func(i int, s *goquery.Selection) {
		var content string

		// For some inline elements, we get the parent.
		if s.Is("img") || s.Is("iframe") {
			content, _ = s.Parent().Html()
		} else {
			content, _ = s.Html()
		}

		contents += content
	})

	return contents, nil
}
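
// getPredefinedScraperRules returns the bundled scraper rules whose domain
// matches the website URL, or an empty string when no rules are defined.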
func getPredefinedScraperRules(websiteURL string) string {
	urlDomain := url.Domain(websiteURL)

	for domain, rules := range predefinedRules {
		if strings.Contains(urlDomain, domain) {
			return rules
		}
	}

	return ""
}