// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package scraper // import "miniflux.app/reader/scraper"
import (
	"bytes"
	"os"
	"strings"
	"testing"
)
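// TestGetPredefinedRules verifies that getPredefinedScraperRules returns a
// non-empty rule for hosts with predefined scraper rules and an empty string
// for hosts without one.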
func TestGetPredefinedRules(t *testing.T) {
	if getPredefinedScraperRules("http://www.phoronix.com/") == "" {
		t.Error("Unable to find rule for phoronix.com")
	}

	if getPredefinedScraperRules("https://www.linux.com/") == "" {
		t.Error("Unable to find rule for linux.com")
	}

	if getPredefinedScraperRules("https://example.org/") != "" {
		t.Error("A rule not defined should not return anything")
	}
}
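// TestWhitelistedContentTypes verifies that isAllowedContentType accepts HTML
// and XHTML content types regardless of case or charset parameters, and
// rejects other media types.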
func TestWhitelistedContentTypes(t *testing.T) {
	scenarios := map[string]bool{
		"text/html":                            true,
		"TeXt/hTmL":                            true,
		"application/xhtml+xml":                true,
		"text/html; charset=utf-8":             true,
		"application/xhtml+xml; charset=utf-8": true,
		"text/css":                             false,
		"application/javascript":               false,
		"image/png":                            false,
		"application/pdf":                      false,
	}

	for inputValue, expectedResult := range scenarios {
		actualResult := isAllowedContentType(inputValue)
		if actualResult != expectedResult {
			t.Errorf(`Unexpected result for content type whitelist, got "%v" instead of "%v"`, actualResult, expectedResult)
		}
	}
}
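// TestSelectorRules runs scrapContent on the HTML fixtures in testdata/ and
// compares the extracted markup with the corresponding "-result" files.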
func TestSelectorRules(t *testing.T) {
	var ruleTestCases = map[string]string{
		"img.html":    "article > img",
		"iframe.html": "article > iframe",
		"p.html":      "article > p",
	}

	for filename, rule := range ruleTestCases {
		html, err := os.ReadFile("testdata/" + filename)
		if err != nil {
			t.Fatalf(`Unable to read file %q: %v`, filename, err)
		}

		actualResult, err := scrapContent(bytes.NewReader(html), rule)
		if err != nil {
			t.Fatalf(`Scraping error for %q - %q: %v`, filename, rule, err)
		}

		expectedResult, err := os.ReadFile("testdata/" + filename + "-result")
		if err != nil {
			t.Fatalf(`Unable to read file %q: %v`, filename, err)
		}

		if actualResult != strings.TrimSpace(string(expectedResult)) {
			t.Errorf(`Unexpected result for %q, got "%s" instead of "%s"`, rule, actualResult, expectedResult)
		}
	}
}