// SPDX-FileCopyrightText: Copyright The Miniflux Authors. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

package storage // import "miniflux.app/v2/internal/storage"

import (
	"database/sql"
	"errors"
	"fmt"
	"log/slog"
	"slices"
	"time"

	"miniflux.app/v2/internal/crypto"
	"miniflux.app/v2/internal/model"

	"github.com/lib/pq"
)

// CountAllEntries returns the number of entries for each status in the database.
func (s *Storage) CountAllEntries() map[string]int64 {
	rows, err := s.db.Query(`SELECT status, count(*) FROM entries GROUP BY status`)
	if err != nil {
		return nil
	}
	defer rows.Close()

	results := make(map[string]int64)
	results[model.EntryStatusUnread] = 0
	results[model.EntryStatusRead] = 0
	results[model.EntryStatusRemoved] = 0

	for rows.Next() {
		var status string
		var count int64

		if err := rows.Scan(&status, &count); err != nil {
			continue
		}

		results[status] = count
	}

	results["total"] = results[model.EntryStatusUnread] + results[model.EntryStatusRead] + results[model.EntryStatusRemoved]
	return results
}
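
// Usage sketch (hypothetical caller; assumes a configured *Storage named store):
//
//	counts := store.CountAllEntries()
//	fmt.Printf("unread=%d total=%d\n", counts[model.EntryStatusUnread], counts["total"])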

// CountUnreadEntries returns the number of unread entries.
func (s *Storage) CountUnreadEntries(userID int64) int {
	builder := s.NewEntryQueryBuilder(userID)
	builder.WithStatus(model.EntryStatusUnread)
	builder.WithGloballyVisible()

	n, err := builder.CountEntries()
	if err != nil {
		slog.Error("Unable to count unread entries",
			slog.Int64("user_id", userID),
			slog.Any("error", err),
		)
		return 0
	}

	return n
}

// NewEntryQueryBuilder returns a new EntryQueryBuilder
func (s *Storage) NewEntryQueryBuilder(userID int64) *EntryQueryBuilder {
	return NewEntryQueryBuilder(s, userID)
}

// UpdateEntryTitleAndContent updates entry title and content.
func (s *Storage) UpdateEntryTitleAndContent(entry *model.Entry) error {
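	// The document vector is rebuilt from the new title (weight 'A') and content (weight 'B');
	// both are truncated to 500,000 characters to keep the generated tsvector bounded.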
	query := `
		UPDATE
			entries
		SET
			title=$1,
			content=$2,
			reading_time=$3,
			document_vectors = setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($2, ''), 500000)), 'B')
		WHERE
			id=$4 AND user_id=$5
	`

	if _, err := s.db.Exec(query, entry.Title, entry.Content, entry.ReadingTime, entry.ID, entry.UserID); err != nil {
		return fmt.Errorf(`store: unable to update entry #%d: %v`, entry.ID, err)
	}

	return nil
}

// createEntry adds a new entry.
func (s *Storage) createEntry(tx *sql.Tx, entry *model.Entry) error {
	query := `
		INSERT INTO entries
			(
				title,
				hash,
				url,
				comments_url,
				published_at,
				content,
				author,
				user_id,
				feed_id,
				reading_time,
				changed_at,
				document_vectors,
				tags
			)
		VALUES
			(
				$1,
				$2,
				$3,
				$4,
				$5,
				$6,
				$7,
				$8,
				$9,
				$10,
				now(),
				setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($6, ''), 500000)), 'B'),
				$11
			)
		RETURNING
			id, status, created_at, changed_at
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.Hash,
		entry.URL,
		entry.CommentsURL,
		entry.Date,
		entry.Content,
		entry.Author,
		entry.UserID,
		entry.FeedID,
		entry.ReadingTime,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(
		&entry.ID,
		&entry.Status,
		&entry.CreatedAt,
		&entry.ChangedAt,
	)

	if err != nil {
		return fmt.Errorf(`store: unable to create entry %q (feed #%d): %v`, entry.URL, entry.FeedID, err)
	}
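
	// Enclosures are created within the same transaction, so a failure here lets the caller roll back the entry as well.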
	for _, enclosure := range entry.Enclosures {
		enclosure.EntryID = entry.ID
		enclosure.UserID = entry.UserID
		err := s.createEnclosure(tx, enclosure)
		if err != nil {
			return err
		}
	}

	return nil
}

// updateEntry updates an entry when a feed is refreshed.
// Note: we do not update the published date because some feeds do not contain any date,
// and it defaults to time.Now(), which could change the order of items on the history page.
func (s *Storage) updateEntry(tx *sql.Tx, entry *model.Entry) error {
	query := `
		UPDATE
			entries
		SET
			title=$1,
			url=$2,
			comments_url=$3,
			content=$4,
			author=$5,
			reading_time=$6,
			document_vectors = setweight(to_tsvector(left(coalesce($1, ''), 500000)), 'A') || setweight(to_tsvector(left(coalesce($4, ''), 500000)), 'B'),
			tags=$10
		WHERE
			user_id=$7 AND feed_id=$8 AND hash=$9
		RETURNING
			id
	`
	err := tx.QueryRow(
		query,
		entry.Title,
		entry.URL,
		entry.CommentsURL,
		entry.Content,
		entry.Author,
		entry.ReadingTime,
		entry.UserID,
		entry.FeedID,
		entry.Hash,
		pq.Array(removeDuplicates(entry.Tags)),
	).Scan(&entry.ID)

	if err != nil {
		return fmt.Errorf(`store: unable to update entry %q: %v`, entry.URL, err)
	}

	for _, enclosure := range entry.Enclosures {
		enclosure.UserID = entry.UserID
		enclosure.EntryID = entry.ID
	}

	return s.updateEnclosures(tx, entry)
}

// entryExists checks if an entry already exists based on its hash when refreshing a feed.
func (s *Storage) entryExists(tx *sql.Tx, entry *model.Entry) (bool, error) {
	var result bool

	// Note: This query uses entries_feed_id_hash_key index (filtering on user_id is not necessary).
	err := tx.QueryRow(`SELECT true FROM entries WHERE feed_id=$1 AND hash=$2`, entry.FeedID, entry.Hash).Scan(&result)

	if err != nil && err != sql.ErrNoRows {
		return result, fmt.Errorf(`store: unable to check if entry exists: %v`, err)
	}

	return result, nil
}

// GetReadTime fetches the read time of an entry based on its hash, and the feed id and user id from the feed.
// It's intended to be used on entry objects created by parsing a feed, as they don't contain much information.
// The feed param helps to scope the search to a specific user and feed in order to avoid hash clashes.
func (s *Storage) GetReadTime(entry *model.Entry, feed *model.Feed) int {
	var result int
	s.db.QueryRow(
		`SELECT
			reading_time
		FROM
			entries
		WHERE
			user_id=$1 AND
			feed_id=$2 AND
			hash=$3
		`,
		feed.UserID,
		feed.ID,
		entry.Hash,
	).Scan(&result)
	return result
}

// cleanupEntries deletes from the database the entries marked as "removed" that are no longer visible in the feed.
func (s *Storage) cleanupEntries(feedID int64, entryHashes []string) error {
	query := `
		DELETE FROM
			entries
		WHERE
			feed_id=$1 AND
			status=$2 AND
			NOT (hash=ANY($3))
	`
	if _, err := s.db.Exec(query, feedID, model.EntryStatusRemoved, pq.Array(entryHashes)); err != nil {
		return fmt.Errorf(`store: unable to cleanup entries: %v`, err)
	}

	return nil
}

// RefreshFeedEntries updates feed entries while refreshing a feed.
func (s *Storage) RefreshFeedEntries(userID, feedID int64, entries model.Entries, updateExistingEntries bool) (newEntries model.Entries, err error) {
	var entryHashes []string

	for _, entry := range entries {
		entry.UserID = userID
		entry.FeedID = feedID

		tx, err := s.db.Begin()
		if err != nil {
			return nil, fmt.Errorf(`store: unable to start transaction: %v`, err)
		}

		entryExists, err := s.entryExists(tx, entry)
		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return nil, fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return nil, err
		}

		if entryExists {
			if updateExistingEntries {
				err = s.updateEntry(tx, entry)
			}
		} else {
			err = s.createEntry(tx, entry)
			if err == nil {
				newEntries = append(newEntries, entry)
			}
		}

		if err != nil {
			if rollbackErr := tx.Rollback(); rollbackErr != nil {
				return nil, fmt.Errorf(`store: unable to rollback transaction: %v (rolled back due to: %v)`, rollbackErr, err)
			}
			return nil, err
		}

		if err := tx.Commit(); err != nil {
			return nil, fmt.Errorf(`store: unable to commit transaction: %v`, err)
		}

		entryHashes = append(entryHashes, entry.Hash)
	}
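
	// Entries that disappeared from the feed are cleaned up asynchronously, so the refresh does not wait on the deletion.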
	go func() {
		if err := s.cleanupEntries(feedID, entryHashes); err != nil {
			slog.Error("Unable to cleanup entries",
				slog.Int64("user_id", userID),
				slog.Int64("feed_id", feedID),
				slog.Any("error", err),
			)
		}
	}()

	return newEntries, nil
}

// ArchiveEntries changes the status of entries to "removed" after the given number of days.
func (s *Storage) ArchiveEntries(status string, days, limit int) (int64, error) {
	if days < 0 || limit <= 0 {
		return 0, nil
	}

	query := `
		UPDATE
			entries
		SET
			status=$1
		WHERE
			id IN (
				SELECT
					id
				FROM
					entries
				WHERE
					status=$2 AND
					starred is false AND
					share_code='' AND
					created_at < now () - $3::interval
				ORDER BY
					created_at ASC LIMIT $4
			)
	`
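
	// The retention window is passed as a PostgreSQL interval string such as "30 days".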
	result, err := s.db.Exec(query, model.EntryStatusRemoved, status, fmt.Sprintf("%d days", days), limit)
	if err != nil {
		return 0, fmt.Errorf(`store: unable to archive %s entries: %v`, status, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return 0, fmt.Errorf(`store: unable to get the number of rows affected: %v`, err)
	}

	return count, nil
}

// SetEntriesStatus updates the status of the given list of entries.
func (s *Storage) SetEntriesStatus(userID int64, entryIDs []int64, status string) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
	result, err := s.db.Exec(query, status, userID, pq.Array(entryIDs))
	if err != nil {
		return fmt.Errorf(`store: unable to update entries statuses %v: %v`, entryIDs, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}
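
// SetEntriesStatusCount updates the status of the given entries and returns how many of them are globally visible
// (neither their feed nor their category is hidden globally).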
func (s *Storage) SetEntriesStatusCount(userID int64, entryIDs []int64, status string) (int, error) {
	if err := s.SetEntriesStatus(userID, entryIDs, status); err != nil {
		return 0, err
	}

	query := `
		SELECT count(*)
		FROM entries e
			JOIN feeds f ON (f.id = e.feed_id)
			JOIN categories c ON (c.id = f.category_id)
		WHERE e.user_id = $1
			AND e.id = ANY($2)
			AND NOT f.hide_globally
			AND NOT c.hide_globally
	`
	row := s.db.QueryRow(query, userID, pq.Array(entryIDs))
	visible := 0
	if err := row.Scan(&visible); err != nil {
		return 0, fmt.Errorf(`store: unable to query entries visibility %v: %v`, entryIDs, err)
	}

	return visible, nil
}

// SetEntriesBookmarkedState updates the bookmarked state for the given list of entries.
func (s *Storage) SetEntriesBookmarkedState(userID int64, entryIDs []int64, starred bool) error {
	query := `UPDATE entries SET starred=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
	result, err := s.db.Exec(query, starred, userID, pq.Array(entryIDs))
	if err != nil {
		return fmt.Errorf(`store: unable to update the bookmarked state %v: %v`, entryIDs, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

// ToggleBookmark toggles entry bookmark value.
func (s *Storage) ToggleBookmark(userID int64, entryID int64) error {
	query := `UPDATE entries SET starred = NOT starred, changed_at=now() WHERE user_id=$1 AND id=$2`
	result, err := s.db.Exec(query, userID, entryID)
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

// FlushHistory changes all entries with the status "read" to "removed".
func (s *Storage) FlushHistory(userID int64) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND status=$3 AND starred is false AND share_code=''
	`
	_, err := s.db.Exec(query, model.EntryStatusRemoved, userID, model.EntryStatusRead)
	if err != nil {
		return fmt.Errorf(`store: unable to flush history: %v`, err)
	}

	return nil
}

// MarkAllAsRead updates all user entries to the read status.
func (s *Storage) MarkAllAsRead(userID int64) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND status=$3`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread)
	if err != nil {
		return fmt.Errorf(`store: unable to mark all entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked all entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkGloballyVisibleFeedsAsRead updates all entries of globally visible feeds to the read status.
func (s *Storage) MarkGloballyVisibleFeedsAsRead(userID int64) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		FROM
			feeds
		WHERE
			entries.feed_id = feeds.id
			AND entries.user_id=$2
			AND entries.status=$3
			AND feeds.hide_globally=$4
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, false)
	if err != nil {
		return fmt.Errorf(`store: unable to mark globally visible feeds as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked globally visible feed entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkFeedAsRead updates all feed entries to the read status.
func (s *Storage) MarkFeedAsRead(userID, feedID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND feed_id=$3 AND status=$4 AND published_at < $5
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, feedID, model.EntryStatusUnread, before)
	if err != nil {
		return fmt.Errorf(`store: unable to mark feed entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked feed entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("feed_id", feedID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// MarkCategoryAsRead updates all category entries to the read status.
func (s *Storage) MarkCategoryAsRead(userID, categoryID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		FROM
			feeds
		WHERE
			feed_id=feeds.id
			AND
			feeds.user_id=$2
			AND
			status=$3
			AND
			published_at < $4
			AND
			feeds.category_id=$5
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, before, categoryID)
	if err != nil {
		return fmt.Errorf(`store: unable to mark category entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	slog.Debug("Marked category entries as read",
		slog.Int64("user_id", userID),
		slog.Int64("category_id", categoryID),
		slog.Int64("nb_entries", count),
	)

	return nil
}

// EntryURLExists returns true if an entry with this URL already exists.
func (s *Storage) EntryURLExists(feedID int64, entryURL string) bool {
	var result bool
	query := `SELECT true FROM entries WHERE feed_id=$1 AND url=$2`
	s.db.QueryRow(query, feedID, entryURL).Scan(&result)
	return result
}

// EntryShareCode returns the share code of the provided entry.
// It generates a new one if not already defined.
func (s *Storage) EntryShareCode(userID int64, entryID int64) (shareCode string, err error) {
	query := `SELECT share_code FROM entries WHERE user_id=$1 AND id=$2`
	err = s.db.QueryRow(query, userID, entryID).Scan(&shareCode)
	if err != nil {
		err = fmt.Errorf(`store: unable to get share code for entry #%d: %v`, entryID, err)
		return
	}

	if shareCode == "" {
		shareCode = crypto.GenerateRandomStringHex(20)

		query = `UPDATE entries SET share_code = $1 WHERE user_id=$2 AND id=$3`
		_, err = s.db.Exec(query, shareCode, userID, entryID)
		if err != nil {
			err = fmt.Errorf(`store: unable to set share code for entry #%d: %v`, entryID, err)
			return
		}
	}

	return
}

// UnshareEntry removes the share code for the given entry.
func (s *Storage) UnshareEntry(userID int64, entryID int64) (err error) {
	query := `UPDATE entries SET share_code='' WHERE user_id=$1 AND id=$2`
	_, err = s.db.Exec(query, userID, entryID)
	if err != nil {
		err = fmt.Errorf(`store: unable to remove share code for entry #%d: %v`, entryID, err)
	}
	return
}
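
// removeDuplicates sorts the tags and drops consecutive duplicates before they are stored.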
func removeDuplicates(l []string) []string {
	slices.Sort(l)
	return slices.Compact(l)
}