// Copyright 2017 Frédéric Guillot. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.

package storage // import "miniflux.app/storage"

import (
	"errors"
	"fmt"
	"time"

	"miniflux.app/crypto"
	"miniflux.app/logger"
	"miniflux.app/model"

	"github.com/lib/pq"
)

// CountUnreadEntries returns the number of unread entries.
func (s *Storage) CountUnreadEntries(userID int64) int {
	builder := s.NewEntryQueryBuilder(userID)
	builder.WithStatus(model.EntryStatusUnread)

	n, err := builder.CountEntries()
	if err != nil {
		logger.Error(`store: unable to count unread entries for user #%d: %v`, userID, err)
		return 0
	}

	return n
}

// NewEntryQueryBuilder returns a new EntryQueryBuilder.
func (s *Storage) NewEntryQueryBuilder(userID int64) *EntryQueryBuilder {
	return NewEntryQueryBuilder(s, userID)
}

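// A minimal usage sketch for the query builder (not part of this file's API;
// `store` is assumed to be a *Storage). Only WithStatus and CountEntries are
// shown because they are the builder methods already used by CountUnreadEntries:
//
//	builder := store.NewEntryQueryBuilder(userID)
//	builder.WithStatus(model.EntryStatusUnread)
//	total, err := builder.CountEntries()
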
// UpdateEntryContent updates entry content.
func (s *Storage) UpdateEntryContent(entry *model.Entry) error {
	tx, err := s.db.Begin()
	if err != nil {
		return err
	}

	query := `
		UPDATE
			entries
		SET
			content=$1
		WHERE
			id=$2 AND user_id=$3
	`
	_, err = tx.Exec(query, entry.Content, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update content of entry #%d: %v`, entry.ID, err)
	}

	query = `
		UPDATE
			entries
		SET
			document_vectors = setweight(to_tsvector(substring(coalesce(title, '') for 1000000)), 'A') || setweight(to_tsvector(substring(coalesce(content, '') for 1000000)), 'B')
		WHERE
			id=$1 AND user_id=$2
	`
	_, err = tx.Exec(query, entry.ID, entry.UserID)
	if err != nil {
		tx.Rollback()
		return fmt.Errorf(`store: unable to update content of entry #%d: %v`, entry.ID, err)
	}

	return tx.Commit()
}

// createEntry adds a new entry.
func (s *Storage) createEntry(entry *model.Entry) error {
	query := `
		INSERT INTO entries
			(title, hash, url, comments_url, published_at, content, author, user_id, feed_id, changed_at, document_vectors)
		VALUES
			($1, $2, $3, $4, $5, $6, $7, $8, $9, now(), setweight(to_tsvector(substring(coalesce($1, '') for 1000000)), 'A') || setweight(to_tsvector(substring(coalesce($6, '') for 1000000)), 'B'))
		RETURNING
			id, status
	`
	err := s.db.QueryRow(
		query,
		entry.Title,
		entry.Hash,
		entry.URL,
		entry.CommentsURL,
		entry.Date,
		entry.Content,
		entry.Author,
		entry.UserID,
		entry.FeedID,
	).Scan(&entry.ID, &entry.Status)

	if err != nil {
		return fmt.Errorf(`store: unable to create entry %q (feed #%d): %v`, entry.URL, entry.FeedID, err)
	}

	for i := 0; i < len(entry.Enclosures); i++ {
		entry.Enclosures[i].EntryID = entry.ID
		entry.Enclosures[i].UserID = entry.UserID
		err := s.CreateEnclosure(entry.Enclosures[i])
		if err != nil {
			return err
		}
	}

	return nil
}

// updateEntry updates an entry when a feed is refreshed.
// Note: we do not update the published date because some feeds do not contain any date,
// and it defaults to time.Now(), which could change the order of items on the history page.
func (s *Storage) updateEntry(entry *model.Entry) error {
	query := `
		UPDATE
			entries
		SET
			title=$1,
			url=$2,
			comments_url=$3,
			content=$4,
			author=$5,
			document_vectors = setweight(to_tsvector(substring(coalesce($1, '') for 1000000)), 'A') || setweight(to_tsvector(substring(coalesce($4, '') for 1000000)), 'B')
		WHERE
			user_id=$6 AND feed_id=$7 AND hash=$8
		RETURNING
			id
	`
	err := s.db.QueryRow(
		query,
		entry.Title,
		entry.URL,
		entry.CommentsURL,
		entry.Content,
		entry.Author,
		entry.UserID,
		entry.FeedID,
		entry.Hash,
	).Scan(&entry.ID)

	if err != nil {
		return fmt.Errorf(`store: unable to update entry %q: %v`, entry.URL, err)
	}

	for _, enclosure := range entry.Enclosures {
		enclosure.UserID = entry.UserID
		enclosure.EntryID = entry.ID
	}

	return s.UpdateEnclosures(entry.Enclosures)
}

// entryExists checks if an entry already exists based on its hash when refreshing a feed.
func (s *Storage) entryExists(entry *model.Entry) bool {
	var result int
	query := `SELECT 1 FROM entries WHERE user_id=$1 AND feed_id=$2 AND hash=$3`
	s.db.QueryRow(query, entry.UserID, entry.FeedID, entry.Hash).Scan(&result)
	return result == 1
}

// cleanupEntries deletes from the database entries marked as "removed" and not visible anymore in the feed.
func (s *Storage) cleanupEntries(feedID int64, entryHashes []string) error {
	query := `
		DELETE FROM
			entries
		WHERE
			feed_id=$1
		AND
			id IN (SELECT id FROM entries WHERE feed_id=$2 AND status=$3 AND NOT (hash=ANY($4)))
	`
	if _, err := s.db.Exec(query, feedID, feedID, model.EntryStatusRemoved, pq.Array(entryHashes)); err != nil {
		return fmt.Errorf(`store: unable to cleanup entries: %v`, err)
	}

	return nil
}

// UpdateEntries updates a list of entries while refreshing a feed.
func (s *Storage) UpdateEntries(userID, feedID int64, entries model.Entries, updateExistingEntries bool) (err error) {
	var entryHashes []string
	for _, entry := range entries {
		entry.UserID = userID
		entry.FeedID = feedID

		if s.entryExists(entry) {
			if updateExistingEntries {
				err = s.updateEntry(entry)
			}
		} else {
			err = s.createEntry(entry)
		}

		if err != nil {
			return err
		}

		entryHashes = append(entryHashes, entry.Hash)
	}

	if err := s.cleanupEntries(feedID, entryHashes); err != nil {
		logger.Error(`store: feed #%d: %v`, feedID, err)
	}

	return nil
}

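// Sketch of a typical call site during a feed refresh (assumed context, not
// defined in this file: `store` is a *Storage, `feed` is a *model.Feed and
// `parsedEntries` comes from the feed parser):
//
//	if err := store.UpdateEntries(feed.UserID, feed.ID, parsedEntries, true); err != nil {
//		logger.Error(`refresh: unable to update entries for feed #%d: %v`, feed.ID, err)
//	}
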
// ArchiveEntries changes the status of read items to "removed" after specified days.
func (s *Storage) ArchiveEntries(days int) error {
	if days < 0 {
		return nil
	}

	before := time.Now().AddDate(0, 0, -days)
	query := `
		UPDATE
			entries
		SET
			status=$1
		WHERE
			id=ANY(SELECT id FROM entries WHERE status=$2 AND starred is false AND share_code='' AND published_at < $3 LIMIT 5000)
	`
	if _, err := s.db.Exec(query, model.EntryStatusRemoved, model.EntryStatusRead, before); err != nil {
		return fmt.Errorf(`store: unable to archive read entries: %v`, err)
	}

	return nil
}

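// Sketch of how a scheduled cleanup job might call ArchiveEntries (the 60-day
// retention value is purely illustrative, not taken from this file):
//
//	if err := store.ArchiveEntries(60); err != nil {
//		logger.Error(`cleanup: %v`, err)
//	}
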
// SetEntriesStatus updates the status of the given list of entries.
func (s *Storage) SetEntriesStatus(userID int64, entryIDs []int64, status string) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND id=ANY($3)`
	result, err := s.db.Exec(query, status, userID, pq.Array(entryIDs))
	if err != nil {
		return fmt.Errorf(`store: unable to update entries statuses %v: %v`, entryIDs, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to update these entries %v: %v`, entryIDs, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

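// Sketch of marking a batch of entries as read (assuming `store` is a *Storage
// and `entryIDs` was collected from an API request):
//
//	if err := store.SetEntriesStatus(userID, entryIDs, model.EntryStatusRead); err != nil {
//		logger.Error(`api: %v`, err)
//	}
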
// ToggleBookmark toggles entry bookmark value.
func (s *Storage) ToggleBookmark(userID int64, entryID int64) error {
	query := `UPDATE entries SET starred = NOT starred, changed_at=now() WHERE user_id=$1 AND id=$2`
	result, err := s.db.Exec(query, userID, entryID)
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	count, err := result.RowsAffected()
	if err != nil {
		return fmt.Errorf(`store: unable to toggle bookmark flag for entry #%d: %v`, entryID, err)
	}

	if count == 0 {
		return errors.New(`store: nothing has been updated`)
	}

	return nil
}

// FlushHistory sets all entries with the status "read" to "removed".
func (s *Storage) FlushHistory(userID int64) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND status=$3 AND starred is false AND share_code=''
	`
	_, err := s.db.Exec(query, model.EntryStatusRemoved, userID, model.EntryStatusRead)
	if err != nil {
		return fmt.Errorf(`store: unable to flush history: %v`, err)
	}

	return nil
}

// MarkAllAsRead updates all user entries to the read status.
func (s *Storage) MarkAllAsRead(userID int64) error {
	query := `UPDATE entries SET status=$1, changed_at=now() WHERE user_id=$2 AND status=$3`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread)
	if err != nil {
		return fmt.Errorf(`store: unable to mark all entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	logger.Debug("[Storage:MarkAllAsRead] %d items marked as read", count)

	return nil
}

// MarkFeedAsRead updates all feed entries to the read status.
func (s *Storage) MarkFeedAsRead(userID, feedID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2 AND feed_id=$3 AND status=$4 AND published_at < $5
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, feedID, model.EntryStatusUnread, before)
	if err != nil {
		return fmt.Errorf(`store: unable to mark feed entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	logger.Debug("[Storage:MarkFeedAsRead] %d items marked as read", count)

	return nil
}

// MarkCategoryAsRead updates all category entries to the read status.
func (s *Storage) MarkCategoryAsRead(userID, categoryID int64, before time.Time) error {
	query := `
		UPDATE
			entries
		SET
			status=$1,
			changed_at=now()
		WHERE
			user_id=$2
		AND
			status=$3
		AND
			published_at < $4
		AND
			feed_id IN (SELECT id FROM feeds WHERE user_id=$2 AND category_id=$5)
	`
	result, err := s.db.Exec(query, model.EntryStatusRead, userID, model.EntryStatusUnread, before, categoryID)
	if err != nil {
		return fmt.Errorf(`store: unable to mark category entries as read: %v`, err)
	}

	count, _ := result.RowsAffected()
	logger.Debug("[Storage:MarkCategoryAsRead] %d items marked as read", count)

	return nil
}

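// Sketch of the mark-as-read family in use (assuming `store` is a *Storage);
// time.Now() is passed as the cut-off so every entry published so far is affected:
//
//	now := time.Now()
//	store.MarkFeedAsRead(userID, feedID, now)
//	store.MarkCategoryAsRead(userID, categoryID, now)
//	store.MarkAllAsRead(userID)
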
// EntryURLExists returns true if an entry with this URL already exists.
func (s *Storage) EntryURLExists(feedID int64, entryURL string) bool {
	var result bool
	query := `SELECT true FROM entries WHERE feed_id=$1 AND url=$2`
	s.db.QueryRow(query, feedID, entryURL).Scan(&result)
	return result
}

// EntryShareCode returns the share code of the provided entry.
// It generates a new one if not already defined.
func (s *Storage) EntryShareCode(userID int64, entryID int64) (shareCode string, err error) {
	query := `SELECT share_code FROM entries WHERE user_id=$1 AND id=$2`
	err = s.db.QueryRow(query, userID, entryID).Scan(&shareCode)
	if err != nil {
		err = fmt.Errorf(`store: unable to get share code for entry #%d: %v`, entryID, err)
		return
	}

	if shareCode == "" {
		shareCode = crypto.GenerateRandomStringHex(20)

		query = `UPDATE entries SET share_code = $1 WHERE user_id=$2 AND id=$3`
		_, err = s.db.Exec(query, shareCode, userID, entryID)
		if err != nil {
			err = fmt.Errorf(`store: unable to set share code for entry #%d: %v`, entryID, err)
			return
		}
	}

	return
}

// UnshareEntry removes the share code for the given entry.
func (s *Storage) UnshareEntry(userID int64, entryID int64) (err error) {
	query := `UPDATE entries SET share_code='' WHERE user_id=$1 AND id=$2`
	_, err = s.db.Exec(query, userID, entryID)
	if err != nil {
		err = fmt.Errorf(`store: unable to remove share code for entry #%d: %v`, entryID, err)
	}
	return
}

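// Sketch of the share-code round trip (assuming `store` is a *Storage; the
// "/entry/share/" route shown here is illustrative, not defined in this file):
//
//	shareCode, err := store.EntryShareCode(userID, entryID)
//	if err == nil {
//		shareURL := "/entry/share/" + shareCode
//		_ = shareURL
//	}
//
//	// Sharing can later be revoked:
//	_ = store.UnshareEntry(userID, entryID)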