Documentation
¶
Overview ¶
SQLite database management.
Copyright (c) 2025 Chakib Ben Ziane <contact@blob42.xyz> and [`gosuki` contributors](https://github.com/blob42/gosuki/graphs/contributors). All rights reserved.
SPDX-License-Identifier: AGPL-3.0-or-later
This file is part of GoSuki.
GoSuki is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
GoSuki is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License along with gosuki. If not, see <http://www.gnu.org/licenses/>.
Index ¶
- Constants
- Variables
- func CountTotalBookmarks(ctx context.Context) (uint, error)
- func DebugPrintRow(rows *sql.Rows)
- func DebugPrintRows(rows *sql.Rows)
- func DotxQuery(file string) (*dotsqlx.DotSqlx, error)
- func DotxQueryEmbedFS(fs embed.FS, filename string) (*dotsqlx.DotSqlx, error)
- func GetDBDir() string
- func GetDBPath() string
- func Init(ctx context.Context, cmd *cli.Command)
- func InitDiskConn(dbPath string) error
- func LoadBookmarks(load loadFunc, modName string) error
- func RegisterSqliteHooks()
- func SQLFuncFoo(in string) string
- func SQLFuzzy(test, in string) bool
- func SQLxxHash(in string) string
- func ScheduleBackupToDisk()
- func SyncTreeToBuffer(node *Node, buffer *DB)
- func SyncURLIndexToBuffer(urls []string, index Index, buffer *DB)
- type Bookmark
- type CacheDB
- type DB
- func (db *DB) Attach(attached *DB) error
- func (src *DB) BackupToDisk(dbpath string) error
- func (db *DB) Close() error
- func (src *DB) CopyTo(dst *DB, dstName, srcName string)
- func (db *DB) CountRows(table string) int
- func (db *DB) DebugPrintBookmarks() error
- func (db *DB) GetDBClock(ctx context.Context) (*LamportClock, error)
- func (db *DB) Init() (*DB, error)
- func (db *DB) InitSchema(ctx context.Context) error
- func (db *DB) IsEmpty() (bool, error)
- func (db *DB) Locked() (bool, error)
- func (dst *DB) SyncFromDisk(dbpath string) error
- func (src *DB) SyncTo(dst *DB)
- func (src *DB) SyncToCache() error
- func (src *DB) SyncToClock(dst *DB, remoteClock uint64)
- func (db *DB) TotalBookmarks(ctx context.Context) (uint, error)
- func (db *DB) UpsertBookmark(bk *Bookmark) error
- type DBError
- type DBType
- type DsnOptions
- type Index
- type LamportClock
- type LockChecker
- type Node
- type Opener
- type PaginationParams
- type QueryResult
- func BookmarksByTag(ctx context.Context, tag string, pagination *PaginationParams) (*QueryResult, error)
- func ListBookmarks(ctx context.Context, pagination *PaginationParams) (*QueryResult, error)
- func QueryBookmarks(ctx context.Context, query string, fuzzy bool, pagination *PaginationParams) (*QueryResult, error)
- func QueryBookmarksByTag(ctx context.Context, query, tag string, fuzzy bool, ...) (*QueryResult, error)
- type RawBookmark
- type RawBookmarks
- type SQLXDBOpener
- type SQLXOpener
- type Tags
- type UUID
- type VFSLockChecker
Constants ¶
const (
    CacheName   = "memcache"
    L2CacheName = "memcache_l2"

    //MemcacheFmt = "file:%s?mode=memory&cache=shared"
    //BufferFmt = "file:%s?mode=memory&cache=shared"
    DBTypeInMemoryDSN = "file:%s?mode=memory&cache=shared&_journal=MEMORY"
    DBTypeCacheDSN    = DBTypeInMemoryDSN
)
const (
    DBFileName    = "gosuki.db"
    DBTypeFileDSN = "file:%s"

    // Opening DBs with this driver allows tracking connections.
    // This is used to perform the sqlite backup.
    DriverBackupMode = "sqlite_hook_backup"

    GosukiMainTable = "bookmarks"
)
const (
    WhereQueryBookmarks           = ` URL like '%%%s%%' OR metadata like '%%%s%%' OR tags like '%%%s%%' `
    WhereQueryBookmarksFuzzy      = ` fuzzy('%s', URL) OR fuzzy('%s', metadata) OR fuzzy('%s', tags) `
    WhereQueryBookmarksByTag      = ` (URL LIKE '%%%s%%' OR metadata LIKE '%%%s%%') AND tags LIKE '%%%s%%' `
    WhereQueryBookmarksByTagFuzzy = ` (fuzzy('%s', URL) OR fuzzy('%s', metadata)) AND tags LIKE '%%%s%%' `
    QQueryPaginate                = ` LIMIT %d OFFSET %d`
)
const (
    // metadata: name or title of resource
    // modified: time.Now().Unix()
    // desc:
    // flags: designed to be extended in future using bitwise masks
    // Masks:
    //   0b00000001: set title immutable (do not change title when updating the bookmarks from the web)
    QCreateSchema = `` /* 485-byte string literal not displayed */

    // The following view and triggers provide buku compatibility
    QCreateView = `CREATE VIEW bookmarks AS SELECT id, URL, metadata, tags, desc, flags FROM gskbookmarks`

    QCreateInsertTrigger = `` /* 326-byte string literal not displayed */
    QCreateUpdateTrigger = `` /* 574-byte string literal not displayed */

    QCreateSchemaVersion = `
    CREATE TABLE IF NOT EXISTS schema_version (
        version INTEGER PRIMARY KEY
    )
    `
)
const CurrentSchemaVersion = 3
const TagSep = ","
TagSep is the default separator used to separate and wrap tags in the DB
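As a rough illustration of how the WhereQuery* fragments and QQueryPaginate above are meant to compose, here is a hedged sketch; the surrounding SELECT, the search term, and the variable names are assumptions, not taken from the package:

    term, limit, offset := "golang", 20, 0
    // Fill the LIKE placeholders, then append the pagination suffix.
    where := fmt.Sprintf(WhereQueryBookmarks, term, term, term)
    query := "SELECT * FROM " + GosukiMainTable + " WHERE " + where +
        fmt.Sprintf(QQueryPaginate, limit, offset)
    rows, err := DiskDB.Handle.Queryx(query) // assumes DiskDB has been initialized
    if err != nil {
        /* handle error */
    }
    defer rows.Close()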
Variables ¶
var (
    Cache   = &CacheDB{}
    L2Cache = &CacheDB{}
)
Global in-memory cache hierarchy for the gosuki database, structured in two levels to optimize performance and reduce unnecessary I/O operations.
Cache (level 1 cache) serves as a working buffer that aggregates and merges data from all scanned bookmarks. It acts as the primary cache for real-time operations and is periodically synchronized with L2Cache.
L2Cache (level 2 cache) functions as a persistent memory mirror of the on-disk database (gosuki.db). It is updated as a final step after level 1 cache synchronizations from module buffers, enabling checksum-based comparison between levels to detect changes and avoid redundant updates. This ensures efficient data consistency checks and minimizes write operations to the underlying storage.
The two-level architecture balances speed (level 1) with data integrity (level 2), while L2Cache maintains a faithful in-memory replica of the on-disk database state.
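A minimal sketch of how data might flow through this hierarchy, assuming a module buffer created with NewDB; the buffer name and error handling are illustrative, not the actual module code path:

    buffer := NewDB("module_buffer", "", DBTypeInMemoryDSN) // per-module in-memory buffer
    if _, err := buffer.Init(); err != nil {
        /* handle error */
    }
    // ... the module fills `buffer` with parsed bookmarks ...
    if err := buffer.SyncToCache(); err != nil { // merge into the L1 Cache
        /* handle error */
    }
    Cache.SyncTo(L2Cache.DB) // mirror the merged state into the L2 cache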
var (
    // Handle to on-disk gosuki database
    DiskDB *DB

    Config *dbConfig
)
var (
    // Default sqlite3 driver
    DriverDefault = "sqlite3_gosuki"
)

var (
    ErrVfsLocked = errors.New("vfs locked")
)

var (
    SyncTrigger = atomic.Bool{}
)
Functions ¶
func CountTotalBookmarks ¶
CountTotalBookmarks counts total bookmarks from disk
func DebugPrintRow ¶
Debug-print a single row (does not call rows.Next()).
func DotxQueryEmbedFS ¶
Loads a dotsql from an embedded FS
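For illustration, a hedged sketch of loading named queries from an embedded file; the file name queries.sql and the variable names are assumptions:

    //go:embed queries.sql
    var sqlFS embed.FS // hypothetical embedded SQL file

    dotx, err := DotxQueryEmbedFS(sqlFS, "queries.sql")
    if err != nil {
        /* handle error */
    }
    _ = dotx // named queries can then be run against a DB handle via dotsqlx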
func InitDiskConn ¶
Initialize the connection to the on-disk gosuki database.
func LoadBookmarks ¶
Internal loading function called by modules.
func RegisterSqliteHooks ¶
func RegisterSqliteHooks()
RegisterSqliteHooks registers a SQLite backup hook with additional connection tracking.
func ScheduleBackupToDisk ¶ added in v1.2.0
func ScheduleBackupToDisk()
func SyncTreeToBuffer ¶
func SyncURLIndexToBuffer ¶
Types ¶
type CacheDB ¶
type CacheDB struct {
*DB
}
func GetCacheDB ¶
func GetCacheDB() *CacheDB
func (*CacheDB) IsInitialized ¶
type DB ¶
type DB struct {
    Name       string
    Path       string
    Handle     *sqlx.DB
    EngineMode string
    AttachedTo []string
    Type       DBType

    SQLXOpener
    LockChecker
    // contains filtered or unexported fields
}
DB encapsulates an sql.DB struct. All interactions with memory/buffer and disk databases are done through the DB instance.
func NewBuffer ¶
A Buffer is an in-memory sqlite database holding the current state of parsed bookmarks within a specific module. Buffers act as temporary, per-module storage that aggregates data before synchronizing with the Level 1 Cache (Cache). This decouples module processing from the global cache hierarchy, enabling efficient batching of updates and reducing contention. Buffers are ephemeral and exist only for the duration of module operations, with their contents periodically flushed and merged into the Level 1 Cache to propagate changes upward in the two-level architecture. This design ensures minimal I/O overhead while maintaining consistency through checksum-based comparisons between cache levels.
func NewDB ¶
func NewDB(name string, dbPath string, dbFormat string, opts ...DsnOptions) *DB
dbPath is an empty string ("") when using an in-memory sqlite db. A call to Init() is required before use.
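For example (a hedged sketch; the names are illustrative):

    // In-memory database: empty dbPath, in-memory DSN format.
    buf := NewDB("buffer_example", "", DBTypeInMemoryDSN)
    if _, err := buf.Init(); err != nil {
        /* handle error */
    }

    // On-disk database using the file DSN format.
    disk := NewDB("gosuki", GetDBPath(), DBTypeFileDSN)
    if _, err := disk.Init(); err != nil {
        /* handle error */
    }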
func (*DB) BackupToDisk ¶ added in v1.2.0
BackupToDisk copies the `src` database contents to a file on disk. It creates a backup of the source database (src) to the specified dbpath. The function is safe for concurrent use as it acquires a mutex. Returns an error if any step fails, including database connection issues, backup execution errors, or invalid configuration. Uses SQLite's backup API via the sqlx package, requiring the driver to support it.
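A short usage sketch (assuming `src` is an initialized in-memory DB and using the standard gosuki database path):

    if err := src.BackupToDisk(GetDBPath()); err != nil {
        /* handle backup failure */
    }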
func (*DB) DebugPrintBookmarks ¶ added in v1.2.0
func (*DB) GetDBClock ¶ added in v1.2.0
func (db *DB) GetDBClock(ctx context.Context) (*LamportClock, error)
GetDBClock returns the Lamport clock for this node's db (version column).
func (*DB) Init ¶
Initialize a sqlite database with the Gosuki schema if not already done. TODO: export Open() in its own method and wrap it with an interface so we can mock it and test the lock status in Init().
func (*DB) SyncFromDisk ¶
func (*DB) SyncTo ¶
SyncTo synchronizes bookmarks from the source database to the destination database using the current Lamport clock value.
This function performs local-only synchronization by:

1. Reading all entries from the source database's gskbookmarks table
2. Attempting to insert each entry into the destination database's gskbookmarks table
3. Handling duplicate URL constraints by comparing hash values and updating existing entries
4. Maintaining versioning through Lamport clock synchronization
5. Scheduling disk backup when syncing to memcache
It is designed for local synchronization scenarios where causal ordering must be preserved; the resulting Lamport timestamps can also be used for multi-device synchronization.
Example usage:
    srcDB := NewDB("src", "source.db", DBTypeFileDSN)
    dstDB := NewDB("dst", "destination.db", DBTypeFileDSN)
    srcDB.SyncTo(dstDB)
func (*DB) SyncToCache ¶
func (*DB) SyncToClock ¶ added in v1.2.0
SyncToClock synchronizes bookmarks from the source DB (src) to the destination DB (dst) using Lamport clock synchronization for peer-to-peer consistency.
It performs the following steps:
- Reads all entries from src's gskbookmarks table
- Attempts to insert each entry into dst's gskbookmarks table
- For existing entries (due to URL constraints), captures their hashes and processes them in a second transaction for potential updates
- Updates existing entries only if there are changes in metadata, tags, or description
- Commits transactions for both insert and update phases
- If dst is a memcache, schedules a disk backup after completion
The synchronization uses SQLite transactions for consistency and handles duplicate URL constraints by comparing hash values. Tags are merged and normalized during updates. Lamport clock is used to maintain versioning and ensure proper ordering of operations in distributed systems.
Parameters:
- dst: The destination database to sync bookmarks to
- clock: The Lamport clock value to use for versioning
Behavior:

- When syncing to L2 cache, increments the version field on successful inserts
- Merges tags from both source and destination when updating existing entries
- Normalizes merged tags by sorting them alphabetically
- Only updates entries when there are actual changes in metadata, tags, or description
- Schedules disk backup when syncing to memcache (CacheName)
- Uses Lamport clock for p2p synchronization to maintain causal ordering
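A hedged usage sketch (how the peer's clock value is obtained, e.g. during a p2p exchange, is assumed here):

    // peerClock: the Lamport clock value received from the remote peer (assumed input).
    src.SyncToClock(dst, peerClock)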
func (*DB) TotalBookmarks ¶ added in v1.2.0
func (*DB) UpsertBookmark ¶
Inserts or updates a bookmark in the target database. If a bookmark with the same URL already exists due to a constraint, the existing entry is updated with the new data. NOTE: We don't use sql UPSERT as we need to do a manual merge of some columns such as `tags`. NOTE: This function is always called against a buffer db
type DBError ¶
type DsnOptions ¶
type Index ¶
Index is a Red-Black Tree hashmap that holds in memory the last state of the bookmark tree. It is used for fast db queries. Each URL holds a pointer to a node in [nodeTree].
type LamportClock ¶ added in v1.2.0
type LamportClock struct {
    Value uint64
    // contains filtered or unexported fields
}
var (
    // lamport clock for this node
    Clock *LamportClock
)
func (*LamportClock) LocalTick ¶ added in v1.2.0
func (c *LamportClock) LocalTick() uint64
func (*LamportClock) Tick ¶ added in v1.2.0
func (c *LamportClock) Tick(peerClock uint64) uint64
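A brief sketch of how the clock operations compose; `peerClock` is an assumed input and the merge semantics are assumed to follow standard Lamport clocks:

    clock, err := DiskDB.GetDBClock(ctx) // clock backed by the version column
    if err != nil {
        /* handle error */
    }
    v := clock.LocalTick()    // local event: advance this node's clock
    v = clock.Tick(peerClock) // merge a peer's value (assumed max(local, peer)+1 semantics)
    _ = v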
type LockChecker ¶
type PaginationParams ¶
func DefaultPagination ¶
func DefaultPagination() *PaginationParams
type QueryResult ¶
func BookmarksByTag ¶
func BookmarksByTag(
    ctx context.Context,
    tag string,
    pagination *PaginationParams,
) (*QueryResult, error)
func ListBookmarks ¶
func ListBookmarks(
    ctx context.Context,
    pagination *PaginationParams,
) (*QueryResult, error)
func QueryBookmarks ¶
func QueryBookmarks(
    ctx context.Context,
    query string,
    fuzzy bool,
    pagination *PaginationParams,
) (*QueryResult, error)
func QueryBookmarksByTag ¶
func QueryBookmarksByTag(
    ctx context.Context,
    query, tag string,
    fuzzy bool,
    pagination *PaginationParams,
) (*QueryResult, error)
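For example, a hedged sketch of a paginated fuzzy search (QueryResult's fields are not shown here, so iteration over the result is omitted):

    page := DefaultPagination()
    res, err := QueryBookmarks(ctx, "golang", true /* fuzzy */, page)
    if err != nil {
        /* handle error */
    }
    _ = res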
type RawBookmark ¶
type RawBookmark struct {
    ID  uint64
    URL string `db:"URL"`

    // Usually used for the bookmark title
    Metadata string
    Tags     string
    Desc     string

    // Last modified
    Modified uint64

    // kept for buku compat, not used for now
    Flags int

    Module string

    // currently not used
    XHSum string

    // lamport clock
    Version uint64

    // Node that made the change
    NodeID UUID `db:"node_id"`
}
type RawBookmarks ¶
type RawBookmarks []*RawBookmark
func (RawBookmarks) AsBookmarks ¶
func (raws RawBookmarks) AsBookmarks() []*gosuki.Bookmark
type SQLXDBOpener ¶
type SQLXDBOpener struct {
// contains filtered or unexported fields
}
func (*SQLXDBOpener) Get ¶
func (o *SQLXDBOpener) Get() *sqlx.DB
type SQLXOpener ¶
type Tags ¶
type Tags struct {
// contains filtered or unexported fields
}
func (*Tags) PreSanitize ¶
Sanitize the list of tags before saving them to the DB
func (Tags) String ¶
String representation of the tags. It can wrap the tags with the delim if wrap is true. This is done for compatibility with Buku DB format.
func (Tags) StringWrap ¶
String representation of the tags. It wraps the tags with the delim.
type VFSLockChecker ¶
type VFSLockChecker struct {
// contains filtered or unexported fields
}
func (*VFSLockChecker) Locked ¶
func (checker *VFSLockChecker) Locked() (bool, error)