Mirror of https://github.com/lightningnetwork/lnd.git, synced 2025-07-28 13:52:55 +02:00
kvdb+refactor: move all general sqlite code to separate dir
In this commit, all the general, non-postgres-specific SQL code is moved out of the postgres package and into a new sqlbase package. This makes it easier to reuse for future SQL integrations.
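The stated goal is reuse by future SQL integrations. Below is a minimal sketch of what a sqlite-flavoured consumer of the new sqlbase package could look like, assuming the Config fields and the Init/NewSqlBackend entry points keep the names shown in this diff; the "sqlite" driver registration, the modernc.org/sqlite import, the DSN and the newSqliteBackend wrapper are illustrative assumptions, not part of this commit.

package sqlite

import (
    "context"
    "time"

    "github.com/btcsuite/btcwallet/walletdb"
    "github.com/lightningnetwork/lnd/kvdb/sqlbase"

    // Assumed sqlite driver; any database/sql driver registered under the
    // name passed as DriverName would do.
    _ "modernc.org/sqlite"
)

// newSqliteBackend is a hypothetical counterpart to newPostgresBackend below,
// built entirely on the relocated sqlbase code.
func newSqliteBackend(ctx context.Context, dsn, prefix string) (walletdb.DB,
    error) {

    // The global connection set must be initialized once per process,
    // mirroring the sqlbase.Init call in the postgres test fixture.
    sqlbase.Init(10)

    cfg := &sqlbase.Config{
        DriverName:      "sqlite",
        Dsn:             dsn,
        Timeout:         time.Minute,
        Schema:          "", // sqlite does not support schemas.
        TableNamePrefix: prefix,
        // No keyword replacements: the generic commands are already
        // written in the sqlite dialect.
    }

    return sqlbase.NewSqlBackend(ctx, cfg)
}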
@@ -4,269 +4,14 @@ package postgres
import (
    "context"
    "database/sql"
    "errors"
    "fmt"
    "io"
    "sync"
    "time"

    "github.com/btcsuite/btcwallet/walletdb"
    "github.com/lightningnetwork/lnd/kvdb/sqlbase"
)

const (
    // kvTableName is the name of the table that will contain all the kv
    // pairs.
    kvTableName = "kv"
)

// SqlConfig holds a set of configuration options of a sql database connection.
type SqlConfig struct {
    // DriverName is the string that defines the registered sql driver that
    // is to be used.
    DriverName string

    // Dsn is the database connection string that will be used to connect
    // to the db.
    Dsn string

    // Timeout is the time after which a query to the db will be canceled if
    // it has not yet completed.
    Timeout time.Duration

    // Schema is the name of the schema under which the sql tables should be
    // created. It should be left empty for backends like sqlite that do not
    // support having more than one schema.
    Schema string

    // TableNamePrefix is the name that should be used as a table name
    // prefix when constructing the KV style table.
    TableNamePrefix string

    // SQLiteCmdReplacements define a one-to-one string mapping of sql
    // keywords to the strings that should replace those keywords in any
    // commands. Note that the sqlite keywords to be replaced are
    // case-sensitive.
    SQLiteCmdReplacements SQLiteCmdReplacements

    // WithTxLevelLock when set will ensure that there is a transaction
    // level lock.
    WithTxLevelLock bool
}

// db holds a reference to the postgres connection.
type db struct {
    // cfg is the sql db connection config.
    cfg *SqlConfig

    // prefix is the table name prefix that is used to simulate namespaces.
    // We don't use schemas because at least sqlite does not support that.
    prefix string

    // ctx is the overall context for the database driver.
    //
    // TODO: This is an anti-pattern that is in place until the kvdb
    // interface supports a context.
    ctx context.Context

    // db is the underlying database connection instance.
    db *sql.DB

    // lock is the global write lock that ensures single writer. This is
    // only used if cfg.WithTxLevelLock is set.
    lock sync.RWMutex

    // table is the name of the table that contains the data for all
    // top-level buckets that have keys that cannot be mapped to a distinct
    // sql table.
    table string
}

// Enforce db implements the walletdb.DB interface.
var _ walletdb.DB = (*db)(nil)

var (
    // dbConns is a global set of database connections.
    dbConns   *dbConnSet
    dbConnsMu sync.Mutex
)

// Init initializes the global set of database connections.
func Init(maxConnections int) {
    dbConnsMu.Lock()
    defer dbConnsMu.Unlock()

    if dbConns != nil {
        return
    }

    dbConns = newDbConnSet(maxConnections)
}

// NewSqlBackend returns a db object initialized with the passed backend
// config. If database connection cannot be established, then returns error.
func NewSqlBackend(ctx context.Context, cfg *SqlConfig) (*db, error) {
    dbConnsMu.Lock()
    defer dbConnsMu.Unlock()

    if dbConns == nil {
        return nil, errors.New("db connection set not initialized")
    }

    if cfg.TableNamePrefix == "" {
        return nil, errors.New("empty table name prefix")
    }

    table := fmt.Sprintf("%s_%s", cfg.TableNamePrefix, kvTableName)

    query := newKVSchemaCreationCmd(
        table, cfg.Schema, cfg.SQLiteCmdReplacements,
    )

    dbConn, err := dbConns.Open(cfg.DriverName, cfg.Dsn)
    if err != nil {
        return nil, err
    }

    _, err = dbConn.ExecContext(ctx, query)
    if err != nil {
        _ = dbConn.Close()

        return nil, err
    }

    return &db{
        cfg:    cfg,
        ctx:    ctx,
        db:     dbConn,
        table:  table,
        prefix: cfg.TableNamePrefix,
    }, nil
}

// getTimeoutCtx gets a timeout context for database requests.
func (db *db) getTimeoutCtx() (context.Context, func()) {
    if db.cfg.Timeout == time.Duration(0) {
        return db.ctx, func() {}
    }

    return context.WithTimeout(db.ctx, db.cfg.Timeout)
}

// getPrefixedTableName returns a table name for this prefix (namespace).
func (db *db) getPrefixedTableName(table string) string {
    return fmt.Sprintf("%s_%s", db.prefix, table)
}

// catchPanic executes the specified function. If a panic occurs, it is returned
// as an error value.
func catchPanic(f func() error) (err error) {
    defer func() {
        if r := recover(); r != nil {
            log.Criticalf("Caught unhandled error: %v", r)

            switch data := r.(type) {
            case error:
                err = data

            default:
                err = errors.New(fmt.Sprintf("%v", data))
            }
        }
    }()

    err = f()

    return
}

// View opens a database read transaction and executes the function f with the
// transaction passed as a parameter. After f exits, the transaction is rolled
// back. If f errors, its error is returned, not a rollback error (if any
// occur). The passed reset function is called before the start of the
// transaction and can be used to reset intermediate state. As callers may
// expect retries of the f closure (depending on the database backend used), the
// reset function will be called before each retry respectively.
func (db *db) View(f func(tx walletdb.ReadTx) error, reset func()) error {
    return db.executeTransaction(
        func(tx walletdb.ReadWriteTx) error {
            return f(tx.(walletdb.ReadTx))
        },
        reset, true,
    )
}

// Update opens a database read/write transaction and executes the function f
// with the transaction passed as a parameter. After f exits, if f did not
// error, the transaction is committed. Otherwise, if f did error, the
// transaction is rolled back. If the rollback fails, the original error
// returned by f is still returned. If the commit fails, the commit error is
// returned. As callers may expect retries of the f closure, the reset function
// will be called before each retry respectively.
func (db *db) Update(f func(tx walletdb.ReadWriteTx) error, reset func()) (err error) {
    return db.executeTransaction(f, reset, false)
}

// executeTransaction creates a new read-only or read-write transaction and
// executes the given function within it.
func (db *db) executeTransaction(f func(tx walletdb.ReadWriteTx) error,
    reset func(), readOnly bool) error {

    reset()

    tx, err := newReadWriteTx(db, readOnly)
    if err != nil {
        return err
    }

    err = catchPanic(func() error { return f(tx) })
    if err != nil {
        if rollbackErr := tx.Rollback(); rollbackErr != nil {
            log.Errorf("Error rolling back tx: %v", rollbackErr)
        }

        return err
    }

    return tx.Commit()
}

// PrintStats returns all collected stats pretty printed into a string.
func (db *db) PrintStats() string {
    return "stats not supported by Postgres driver"
}

// BeginReadWriteTx opens a database read+write transaction.
func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, error) {
    return newReadWriteTx(db, false)
}

// BeginReadTx opens a database read transaction.
func (db *db) BeginReadTx() (walletdb.ReadTx, error) {
    return newReadWriteTx(db, true)
}

// Copy writes a copy of the database to the provided writer. This call will
// start a read-only transaction to perform all operations.
// This function is part of the walletdb.Db interface implementation.
func (db *db) Copy(w io.Writer) error {
    return errors.New("not implemented")
}

// Close cleanly shuts down the database and syncs all data.
// This function is part of the walletdb.Db interface implementation.
func (db *db) Close() error {
    dbConnsMu.Lock()
    defer dbConnsMu.Unlock()

    log.Infof("Closing database %v", db.prefix)

    return dbConns.Close(db.cfg.Dsn)
}

// sqliteCmdReplacements defines a mapping from some SQLite keywords and phrases
// to their postgres counterparts.
var sqliteCmdReplacements = SQLiteCmdReplacements{
var sqliteCmdReplacements = sqlbase.SQLiteCmdReplacements{
    "BLOB":                "BYTEA",
    "INTEGER PRIMARY KEY": "BIGSERIAL PRIMARY KEY",
}

@@ -276,7 +21,7 @@ var sqliteCmdReplacements = SQLiteCmdReplacements{
func newPostgresBackend(ctx context.Context, config *Config, prefix string) (
    walletdb.DB, error) {

    cfg := &SqlConfig{
    cfg := &sqlbase.Config{
        DriverName: "pgx",
        Dsn:        config.Dsn,
        Timeout:    config.Timeout,
@@ -286,5 +31,5 @@ func newPostgresBackend(ctx context.Context, config *Config, prefix string) (
        WithTxLevelLock: true,
    }

    return NewSqlBackend(ctx, cfg)
    return sqlbase.NewSqlBackend(ctx, cfg)
}

@@ -1,90 +0,0 @@
package postgres

import (
    "database/sql"
    "fmt"
    "sync"

    _ "github.com/jackc/pgx/v4/stdlib"
)

// dbConn stores the actual connection and a user count.
type dbConn struct {
    db    *sql.DB
    count int
}

// dbConnSet stores a set of connections.
type dbConnSet struct {
    dbConn         map[string]*dbConn
    maxConnections int

    // mu is used to guard access to the dbConn map.
    mu sync.Mutex
}

// newDbConnSet initializes a new set of connections.
func newDbConnSet(maxConnections int) *dbConnSet {
    return &dbConnSet{
        dbConn:         make(map[string]*dbConn),
        maxConnections: maxConnections,
    }
}

// Open opens a new database connection. If a connection already exists for the
// given dsn, the existing connection is returned.
func (d *dbConnSet) Open(driver, dsn string) (*sql.DB, error) {
    d.mu.Lock()
    defer d.mu.Unlock()

    if dbConn, ok := d.dbConn[dsn]; ok {
        dbConn.count++

        return dbConn.db, nil
    }

    db, err := sql.Open(driver, dsn)
    if err != nil {
        return nil, err
    }

    // Limit maximum number of open connections. This is useful to prevent
    // the server from running out of connections and returning an error.
    // With this client-side limit in place, lnd will wait for a connection
    // to become available.
    if d.maxConnections != 0 {
        db.SetMaxOpenConns(d.maxConnections)
    }

    d.dbConn[dsn] = &dbConn{
        db:    db,
        count: 1,
    }

    return db, nil
}

// Close closes the connection with the given dsn. If there are still other
// users of the same connection, this function does nothing.
func (d *dbConnSet) Close(dsn string) error {
    d.mu.Lock()
    defer d.mu.Unlock()

    dbConn, ok := d.dbConn[dsn]
    if !ok {
        return fmt.Errorf("connection not found: %v", dsn)
    }

    // Reduce user count.
    dbConn.count--

    // Do not close if there are other users.
    if dbConn.count > 0 {
        return nil
    }

    // Close connection.
    delete(d.dbConn, dsn)

    return dbConn.db.Close()
}

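The connection set above reference-counts one shared *sql.DB per DSN. A minimal sketch of the intended behavior, assuming it sits inside this (pre-move) postgres package so it can reach the unexported helpers; the DSN is a placeholder, and sql.Open does not dial, so no server is required:

func exampleSharedConn() error {
    const dsn = "postgres://user:pass@localhost:5432/lnd" // placeholder DSN

    set := newDbConnSet(50)

    // Both Opens return the same *sql.DB; the second call only bumps the
    // user count.
    db1, err := set.Open("pgx", dsn)
    if err != nil {
        return err
    }
    db2, err := set.Open("pgx", dsn)
    if err != nil {
        return err
    }
    if db1 != db2 {
        return fmt.Errorf("expected one shared connection per dsn")
    }

    // The first Close only decrements the user count; the second one
    // actually closes the underlying pool.
    if err := set.Close(dsn); err != nil {
        return err
    }

    return set.Close(dsn)
}
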
@@ -13,6 +13,7 @@ import (

    "github.com/btcsuite/btcwallet/walletdb"
    embeddedpostgres "github.com/fergusstrange/embedded-postgres"
    "github.com/lightningnetwork/lnd/kvdb/sqlbase"
)

const (
@@ -32,7 +33,7 @@ const testMaxConnections = 50
// to be done once, because NewFixture will create random new databases on every
// call. It returns a stop closure that stops the database if called.
func StartEmbeddedPostgres() (func() error, error) {
    Init(testMaxConnections)
    sqlbase.Init(testMaxConnections)

    postgres := embeddedpostgres.NewDatabase(
        embeddedpostgres.DefaultConfig().

@@ -1,12 +0,0 @@
package postgres

import "github.com/btcsuite/btclog"

// log is a logger that is initialized as disabled. This means the package will
// not perform any logging by default until a logger is set.
var log = btclog.Disabled

// UseLogger uses a specified Logger to output package logging info.
func UseLogger(logger btclog.Logger) {
    log = logger
}

@@ -1,5 +0,0 @@
//go:build !kvdb_postgres

package postgres

func Init(maxConnections int) {}

@@ -1,470 +0,0 @@
//go:build kvdb_postgres

package postgres

import (
    "database/sql"
    "errors"
    "fmt"

    "github.com/btcsuite/btcwallet/walletdb"
)

// readWriteBucket stores the bucket id and the buckets transaction.
type readWriteBucket struct {
    // id is used to identify the bucket. If id is null, it refers to the
    // root bucket.
    id *int64

    // tx holds the parent transaction.
    tx *readWriteTx

    table string
}

// newReadWriteBucket creates a new rw bucket with the passed transaction
// and bucket id.
func newReadWriteBucket(tx *readWriteTx, id *int64) *readWriteBucket {
    return &readWriteBucket{
        id:    id,
        tx:    tx,
        table: tx.db.table,
    }
}

// NestedReadBucket retrieves a nested read bucket with the given key.
// Returns nil if the bucket does not exist.
func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket {
    return b.NestedReadWriteBucket(key)
}

func parentSelector(id *int64) string {
    if id == nil {
        return "parent_id IS NULL"
    }
    return fmt.Sprintf("parent_id=%v", *id)
}

// ForEach invokes the passed function with every key/value pair in
// the bucket. This includes nested buckets, in which case the value
// is nil, but it does not include the key/value pairs within those
// nested buckets.
func (b *readWriteBucket) ForEach(cb func(k, v []byte) error) error {
    cursor := b.ReadWriteCursor()

    k, v := cursor.First()
    for k != nil {
        err := cb(k, v)
        if err != nil {
            return err
        }

        k, v = cursor.Next()
    }

    return nil
}

// Get returns the value for the given key. Returns nil if the key does
// not exist in this bucket.
func (b *readWriteBucket) Get(key []byte) []byte {
    // Return nil if the key is empty.
    if len(key) == 0 {
        return nil
    }

    var value *[]byte
    row, cancel := b.tx.QueryRow(
        "SELECT value FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1", key,
    )
    defer cancel()
    err := row.Scan(&value)

    switch {
    case err == sql.ErrNoRows:
        return nil

    case err != nil:
        panic(err)
    }

    // When an empty byte array is stored as the value, Sqlite will decode
    // that into nil whereas postgres will decode that as an empty byte
    // array. Since returning nil is taken to mean that no value has ever
    // been written, we ensure here that we at least return an empty array
    // so that nil checks will fail.
    if len(*value) == 0 {
        return []byte{}
    }

    return *value
}

// ReadCursor returns a new read-only cursor for this bucket.
func (b *readWriteBucket) ReadCursor() walletdb.ReadCursor {
    return newReadWriteCursor(b)
}

// NestedReadWriteBucket retrieves a nested bucket with the given key.
// Returns nil if the bucket does not exist.
func (b *readWriteBucket) NestedReadWriteBucket(
    key []byte) walletdb.ReadWriteBucket {

    if len(key) == 0 {
        return nil
    }

    var id int64
    row, cancel := b.tx.QueryRow(
        "SELECT id FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1 AND value IS NULL", key,
    )
    defer cancel()
    err := row.Scan(&id)

    switch {
    case err == sql.ErrNoRows:
        return nil

    case err != nil:
        panic(err)
    }

    return newReadWriteBucket(b.tx, &id)
}

// CreateBucket creates and returns a new nested bucket with the given key.
// Returns ErrBucketExists if the bucket already exists, ErrBucketNameRequired
// if the key is empty, or ErrIncompatibleValue if the key value is otherwise
// invalid for the particular database implementation. Other errors are
// possible depending on the implementation.
func (b *readWriteBucket) CreateBucket(key []byte) (
    walletdb.ReadWriteBucket, error) {

    if len(key) == 0 {
        return nil, walletdb.ErrBucketNameRequired
    }

    // Check to see if the bucket already exists.
    var (
        value *[]byte
        id    int64
    )
    row, cancel := b.tx.QueryRow(
        "SELECT id,value FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1", key,
    )
    defer cancel()
    err := row.Scan(&id, &value)

    switch {
    case err == sql.ErrNoRows:

    case err == nil && value == nil:
        return nil, walletdb.ErrBucketExists

    case err == nil && value != nil:
        return nil, walletdb.ErrIncompatibleValue

    case err != nil:
        return nil, err
    }

    // Bucket does not yet exist, so create it. Postgres will generate a
    // bucket id for the new bucket.
    row, cancel = b.tx.QueryRow(
        "INSERT INTO "+b.table+" (parent_id, key) "+
            "VALUES($1, $2) RETURNING id", b.id, key,
    )
    defer cancel()
    err = row.Scan(&id)
    if err != nil {
        return nil, err
    }

    return newReadWriteBucket(b.tx, &id), nil
}

// CreateBucketIfNotExists creates and returns a new nested bucket with
// the given key if it does not already exist. Returns
// ErrBucketNameRequired if the key is empty or ErrIncompatibleValue
// if the key value is otherwise invalid for the particular database
// backend. Other errors are possible depending on the implementation.
func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) (
    walletdb.ReadWriteBucket, error) {

    if len(key) == 0 {
        return nil, walletdb.ErrBucketNameRequired
    }

    // Check to see if the bucket already exists.
    var (
        value *[]byte
        id    int64
    )
    row, cancel := b.tx.QueryRow(
        "SELECT id,value FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1", key,
    )
    defer cancel()
    err := row.Scan(&id, &value)

    switch {
    // Bucket does not yet exist, so create it now. Postgres will generate a
    // bucket id for the new bucket.
    case err == sql.ErrNoRows:
        row, cancel := b.tx.QueryRow(
            "INSERT INTO "+b.table+" (parent_id, key) "+
                "VALUES($1, $2) RETURNING id", b.id, key,
        )
        defer cancel()
        err := row.Scan(&id)
        if err != nil {
            return nil, err
        }

    case err == nil && value != nil:
        return nil, walletdb.ErrIncompatibleValue

    case err != nil:
        return nil, err
    }

    return newReadWriteBucket(b.tx, &id), nil
}

// DeleteNestedBucket deletes the nested bucket and its sub-buckets
// pointed to by the passed key. All values in the bucket and sub-buckets
// will be deleted as well.
func (b *readWriteBucket) DeleteNestedBucket(key []byte) error {
    if len(key) == 0 {
        return walletdb.ErrIncompatibleValue
    }

    result, err := b.tx.Exec(
        "DELETE FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1 AND value IS NULL",
        key,
    )
    if err != nil {
        return err
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return err
    }
    if rows == 0 {
        return walletdb.ErrBucketNotFound
    }

    return nil
}

// Put updates the value for the passed key.
// Returns ErrKeyRequired if the passed key is empty.
func (b *readWriteBucket) Put(key, value []byte) error {
    if len(key) == 0 {
        return walletdb.ErrKeyRequired
    }

    // Prevent NULL being written for an empty value slice.
    if value == nil {
        value = []byte{}
    }

    var (
        result sql.Result
        err    error
    )

    // We are putting a value in a bucket in this table. Try to insert the
    // key first. If the key already exists (ON CONFLICT), update the key.
    // Do not update a NULL value, because this indicates that the key
    // contains a sub-bucket. This case will be caught via RowsAffected
    // below.
    if b.id == nil {
        // ON CONFLICT requires the WHERE parent_id IS NULL hint to let
        // Postgres find the NULL-parent_id unique index (<table>_unp).
        result, err = b.tx.Exec(
            "INSERT INTO "+b.table+" (key, value) VALUES($1, $2) "+
                "ON CONFLICT (key) WHERE parent_id IS NULL "+
                "DO UPDATE SET value=$2 "+
                "WHERE "+b.table+".value IS NOT NULL",
            key, value,
        )
    } else {
        // ON CONFLICT requires the WHERE parent_id IS NOT NULL hint to
        // let Postgres find the non-NULL-parent_id unique index
        // (<table>_up).
        result, err = b.tx.Exec(
            "INSERT INTO "+b.table+" (key, value, parent_id) "+
                "VALUES($1, $2, $3) "+
                "ON CONFLICT (key, parent_id) "+
                "WHERE parent_id IS NOT NULL "+
                "DO UPDATE SET value=$2 "+
                "WHERE "+b.table+".value IS NOT NULL",
            key, value, b.id,
        )
    }
    if err != nil {
        return err
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return err
    }
    if rows != 1 {
        return walletdb.ErrIncompatibleValue
    }

    return nil
}

// Delete deletes the key/value pointed to by the passed key.
// Returns ErrKeyRequired if the passed key is empty.
func (b *readWriteBucket) Delete(key []byte) error {
    if key == nil {
        return nil
    }
    if len(key) == 0 {
        return walletdb.ErrKeyRequired
    }

    // Check to see if a bucket with this key exists.
    var dummy int
    row, cancel := b.tx.QueryRow(
        "SELECT 1 FROM "+b.table+" WHERE "+parentSelector(b.id)+
            " AND key=$1 AND value IS NULL", key,
    )
    defer cancel()
    err := row.Scan(&dummy)
    switch {
    // No bucket exists, proceed to deletion of the key.
    case err == sql.ErrNoRows:

    case err != nil:
        return err

    // Bucket exists.
    default:
        return walletdb.ErrIncompatibleValue
    }

    _, err = b.tx.Exec(
        "DELETE FROM "+b.table+" WHERE key=$1 AND "+
            parentSelector(b.id)+" AND value IS NOT NULL",
        key,
    )
    if err != nil {
        return err
    }

    return nil
}

// ReadWriteCursor returns a new read-write cursor for this bucket.
func (b *readWriteBucket) ReadWriteCursor() walletdb.ReadWriteCursor {
    return newReadWriteCursor(b)
}

// Tx returns the buckets transaction.
func (b *readWriteBucket) Tx() walletdb.ReadWriteTx {
    return b.tx
}

// NextSequence returns an autoincrementing sequence number for this bucket.
// Note that this is not a thread safe function and as such it must not be used
// for synchronization.
func (b *readWriteBucket) NextSequence() (uint64, error) {
    seq := b.Sequence() + 1

    return seq, b.SetSequence(seq)
}

// SetSequence updates the sequence number for the bucket.
func (b *readWriteBucket) SetSequence(v uint64) error {
    if b.id == nil {
        panic("sequence not supported on top level bucket")
    }

    result, err := b.tx.Exec(
        "UPDATE "+b.table+" SET sequence=$2 WHERE id=$1",
        b.id, int64(v),
    )
    if err != nil {
        return err
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return err
    }
    if rows != 1 {
        return errors.New("cannot set sequence")
    }

    return nil
}

// Sequence returns the current sequence number for this bucket without
// incrementing it.
func (b *readWriteBucket) Sequence() uint64 {
    if b.id == nil {
        panic("sequence not supported on top level bucket")
    }

    var seq int64
    row, cancel := b.tx.QueryRow(
        "SELECT sequence FROM "+b.table+" WHERE id=$1 "+
            "AND sequence IS NOT NULL",
        b.id,
    )
    defer cancel()
    err := row.Scan(&seq)

    switch {
    case err == sql.ErrNoRows:
        return 0

    case err != nil:
        panic(err)
    }

    return uint64(seq)
}

// Prefetch will attempt to prefetch all values under a path from the passed
// bucket.
func (b *readWriteBucket) Prefetch(paths ...[]string) {}

// ForAll is an optimized version of ForEach with the limitation that no
// additional queries can be executed within the callback.
func (b *readWriteBucket) ForAll(cb func(k, v []byte) error) error {
    rows, cancel, err := b.tx.Query(
        "SELECT key, value FROM " + b.table + " WHERE " +
            parentSelector(b.id) + " ORDER BY key",
    )
    if err != nil {
        return err
    }
    defer cancel()

    for rows.Next() {
        var key, value []byte

        err := rows.Scan(&key, &value)
        if err != nil {
            return err
        }

        err = cb(key, value)
        if err != nil {
            return err
        }
    }

    return nil
}

@@ -1,232 +0,0 @@
//go:build kvdb_postgres

package postgres

import (
    "database/sql"

    "github.com/btcsuite/btcwallet/walletdb"
)

// readWriteCursor holds a reference to the cursor's bucket, the value
// prefix and the current key used while iterating.
type readWriteCursor struct {
    bucket *readWriteBucket

    // currKey holds the current key of the cursor.
    currKey []byte
}

func newReadWriteCursor(b *readWriteBucket) *readWriteCursor {
    return &readWriteCursor{
        bucket: b,
    }
}

// First positions the cursor at the first key/value pair and returns
// the pair.
func (c *readWriteCursor) First() ([]byte, []byte) {
    var (
        key   []byte
        value []byte
    )
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key, value FROM " + c.bucket.table + " WHERE " +
            parentSelector(c.bucket.id) +
            " ORDER BY key LIMIT 1",
    )
    defer cancel()
    err := row.Scan(&key, &value)

    switch {
    case err == sql.ErrNoRows:
        return nil, nil

    case err != nil:
        panic(err)
    }

    // Copy current key to prevent modification by the caller.
    c.currKey = make([]byte, len(key))
    copy(c.currKey, key)

    return key, value
}

// Last positions the cursor at the last key/value pair and returns the
// pair.
func (c *readWriteCursor) Last() ([]byte, []byte) {
    var (
        key   []byte
        value []byte
    )
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key, value FROM " + c.bucket.table + " WHERE " +
            parentSelector(c.bucket.id) +
            " ORDER BY key DESC LIMIT 1",
    )
    defer cancel()
    err := row.Scan(&key, &value)

    switch {
    case err == sql.ErrNoRows:
        return nil, nil

    case err != nil:
        panic(err)
    }

    // Copy current key to prevent modification by the caller.
    c.currKey = make([]byte, len(key))
    copy(c.currKey, key)

    return key, value
}

// Next moves the cursor one key/value pair forward and returns the new
// pair.
func (c *readWriteCursor) Next() ([]byte, []byte) {
    var (
        key   []byte
        value []byte
    )
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key, value FROM "+c.bucket.table+" WHERE "+
            parentSelector(c.bucket.id)+
            " AND key>$1 ORDER BY key LIMIT 1",
        c.currKey,
    )
    defer cancel()
    err := row.Scan(&key, &value)

    switch {
    case err == sql.ErrNoRows:
        return nil, nil

    case err != nil:
        panic(err)
    }

    // Copy current key to prevent modification by the caller.
    c.currKey = make([]byte, len(key))
    copy(c.currKey, key)

    return key, value
}

// Prev moves the cursor one key/value pair backward and returns the new
// pair.
func (c *readWriteCursor) Prev() ([]byte, []byte) {
    var (
        key   []byte
        value []byte
    )
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key, value FROM "+c.bucket.table+" WHERE "+
            parentSelector(c.bucket.id)+
            " AND key<$1 ORDER BY key DESC LIMIT 1",
        c.currKey,
    )
    defer cancel()
    err := row.Scan(&key, &value)

    switch {
    case err == sql.ErrNoRows:
        return nil, nil

    case err != nil:
        panic(err)
    }

    // Copy current key to prevent modification by the caller.
    c.currKey = make([]byte, len(key))
    copy(c.currKey, key)

    return key, value
}

// Seek positions the cursor at the passed seek key. If the key does
// not exist, the cursor is moved to the next key after seek. Returns
// the new pair.
func (c *readWriteCursor) Seek(seek []byte) ([]byte, []byte) {
    // Convert nil to empty slice, otherwise sql mapping won't be correct
    // and no keys are found.
    if seek == nil {
        seek = []byte{}
    }

    var (
        key   []byte
        value []byte
    )
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key, value FROM "+c.bucket.table+" WHERE "+
            parentSelector(c.bucket.id)+
            " AND key>=$1 ORDER BY key LIMIT 1",
        seek,
    )
    defer cancel()
    err := row.Scan(&key, &value)

    switch {
    case err == sql.ErrNoRows:
        return nil, nil

    case err != nil:
        panic(err)
    }

    // Copy current key to prevent modification by the caller.
    c.currKey = make([]byte, len(key))
    copy(c.currKey, key)

    return key, value
}

// Delete removes the current key/value pair the cursor is at without
// invalidating the cursor. Returns ErrIncompatibleValue if attempted
// when the cursor points to a nested bucket.
func (c *readWriteCursor) Delete() error {
    // Get first record at or after cursor.
    var key []byte
    row, cancel := c.bucket.tx.QueryRow(
        "SELECT key FROM "+c.bucket.table+" WHERE "+
            parentSelector(c.bucket.id)+
            " AND key>=$1 ORDER BY key LIMIT 1",
        c.currKey,
    )
    defer cancel()
    err := row.Scan(&key)

    switch {
    case err == sql.ErrNoRows:
        return nil

    case err != nil:
        panic(err)
    }

    // Delete record.
    result, err := c.bucket.tx.Exec(
        "DELETE FROM "+c.bucket.table+" WHERE "+
            parentSelector(c.bucket.id)+
            " AND key=$1 AND value IS NOT NULL",
        key,
    )
    if err != nil {
        panic(err)
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return err
    }

    // The key exists but nothing has been deleted. This means that the key
    // must have been a bucket key.
    if rows != 1 {
        return walletdb.ErrIncompatibleValue
    }

    return err
}

@@ -1,225 +0,0 @@
//go:build kvdb_postgres

package postgres

import (
    "context"
    "database/sql"
    "sync"

    "github.com/btcsuite/btcwallet/walletdb"
)

// readWriteTx holds a reference to an open postgres transaction.
type readWriteTx struct {
    db *db
    tx *sql.Tx

    // onCommit gets called upon commit.
    onCommit func()

    // active is true if the transaction hasn't been committed yet.
    active bool

    // locker is a pointer to the global db lock.
    locker sync.Locker
}

// newReadWriteTx creates an rw transaction using a connection from the
// specified pool.
func newReadWriteTx(db *db, readOnly bool) (*readWriteTx, error) {
    locker := newNoopLocker()
    if db.cfg.WithTxLevelLock {
        // Obtain the global lock instance. An alternative here is to
        // obtain a database lock from Postgres. Unfortunately there is
        // no database-level lock in Postgres, meaning that each table
        // would need to be locked individually. Perhaps an advisory
        // lock could perform this function too.
        locker = &db.lock
        if readOnly {
            locker = db.lock.RLocker()
        }
    }
    locker.Lock()

    // Start the transaction. Don't use the timeout context because it would
    // be applied to the transaction as a whole. If possible, mark the
    // transaction as read-only to make sure that potential programming
    // errors cannot cause changes to the database.
    tx, err := db.db.BeginTx(
        context.Background(),
        &sql.TxOptions{
            ReadOnly: readOnly,
        },
    )
    if err != nil {
        locker.Unlock()
        return nil, err
    }

    return &readWriteTx{
        db:     db,
        tx:     tx,
        active: true,
        locker: locker,
    }, nil
}

// ReadBucket opens the root bucket for read only access. If the bucket
// described by the key does not exist, nil is returned.
func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket {
    return tx.ReadWriteBucket(key)
}

// ForEachBucket iterates through all top level buckets.
func (tx *readWriteTx) ForEachBucket(fn func(key []byte) error) error {
    // Fetch binary top level buckets.
    bucket := newReadWriteBucket(tx, nil)
    err := bucket.ForEach(func(k, _ []byte) error {
        return fn(k)
    })
    return err
}

// Rollback closes the transaction, discarding changes (if any) if the
// database was modified by a write transaction.
func (tx *readWriteTx) Rollback() error {
    // If the transaction has been closed, rollback will fail.
    if !tx.active {
        return walletdb.ErrTxClosed
    }

    err := tx.tx.Rollback()

    // Unlock the transaction regardless of the error result.
    tx.active = false
    tx.locker.Unlock()
    return err
}

// ReadWriteBucket opens the root bucket for read/write access. If the
// bucket described by the key does not exist, nil is returned.
func (tx *readWriteTx) ReadWriteBucket(key []byte) walletdb.ReadWriteBucket {
    if len(key) == 0 {
        return nil
    }

    bucket := newReadWriteBucket(tx, nil)
    return bucket.NestedReadWriteBucket(key)
}

// CreateTopLevelBucket creates the top level bucket for a key if it
// does not exist. The newly-created bucket is returned.
func (tx *readWriteTx) CreateTopLevelBucket(key []byte) (walletdb.ReadWriteBucket, error) {
    if len(key) == 0 {
        return nil, walletdb.ErrBucketNameRequired
    }

    bucket := newReadWriteBucket(tx, nil)
    return bucket.CreateBucketIfNotExists(key)
}

// DeleteTopLevelBucket deletes the top level bucket for a key. This
// errors if the bucket can not be found or the key keys a single value
// instead of a bucket.
func (tx *readWriteTx) DeleteTopLevelBucket(key []byte) error {
    // Execute a cascading delete on the key.
    result, err := tx.Exec(
        "DELETE FROM "+tx.db.table+" WHERE key=$1 "+
            "AND parent_id IS NULL",
        key,
    )
    if err != nil {
        return err
    }

    rows, err := result.RowsAffected()
    if err != nil {
        return err
    }
    if rows == 0 {
        return walletdb.ErrBucketNotFound
    }

    return nil
}

// Commit commits the transaction if not already committed.
func (tx *readWriteTx) Commit() error {
    // Commit will fail if the transaction is already committed.
    if !tx.active {
        return walletdb.ErrTxClosed
    }

    // Try committing the transaction.
    err := tx.tx.Commit()
    if err == nil && tx.onCommit != nil {
        tx.onCommit()
    }

    // Unlock the transaction regardless of the error result.
    tx.active = false
    tx.locker.Unlock()

    return err
}

// OnCommit sets the commit callback (overriding if already set).
func (tx *readWriteTx) OnCommit(cb func()) {
    tx.onCommit = cb
}

// QueryRow executes a QueryRow call with a timeout context.
func (tx *readWriteTx) QueryRow(query string, args ...interface{}) (*sql.Row,
    func()) {

    ctx, cancel := tx.db.getTimeoutCtx()
    return tx.tx.QueryRowContext(ctx, query, args...), cancel
}

// Query executes a multi-row query call with a timeout context.
func (tx *readWriteTx) Query(query string, args ...interface{}) (*sql.Rows,
    func(), error) {

    ctx, cancel := tx.db.getTimeoutCtx()
    rows, err := tx.tx.QueryContext(ctx, query, args...)
    if err != nil {
        cancel()

        return nil, func() {}, err
    }

    return rows, cancel, nil
}

// Exec executes an Exec call with a timeout context.
func (tx *readWriteTx) Exec(query string, args ...interface{}) (sql.Result,
    error) {

    ctx, cancel := tx.db.getTimeoutCtx()
    defer cancel()

    return tx.tx.ExecContext(ctx, query, args...)
}

// noopLocker is an implementation of a no-op sync.Locker.
type noopLocker struct{}

// newNoopLocker creates a new noopLocker.
func newNoopLocker() sync.Locker {
    return &noopLocker{}
}

// Lock is a noop.
//
// NOTE: this is part of the sync.Locker interface.
func (n *noopLocker) Lock() {
}

// Unlock is a noop.
//
// NOTE: this is part of the sync.Locker interface.
func (n *noopLocker) Unlock() {
}

var _ sync.Locker = (*noopLocker)(nil)

@@ -1,74 +0,0 @@
//go:build kvdb_postgres

package postgres

import (
    "fmt"
    "strings"
)

// SQLiteCmdReplacements is a one to one mapping of sqlite keywords that should
// be replaced by the mapped strings in any command. Note that the sqlite
// keywords to be replaced are case-sensitive.
type SQLiteCmdReplacements map[string]string

func newKVSchemaCreationCmd(table, schema string,
    replacements SQLiteCmdReplacements) string {

    var (
        tableInSchema = table
        finalCmd      string
    )
    if schema != "" {
        finalCmd = fmt.Sprintf(
            `CREATE SCHEMA IF NOT EXISTS ` + schema + `;`,
        )

        tableInSchema = fmt.Sprintf("%s.%s", schema, table)
    }

    // Construct the sql statements to set up a kv table in postgres. Every
    // row points to the bucket that it is in via its parent_id field. A
    // NULL parent_id means that the key belongs to the uppermost bucket in
    // this table. A constraint on parent_id is enforcing referential
    // integrity.
    //
    // Furthermore, there is a <table>_p index on parent_id that is required
    // for the foreign key constraint.
    //
    // Finally, there are unique indices on (parent_id, key) to prevent the
    // same key being present in a bucket more than once (<table>_up and
    // <table>_unp). In postgres, a single index wouldn't enforce the unique
    // constraint on rows with a NULL parent_id. Therefore, two indices are
    // defined.
    //
    // The replacements map can be used to replace any sqlite keywords.
    // Callers should note that the sqlite keywords are case-sensitive.
    finalCmd += fmt.Sprintf(`
        CREATE TABLE IF NOT EXISTS ` + tableInSchema + `
        (
            key BLOB NOT NULL,
            value BLOB,
            parent_id BIGINT,
            id INTEGER PRIMARY KEY,
            sequence BIGINT,
            CONSTRAINT ` + table + `_parent FOREIGN KEY (parent_id)
                REFERENCES ` + tableInSchema + ` (id)
                ON UPDATE NO ACTION
                ON DELETE CASCADE
        );
        CREATE INDEX IF NOT EXISTS ` + table + `_p
            ON ` + tableInSchema + ` (parent_id);
        CREATE UNIQUE INDEX IF NOT EXISTS ` + table + `_up
            ON ` + tableInSchema + `
            (parent_id, key) WHERE parent_id IS NOT NULL;
        CREATE UNIQUE INDEX IF NOT EXISTS ` + table + `_unp
            ON ` + tableInSchema + ` (key) WHERE parent_id IS NULL;
    `)

    for from, to := range replacements {
        finalCmd = strings.Replace(finalCmd, from, to, -1)
    }

    return finalCmd
}

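For reference, applying the postgres sqliteCmdReplacements shown earlier in this diff to the generic command above yields DDL along the following lines; the "channeldb_kv" table name is purely illustrative and the listing is abbreviated.

// Illustration only: with table = "channeldb_kv", an empty schema and the
// postgres replacements (BLOB -> BYTEA, INTEGER PRIMARY KEY -> BIGSERIAL
// PRIMARY KEY), newKVSchemaCreationCmd produces roughly:
//
//	CREATE TABLE IF NOT EXISTS channeldb_kv (
//	    key BYTEA NOT NULL,
//	    value BYTEA,
//	    parent_id BIGINT,
//	    id BIGSERIAL PRIMARY KEY,
//	    sequence BIGINT,
//	    CONSTRAINT channeldb_kv_parent FOREIGN KEY (parent_id)
//	        REFERENCES channeldb_kv (id)
//	        ON UPDATE NO ACTION
//	        ON DELETE CASCADE
//	);
//	CREATE INDEX IF NOT EXISTS channeldb_kv_p ON channeldb_kv (parent_id);
//	...
var exampleKVSchema = newKVSchemaCreationCmd(
    "channeldb_kv", "", sqliteCmdReplacements,
)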