channeldb: convert to uniformly use new kvdb abstractions

In this commit, we migrate all the code in `channeldb` to only reference
the new `kvdb` package rather than `bbolt` directly.

In many instances, we need to add two versions of a function to fetch a
bucket, as both read and write access are needed. As an example, we add a new
`fetchChanBucketRw` function. This function is identical to
`fetchChanBucket`, but it will be used to fetch the main channel bucket
for all _write_ transactions. We need a new function because a write
transaction can be passed where a read transaction is accepted, but not the
other way around, due to the stronger typing of the new `kvdb` package.
This commit is contained in:
Olaoluwa Osuntokun 2019-12-12 18:22:19 -08:00
parent fc808ac538
commit f0911765af
No known key found for this signature in database
GPG Key ID: BC13F65E2DC84465
36 changed files with 804 additions and 752 deletions

View File

@ -15,7 +15,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/input"
"github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/keychain"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
@ -692,7 +692,7 @@ func (c *OpenChannel) RefreshShortChanID() error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -718,27 +718,30 @@ func (c *OpenChannel) RefreshShortChanID() error {
// fetchChanBucket is a helper function that returns the bucket where a // fetchChanBucket is a helper function that returns the bucket where a
// channel's data resides in given: the public key for the node, the outpoint, // channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on. // and the chainhash that the channel resides on.
func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey, func fetchChanBucket(tx kvdb.ReadTx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (*bbolt.Bucket, error) { outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to // First fetch the top level bucket which stores all data related to
// current, active channels. // current, active channels.
openChanBucket := tx.Bucket(openChannelBucket) openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil { if openChanBucket == nil {
return nil, ErrNoChanDBExists return nil, ErrNoChanDBExists
} }
// TODO(roasbeef): CreateTopLevelBucket on the interface isn't like
// CreateIfNotExists, will return error
// Within this top level bucket, fetch the bucket dedicated to storing // Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node. // open channel data specific to the remote node.
nodePub := nodeKey.SerializeCompressed() nodePub := nodeKey.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub) nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil { if nodeChanBucket == nil {
return nil, ErrNoActiveChannels return nil, ErrNoActiveChannels
} }
// We'll then recurse down an additional layer in order to fetch the // We'll then recurse down an additional layer in order to fetch the
// bucket for this particular chain. // bucket for this particular chain.
chainBucket := nodeChanBucket.Bucket(chainHash[:]) chainBucket := nodeChanBucket.NestedReadBucket(chainHash[:])
if chainBucket == nil { if chainBucket == nil {
return nil, ErrNoActiveChannels return nil, ErrNoActiveChannels
} }
@ -749,7 +752,7 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err return nil, err
} }
chanBucket := chainBucket.Bucket(chanPointBuf.Bytes()) chanBucket := chainBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil { if chanBucket == nil {
return nil, ErrChannelNotFound return nil, ErrChannelNotFound
} }
@ -757,12 +760,27 @@ func fetchChanBucket(tx *bbolt.Tx, nodeKey *btcec.PublicKey,
return chanBucket, nil return chanBucket, nil
} }
// fetchChanBucketRw is a helper function that returns the bucket where a
// channel's data resides in given: the public key for the node, the outpoint,
// and the chainhash that the channel resides on. This differs from
// fetchChanBucket in that it returns a writeable bucket.
func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey,
outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) {
readBucket, err := fetchChanBucket(tx, nodeKey, outPoint, chainHash)
if err != nil {
return nil, err
}
return readBucket.(kvdb.RwBucket), nil
}
// fullSync syncs the contents of an OpenChannel while re-using an existing // fullSync syncs the contents of an OpenChannel while re-using an existing
// database transaction. // database transaction.
func (c *OpenChannel) fullSync(tx *bbolt.Tx) error { func (c *OpenChannel) fullSync(tx kvdb.RwTx) error {
// First fetch the top level bucket which stores all data related to // First fetch the top level bucket which stores all data related to
// current, active channels. // current, active channels.
openChanBucket, err := tx.CreateBucketIfNotExists(openChannelBucket) openChanBucket, err := tx.CreateTopLevelBucket(openChannelBucket)
if err != nil { if err != nil {
return err return err
} }
@ -792,7 +810,7 @@ func (c *OpenChannel) fullSync(tx *bbolt.Tx) error {
chanPointBuf.Bytes(), chanPointBuf.Bytes(),
) )
switch { switch {
case err == bbolt.ErrBucketExists: case err == kvdb.ErrBucketExists:
// If this channel already exists, then in order to avoid // If this channel already exists, then in order to avoid
// overriding it, we'll return an error back up to the caller. // overriding it, we'll return an error back up to the caller.
return ErrChanAlreadyExists return ErrChanAlreadyExists
@ -809,7 +827,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
if err := c.Db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -825,7 +843,7 @@ func (c *OpenChannel) MarkAsOpen(openLoc lnwire.ShortChannelID) error {
channel.IsPending = false channel.IsPending = false
channel.ShortChannelID = openLoc channel.ShortChannelID = openLoc
return putOpenChannel(chanBucket, channel) return putOpenChannel(chanBucket.(kvdb.RwBucket), channel)
}); err != nil { }); err != nil {
return err return err
} }
@ -849,7 +867,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
return err return err
} }
putCommitPoint := func(chanBucket *bbolt.Bucket) error { putCommitPoint := func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(dataLossCommitPointKey, b.Bytes()) return chanBucket.Put(dataLossCommitPointKey, b.Bytes())
} }
@ -861,7 +879,7 @@ func (c *OpenChannel) MarkDataLoss(commitPoint *btcec.PublicKey) error {
func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) { func (c *OpenChannel) DataLossCommitPoint() (*btcec.PublicKey, error) {
var commitPoint *btcec.PublicKey var commitPoint *btcec.PublicKey
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -990,7 +1008,7 @@ func (c *OpenChannel) ChanSyncMsg() (*lnwire.ChannelReestablish, error) {
// active. // active.
// //
// NOTE: The primary mutex should already be held before this method is called. // NOTE: The primary mutex should already be held before this method is called.
func (c *OpenChannel) isBorked(chanBucket *bbolt.Bucket) (bool, error) { func (c *OpenChannel) isBorked(chanBucket kvdb.ReadBucket) (bool, error) {
channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint) channel, err := fetchOpenChannel(chanBucket, &c.FundingOutpoint)
if err != nil { if err != nil {
return false, err return false, err
@ -1042,14 +1060,14 @@ func (c *OpenChannel) markBroadcasted(status ChannelStatus, key []byte,
// If a closing tx is provided, we'll generate a closure to write the // If a closing tx is provided, we'll generate a closure to write the
// transaction in the appropriate bucket under the given key. // transaction in the appropriate bucket under the given key.
var putClosingTx func(*bbolt.Bucket) error var putClosingTx func(kvdb.RwBucket) error
if closeTx != nil { if closeTx != nil {
var b bytes.Buffer var b bytes.Buffer
if err := WriteElement(&b, closeTx); err != nil { if err := WriteElement(&b, closeTx); err != nil {
return err return err
} }
putClosingTx = func(chanBucket *bbolt.Bucket) error { putClosingTx = func(chanBucket kvdb.RwBucket) error {
return chanBucket.Put(key, b.Bytes()) return chanBucket.Put(key, b.Bytes())
} }
} }
@ -1083,7 +1101,7 @@ func (c *OpenChannel) BroadcastedCooperative() (*wire.MsgTx, error) {
func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) { func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
var closeTx *wire.MsgTx var closeTx *wire.MsgTx
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -1113,10 +1131,10 @@ func (c *OpenChannel) getClosingTx(key []byte) (*wire.MsgTx, error) {
// list of closures that are given the chanBucket in order to atomically add // list of closures that are given the chanBucket in order to atomically add
// extra information together with the new status. // extra information together with the new status.
func (c *OpenChannel) putChanStatus(status ChannelStatus, func (c *OpenChannel) putChanStatus(status ChannelStatus,
fs ...func(*bbolt.Bucket) error) error { fs ...func(kvdb.RwBucket) error) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -1159,8 +1177,8 @@ func (c *OpenChannel) putChanStatus(status ChannelStatus,
} }
func (c *OpenChannel) clearChanStatus(status ChannelStatus) error { func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
if err := c.Db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -1189,7 +1207,7 @@ func (c *OpenChannel) clearChanStatus(status ChannelStatus) error {
// putChannel serializes, and stores the current state of the channel in its // putChannel serializes, and stores the current state of the channel in its
// entirety. // entirety.
func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func putOpenChannel(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// First, we'll write out all the relatively static fields, that are // First, we'll write out all the relatively static fields, that are
// decided upon initial channel creation. // decided upon initial channel creation.
if err := putChanInfo(chanBucket, channel); err != nil { if err := putChanInfo(chanBucket, channel); err != nil {
@ -1213,7 +1231,7 @@ func putOpenChannel(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// fetchOpenChannel retrieves, and deserializes (including decrypting // fetchOpenChannel retrieves, and deserializes (including decrypting
// sensitive) the complete channel currently active with the passed nodeID. // sensitive) the complete channel currently active with the passed nodeID.
func fetchOpenChannel(chanBucket *bbolt.Bucket, func fetchOpenChannel(chanBucket kvdb.ReadBucket,
chanPoint *wire.OutPoint) (*OpenChannel, error) { chanPoint *wire.OutPoint) (*OpenChannel, error) {
channel := &OpenChannel{ channel := &OpenChannel{
@ -1260,20 +1278,20 @@ func (c *OpenChannel) SyncPending(addr net.Addr, pendingHeight uint32) error {
c.FundingBroadcastHeight = pendingHeight c.FundingBroadcastHeight = pendingHeight
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return syncNewChannel(tx, c, []net.Addr{addr}) return syncNewChannel(tx, c, []net.Addr{addr})
}) })
} }
// syncNewChannel will write the passed channel to disk, and also create a // syncNewChannel will write the passed channel to disk, and also create a
// LinkNode (if needed) for the channel peer. // LinkNode (if needed) for the channel peer.
func syncNewChannel(tx *bbolt.Tx, c *OpenChannel, addrs []net.Addr) error { func syncNewChannel(tx kvdb.RwTx, c *OpenChannel, addrs []net.Addr) error {
// First, sync all the persistent channel state to disk. // First, sync all the persistent channel state to disk.
if err := c.fullSync(tx); err != nil { if err := c.fullSync(tx); err != nil {
return err return err
} }
nodeInfoBucket, err := tx.CreateBucketIfNotExists(nodeInfoBucket) nodeInfoBucket, err := tx.CreateTopLevelBucket(nodeInfoBucket)
if err != nil { if err != nil {
return err return err
} }
@ -1316,8 +1334,8 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment,
return ErrNoRestoredChannelMutation return ErrNoRestoredChannelMutation
} }
err := c.Db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -1786,10 +1804,10 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
return ErrNoRestoredChannelMutation return ErrNoRestoredChannelMutation
} }
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
// First, we'll grab the writable bucket where this channel's // First, we'll grab the writable bucket where this channel's
// data resides. // data resides.
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -1854,7 +1872,7 @@ func (c *OpenChannel) AppendRemoteCommitChain(diff *CommitDiff) error {
// these pointers, causing the tip and the tail to point to the same entry. // these pointers, causing the tip and the tail to point to the same entry.
func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) { func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
var cd *CommitDiff var cd *CommitDiff
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -1891,7 +1909,7 @@ func (c *OpenChannel) RemoteCommitChainTip() (*CommitDiff, error) {
// updates that still need to be signed for. // updates that still need to be signed for.
func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) { func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) {
var updates []LogUpdate var updates []LogUpdate
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -1932,8 +1950,8 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error {
c.RemoteNextRevocation = revKey c.RemoteNextRevocation = revKey
err := c.Db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -1969,8 +1987,8 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error {
var newRemoteCommit *ChannelCommitment var newRemoteCommit *ChannelCommitment
err := c.Db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucketRw(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
if err != nil { if err != nil {
@ -2089,7 +2107,7 @@ func (c *OpenChannel) LoadFwdPkgs() ([]*FwdPkg, error) {
defer c.RUnlock() defer c.RUnlock()
var fwdPkgs []*FwdPkg var fwdPkgs []*FwdPkg
if err := c.Db.View(func(tx *bbolt.Tx) error { if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
var err error var err error
fwdPkgs, err = c.Packager.LoadFwdPkgs(tx) fwdPkgs, err = c.Packager.LoadFwdPkgs(tx)
return err return err
@ -2107,7 +2125,7 @@ func (c *OpenChannel) AckAddHtlcs(addRefs ...AddRef) error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckAddHtlcs(tx, addRefs...) return c.Packager.AckAddHtlcs(tx, addRefs...)
}) })
} }
@ -2120,7 +2138,7 @@ func (c *OpenChannel) AckSettleFails(settleFailRefs ...SettleFailRef) error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.AckSettleFails(tx, settleFailRefs...) return c.Packager.AckSettleFails(tx, settleFailRefs...)
}) })
} }
@ -2131,7 +2149,7 @@ func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.SetFwdFilter(tx, height, fwdFilter) return c.Packager.SetFwdFilter(tx, height, fwdFilter)
}) })
} }
@ -2144,7 +2162,7 @@ func (c *OpenChannel) RemoveFwdPkg(height uint64) error {
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
return c.Packager.RemovePkg(tx, height) return c.Packager.RemovePkg(tx, height)
}) })
} }
@ -2165,7 +2183,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
} }
var commit ChannelCommitment var commit ChannelCommitment
if err := c.Db.View(func(tx *bbolt.Tx) error { if err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -2173,7 +2191,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
return err return err
} }
logBucket := chanBucket.Bucket(revocationLogBucket) logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil { if logBucket == nil {
return ErrNoPastDeltas return ErrNoPastDeltas
} }
@ -2182,7 +2200,7 @@ func (c *OpenChannel) RevocationLogTail() (*ChannelCommitment, error) {
// this channel, we'll jump to the _last_ key in bucket. As we // this channel, we'll jump to the _last_ key in bucket. As we
// store the update number on disk in a big-endian format, // store the update number on disk in a big-endian format,
// this will retrieve the latest entry. // this will retrieve the latest entry.
cursor := logBucket.Cursor() cursor := logBucket.ReadCursor()
_, tailLogEntry := cursor.Last() _, tailLogEntry := cursor.Last()
logEntryReader := bytes.NewReader(tailLogEntry) logEntryReader := bytes.NewReader(tailLogEntry)
@ -2212,7 +2230,7 @@ func (c *OpenChannel) CommitmentHeight() (uint64, error) {
defer c.RUnlock() defer c.RUnlock()
var height uint64 var height uint64
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open // Get the bucket dedicated to storing the metadata for open
// channels. // channels.
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
@ -2247,7 +2265,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
defer c.RUnlock() defer c.RUnlock()
var commit ChannelCommitment var commit ChannelCommitment
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -2255,7 +2273,7 @@ func (c *OpenChannel) FindPreviousState(updateNum uint64) (*ChannelCommitment, e
return err return err
} }
logBucket := chanBucket.Bucket(revocationLogBucket) logBucket := chanBucket.NestedReadBucket(revocationLogBucket)
if logBucket == nil { if logBucket == nil {
return ErrNoPastDeltas return ErrNoPastDeltas
} }
@ -2405,19 +2423,19 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
c.Lock() c.Lock()
defer c.Unlock() defer c.Unlock()
return c.Db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.Db, func(tx kvdb.RwTx) error {
openChanBucket := tx.Bucket(openChannelBucket) openChanBucket := tx.ReadWriteBucket(openChannelBucket)
if openChanBucket == nil { if openChanBucket == nil {
return ErrNoChanDBExists return ErrNoChanDBExists
} }
nodePub := c.IdentityPub.SerializeCompressed() nodePub := c.IdentityPub.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(nodePub) nodeChanBucket := openChanBucket.NestedReadWriteBucket(nodePub)
if nodeChanBucket == nil { if nodeChanBucket == nil {
return ErrNoActiveChannels return ErrNoActiveChannels
} }
chainBucket := nodeChanBucket.Bucket(c.ChainHash[:]) chainBucket := nodeChanBucket.NestedReadWriteBucket(c.ChainHash[:])
if chainBucket == nil { if chainBucket == nil {
return ErrNoActiveChannels return ErrNoActiveChannels
} }
@ -2428,7 +2446,9 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
return err return err
} }
chanKey := chanPointBuf.Bytes() chanKey := chanPointBuf.Bytes()
chanBucket := chainBucket.Bucket(chanKey) chanBucket := chainBucket.NestedReadWriteBucket(
chanKey,
)
if chanBucket == nil { if chanBucket == nil {
return ErrNoActiveChannels return ErrNoActiveChannels
} }
@ -2452,21 +2472,21 @@ func (c *OpenChannel) CloseChannel(summary *ChannelCloseSummary,
// With the base channel data deleted, attempt to delete the // With the base channel data deleted, attempt to delete the
// information stored within the revocation log. // information stored within the revocation log.
logBucket := chanBucket.Bucket(revocationLogBucket) logBucket := chanBucket.NestedReadWriteBucket(revocationLogBucket)
if logBucket != nil { if logBucket != nil {
err = chanBucket.DeleteBucket(revocationLogBucket) err = chanBucket.DeleteNestedBucket(revocationLogBucket)
if err != nil { if err != nil {
return err return err
} }
} }
err = chainBucket.DeleteBucket(chanPointBuf.Bytes()) err = chainBucket.DeleteNestedBucket(chanPointBuf.Bytes())
if err != nil { if err != nil {
return err return err
} }
// Add channel state to the historical channel bucket. // Add channel state to the historical channel bucket.
historicalBucket, err := tx.CreateBucketIfNotExists( historicalBucket, err := tx.CreateTopLevelBucket(
historicalChannelBucket, historicalChannelBucket,
) )
if err != nil { if err != nil {
@ -2570,7 +2590,7 @@ func (c *OpenChannel) Snapshot() *ChannelSnapshot {
// latest fully committed state is returned. The first commitment returned is // latest fully committed state is returned. The first commitment returned is
// the local commitment, and the second returned is the remote commitment. // the local commitment, and the second returned is the remote commitment.
func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) { func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitment, error) {
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -2592,7 +2612,7 @@ func (c *OpenChannel) LatestCommitments() (*ChannelCommitment, *ChannelCommitmen
// acting on a possible contract breach to ensure, that the caller has the most // acting on a possible contract breach to ensure, that the caller has the most
// up to date information required to deliver justice. // up to date information required to deliver justice.
func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) { func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
err := c.Db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.Db, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchChanBucket( chanBucket, err := fetchChanBucket(
tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash,
) )
@ -2609,10 +2629,10 @@ func (c *OpenChannel) RemoteRevocationStore() (shachain.Store, error) {
return c.RevocationStore, nil return c.RevocationStore, nil
} }
func putChannelCloseSummary(tx *bbolt.Tx, chanID []byte, func putChannelCloseSummary(tx kvdb.RwTx, chanID []byte,
summary *ChannelCloseSummary, lastChanState *OpenChannel) error { summary *ChannelCloseSummary, lastChanState *OpenChannel) error {
closedChanBucket, err := tx.CreateBucketIfNotExists(closedChannelBucket) closedChanBucket, err := tx.CreateTopLevelBucket(closedChannelBucket)
if err != nil { if err != nil {
return err return err
} }
@ -2788,7 +2808,7 @@ func fundingTxPresent(channel *OpenChannel) bool {
!channel.hasChanStatus(ChanStatusRestored) !channel.hasChanStatus(ChanStatusRestored)
} }
func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func putChanInfo(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var w bytes.Buffer var w bytes.Buffer
if err := WriteElements(&w, if err := WriteElements(&w,
channel.ChanType, channel.ChainHash, channel.FundingOutpoint, channel.ChanType, channel.ChainHash, channel.FundingOutpoint,
@ -2835,7 +2855,7 @@ func putChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
// putOptionalUpfrontShutdownScript adds a shutdown script under the key // putOptionalUpfrontShutdownScript adds a shutdown script under the key
// provided if it has a non-zero length. // provided if it has a non-zero length.
func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte, func putOptionalUpfrontShutdownScript(chanBucket kvdb.RwBucket, key []byte,
script []byte) error { script []byte) error {
// If the script is empty, we do not need to add anything. // If the script is empty, we do not need to add anything.
if len(script) == 0 { if len(script) == 0 {
@ -2853,7 +2873,7 @@ func putOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte,
// getOptionalUpfrontShutdownScript reads the shutdown script stored under the // getOptionalUpfrontShutdownScript reads the shutdown script stored under the
// key provided if it is present. Upfront shutdown scripts are optional, so the // key provided if it is present. Upfront shutdown scripts are optional, so the
// function returns with no error if the key is not present. // function returns with no error if the key is not present.
func getOptionalUpfrontShutdownScript(chanBucket *bbolt.Bucket, key []byte, func getOptionalUpfrontShutdownScript(chanBucket kvdb.ReadBucket, key []byte,
script *lnwire.DeliveryAddress) error { script *lnwire.DeliveryAddress) error {
// Return early if the bucket does not exit, a shutdown script was not set. // Return early if the bucket does not exit, a shutdown script was not set.
@ -2885,7 +2905,7 @@ func serializeChanCommit(w io.Writer, c *ChannelCommitment) error {
return SerializeHtlcs(w, c.Htlcs...) return SerializeHtlcs(w, c.Htlcs...)
} }
func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment, func putChanCommitment(chanBucket kvdb.RwBucket, c *ChannelCommitment,
local bool) error { local bool) error {
var commitKey []byte var commitKey []byte
@ -2903,7 +2923,7 @@ func putChanCommitment(chanBucket *bbolt.Bucket, c *ChannelCommitment,
return chanBucket.Put(commitKey, b.Bytes()) return chanBucket.Put(commitKey, b.Bytes())
} }
func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
// If this is a restored channel, then we don't have any commitments to // If this is a restored channel, then we don't have any commitments to
// write. // write.
if channel.hasChanStatus(ChanStatusRestored) { if channel.hasChanStatus(ChanStatusRestored) {
@ -2922,7 +2942,7 @@ func putChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error {
) )
} }
func putChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
var b bytes.Buffer var b bytes.Buffer
err := WriteElements( err := WriteElements(
@ -2957,7 +2977,7 @@ func readChanConfig(b io.Reader, c *ChannelConfig) error {
) )
} }
func fetchChanInfo(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func fetchChanInfo(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
infoBytes := chanBucket.Get(chanInfoKey) infoBytes := chanBucket.Get(chanInfoKey)
if infoBytes == nil { if infoBytes == nil {
return ErrNoChanInfoFound return ErrNoChanInfoFound
@ -3024,7 +3044,7 @@ func deserializeChanCommit(r io.Reader) (ChannelCommitment, error) {
return c, nil return c, nil
} }
func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitment, error) { func fetchChanCommitment(chanBucket kvdb.ReadBucket, local bool) (ChannelCommitment, error) {
var commitKey []byte var commitKey []byte
if local { if local {
commitKey = append(chanCommitmentKey, byte(0x00)) commitKey = append(chanCommitmentKey, byte(0x00))
@ -3041,7 +3061,7 @@ func fetchChanCommitment(chanBucket *bbolt.Bucket, local bool) (ChannelCommitmen
return deserializeChanCommit(r) return deserializeChanCommit(r)
} }
func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func fetchChanCommitments(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
var err error var err error
// If this is a restored channel, then we don't have any commitments to // If this is a restored channel, then we don't have any commitments to
@ -3062,7 +3082,7 @@ func fetchChanCommitments(chanBucket *bbolt.Bucket, channel *OpenChannel) error
return nil return nil
} }
func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) error { func fetchChanRevocationState(chanBucket kvdb.ReadBucket, channel *OpenChannel) error {
revBytes := chanBucket.Get(revocationStateKey) revBytes := chanBucket.Get(revocationStateKey)
if revBytes == nil { if revBytes == nil {
return ErrNoRevocationsFound return ErrNoRevocationsFound
@ -3088,7 +3108,7 @@ func fetchChanRevocationState(chanBucket *bbolt.Bucket, channel *OpenChannel) er
return ReadElements(r, &channel.RemoteNextRevocation) return ReadElements(r, &channel.RemoteNextRevocation)
} }
func deleteOpenChannel(chanBucket *bbolt.Bucket, chanPointBytes []byte) error { func deleteOpenChannel(chanBucket kvdb.RwBucket, chanPointBytes []byte) error {
if err := chanBucket.Delete(chanInfoKey); err != nil { if err := chanBucket.Delete(chanInfoKey); err != nil {
return err return err
@ -3122,7 +3142,7 @@ func makeLogKey(updateNum uint64) [8]byte {
return key return key
} }
func appendChannelLogEntry(log *bbolt.Bucket, func appendChannelLogEntry(log kvdb.RwBucket,
commit *ChannelCommitment) error { commit *ChannelCommitment) error {
var b bytes.Buffer var b bytes.Buffer
@ -3134,7 +3154,7 @@ func appendChannelLogEntry(log *bbolt.Bucket,
return log.Put(logEntrykey[:], b.Bytes()) return log.Put(logEntrykey[:], b.Bytes())
} }
func fetchChannelLogEntry(log *bbolt.Bucket, func fetchChannelLogEntry(log kvdb.ReadBucket,
updateNum uint64) (ChannelCommitment, error) { updateNum uint64) (ChannelCommitment, error) {
logEntrykey := makeLogKey(updateNum) logEntrykey := makeLogKey(updateNum)

View File

@ -11,7 +11,6 @@ import (
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12" "github.com/lightningnetwork/lnd/channeldb/migration12"
@ -142,7 +141,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee // information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data. // schedules, and reputation data.
type DB struct { type DB struct {
*bbolt.DB kvdb.Backend
dbPath string dbPath string
graph *ChannelGraph graph *ChannelGraph
clock clock.Clock clock clock.Clock
@ -166,18 +165,13 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the // Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large. // freelist grows to be very large.
options := &bbolt.Options{ bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
chanDB := &DB{ chanDB := &DB{
DB: bdb, Backend: bdb,
dbPath: dbPath, dbPath: dbPath,
clock: opts.clock, clock: opts.clock,
} }
@ -203,41 +197,41 @@ func (d *DB) Path() string {
// database. The deletion is done in a single transaction, therefore this // database. The deletion is done in a single transaction, therefore this
// operation is fully atomic. // operation is fully atomic.
func (d *DB) Wipe() error { func (d *DB) Wipe() error {
return d.Update(func(tx *bbolt.Tx) error { return kvdb.Update(d, func(tx kvdb.RwTx) error {
err := tx.DeleteBucket(openChannelBucket) err := tx.DeleteTopLevelBucket(openChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(closedChannelBucket) err = tx.DeleteTopLevelBucket(closedChannelBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(invoiceBucket) err = tx.DeleteTopLevelBucket(invoiceBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(nodeInfoBucket) err = tx.DeleteTopLevelBucket(nodeInfoBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(nodeBucket) err = tx.DeleteTopLevelBucket(nodeBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(edgeBucket) err = tx.DeleteTopLevelBucket(edgeBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(edgeIndexBucket) err = tx.DeleteTopLevelBucket(edgeIndexBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
err = tx.DeleteBucket(graphMetaBucket) err = tx.DeleteTopLevelBucket(graphMetaBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
@ -257,36 +251,36 @@ func createChannelDB(dbPath string) error {
} }
path := filepath.Join(dbPath, dbName) path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil) bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil { if err != nil {
return err return err
} }
err = bdb.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil { if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(closedChannelBucket); err != nil { if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(forwardingLogBucket); err != nil { if _, err := tx.CreateTopLevelBucket(forwardingLogBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(fwdPackagesKey); err != nil { if _, err := tx.CreateTopLevelBucket(fwdPackagesKey); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(invoiceBucket); err != nil { if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(nodeInfoBucket); err != nil { if _, err := tx.CreateTopLevelBucket(nodeInfoBucket); err != nil {
return err return err
} }
nodes, err := tx.CreateBucket(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -299,7 +293,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
edges, err := tx.CreateBucket(edgeBucket) edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -316,7 +310,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
graphMeta, err := tx.CreateBucket(graphMetaBucket) graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil { if err != nil {
return err return err
} }
@ -325,7 +319,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
if _, err := tx.CreateBucket(metaBucket); err != nil { if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err return err
} }
@ -358,7 +352,7 @@ func fileExists(path string) bool {
// zero-length slice is returned. // zero-length slice is returned.
func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) { func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
var channels []*OpenChannel var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error var err error
channels, err = d.fetchOpenChannels(tx, nodeID) channels, err = d.fetchOpenChannels(tx, nodeID)
return err return err
@ -371,11 +365,11 @@ func (d *DB) FetchOpenChannels(nodeID *btcec.PublicKey) ([]*OpenChannel, error)
// stored currently active/open channels associated with the target nodeID. In // stored currently active/open channels associated with the target nodeID. In
// the case that no active channels are known to have been created with this // the case that no active channels are known to have been created with this
// node, then a zero-length slice is returned. // node, then a zero-length slice is returned.
func (d *DB) fetchOpenChannels(tx *bbolt.Tx, func (d *DB) fetchOpenChannels(tx kvdb.ReadTx,
nodeID *btcec.PublicKey) ([]*OpenChannel, error) { nodeID *btcec.PublicKey) ([]*OpenChannel, error) {
// Get the bucket dedicated to storing the metadata for open channels. // Get the bucket dedicated to storing the metadata for open channels.
openChanBucket := tx.Bucket(openChannelBucket) openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil { if openChanBucket == nil {
return nil, nil return nil, nil
} }
@ -383,7 +377,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// Within this top level bucket, fetch the bucket dedicated to storing // Within this top level bucket, fetch the bucket dedicated to storing
// open channel data specific to the remote node. // open channel data specific to the remote node.
pub := nodeID.SerializeCompressed() pub := nodeID.SerializeCompressed()
nodeChanBucket := openChanBucket.Bucket(pub) nodeChanBucket := openChanBucket.NestedReadBucket(pub)
if nodeChanBucket == nil { if nodeChanBucket == nil {
return nil, nil return nil, nil
} }
@ -399,7 +393,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// If we've found a valid chainhash bucket, then we'll retrieve // If we've found a valid chainhash bucket, then we'll retrieve
// that so we can extract all the channels. // that so we can extract all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash) chainBucket := nodeChanBucket.NestedReadBucket(chainHash)
if chainBucket == nil { if chainBucket == nil {
return fmt.Errorf("unable to read bucket for chain=%x", return fmt.Errorf("unable to read bucket for chain=%x",
chainHash[:]) chainHash[:])
@ -424,7 +418,7 @@ func (d *DB) fetchOpenChannels(tx *bbolt.Tx,
// fetchNodeChannels retrieves all active channels from the target chainBucket // fetchNodeChannels retrieves all active channels from the target chainBucket
// which is under a node's dedicated channel bucket. This function is typically // which is under a node's dedicated channel bucket. This function is typically
// used to fetch all the active channels related to a particular node. // used to fetch all the active channels related to a particular node.
func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error) { func (d *DB) fetchNodeChannels(chainBucket kvdb.ReadBucket) ([]*OpenChannel, error) {
var channels []*OpenChannel var channels []*OpenChannel
@ -438,7 +432,7 @@ func (d *DB) fetchNodeChannels(chainBucket *bbolt.Bucket) ([]*OpenChannel, error
// Once we've found a valid channel bucket, we'll extract it // Once we've found a valid channel bucket, we'll extract it
// from the node's chain bucket. // from the node's chain bucket.
chanBucket := chainBucket.Bucket(chanPoint) chanBucket := chainBucket.NestedReadBucket(chanPoint)
var outPoint wire.OutPoint var outPoint wire.OutPoint
err := readOutpoint(bytes.NewReader(chanPoint), &outPoint) err := readOutpoint(bytes.NewReader(chanPoint), &outPoint)
@ -483,10 +477,10 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// structure and skipping fully decoding each channel, we save a good // structure and skipping fully decoding each channel, we save a good
// bit of CPU as we don't need to do things like decompress public // bit of CPU as we don't need to do things like decompress public
// keys. // keys.
chanScan := func(tx *bbolt.Tx) error { chanScan := func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open // Get the bucket dedicated to storing the metadata for open
// channels. // channels.
openChanBucket := tx.Bucket(openChannelBucket) openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil { if openChanBucket == nil {
return ErrNoActiveChannels return ErrNoActiveChannels
} }
@ -501,7 +495,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil return nil
} }
nodeChanBucket := openChanBucket.Bucket(nodePub) nodeChanBucket := openChanBucket.NestedReadBucket(nodePub)
if nodeChanBucket == nil { if nodeChanBucket == nil {
return nil return nil
} }
@ -515,7 +509,9 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
return nil return nil
} }
chainBucket := nodeChanBucket.Bucket(chainHash) chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil { if chainBucket == nil {
return fmt.Errorf("unable to read "+ return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:]) "bucket for chain=%x", chainHash[:])
@ -523,7 +519,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
// Finally we reach the leaf bucket that stores // Finally we reach the leaf bucket that stores
// all the chanPoints for this node. // all the chanPoints for this node.
chanBucket := chainBucket.Bucket( chanBucket := chainBucket.NestedReadBucket(
targetChanPoint.Bytes(), targetChanPoint.Bytes(),
) )
if chanBucket == nil { if chanBucket == nil {
@ -545,7 +541,7 @@ func (d *DB) FetchChannel(chanPoint wire.OutPoint) (*OpenChannel, error) {
}) })
} }
err := d.View(chanScan) err := kvdb.View(d, chanScan)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -637,10 +633,10 @@ func waitingCloseFilter(waitingClose bool) fetchChannelsFilter {
func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) { func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error) {
var channels []*OpenChannel var channels []*OpenChannel
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// Get the bucket dedicated to storing the metadata for open // Get the bucket dedicated to storing the metadata for open
// channels. // channels.
openChanBucket := tx.Bucket(openChannelBucket) openChanBucket := tx.ReadBucket(openChannelBucket)
if openChanBucket == nil { if openChanBucket == nil {
return ErrNoActiveChannels return ErrNoActiveChannels
} }
@ -648,7 +644,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Next, fetch the bucket dedicated to storing metadata related // Next, fetch the bucket dedicated to storing metadata related
// to all nodes. All keys within this bucket are the serialized // to all nodes. All keys within this bucket are the serialized
// public keys of all our direct counterparties. // public keys of all our direct counterparties.
nodeMetaBucket := tx.Bucket(nodeInfoBucket) nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil { if nodeMetaBucket == nil {
return fmt.Errorf("node bucket not created") return fmt.Errorf("node bucket not created")
} }
@ -656,7 +652,7 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// Finally for each node public key in the bucket, fetch all // Finally for each node public key in the bucket, fetch all
// the channels related to this particular node. // the channels related to this particular node.
return nodeMetaBucket.ForEach(func(k, v []byte) error { return nodeMetaBucket.ForEach(func(k, v []byte) error {
nodeChanBucket := openChanBucket.Bucket(k) nodeChanBucket := openChanBucket.NestedReadBucket(k)
if nodeChanBucket == nil { if nodeChanBucket == nil {
return nil return nil
} }
@ -671,7 +667,9 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
// If we've found a valid chainhash bucket, // If we've found a valid chainhash bucket,
// then we'll retrieve that so we can extract // then we'll retrieve that so we can extract
// all the channels. // all the channels.
chainBucket := nodeChanBucket.Bucket(chainHash) chainBucket := nodeChanBucket.NestedReadBucket(
chainHash,
)
if chainBucket == nil { if chainBucket == nil {
return fmt.Errorf("unable to read "+ return fmt.Errorf("unable to read "+
"bucket for chain=%x", chainHash[:]) "bucket for chain=%x", chainHash[:])
@ -727,8 +725,8 @@ func fetchChannels(d *DB, filters ...fetchChannelsFilter) ([]*OpenChannel, error
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) { func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error { if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.Bucket(closedChannelBucket) closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil { if closeBucket == nil {
return ErrNoClosedChannels return ErrNoClosedChannels
} }
@ -765,8 +763,8 @@ var ErrClosedChannelNotFound = errors.New("unable to find closed channel summary
// point of the channel in question. // point of the channel in question.
func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) { func (d *DB) FetchClosedChannel(chanID *wire.OutPoint) (*ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error { if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.Bucket(closedChannelBucket) closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil { if closeBucket == nil {
return ErrClosedChannelNotFound return ErrClosedChannelNotFound
} }
@ -799,15 +797,15 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
*ChannelCloseSummary, error) { *ChannelCloseSummary, error) {
var chanSummary *ChannelCloseSummary var chanSummary *ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error { if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.Bucket(closedChannelBucket) closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil { if closeBucket == nil {
return ErrClosedChannelNotFound return ErrClosedChannelNotFound
} }
// The first 30 bytes of the channel ID and outpoint will be // The first 30 bytes of the channel ID and outpoint will be
// equal. // equal.
cursor := closeBucket.Cursor() cursor := closeBucket.ReadCursor()
op, c := cursor.Seek(cid[:30]) op, c := cursor.Seek(cid[:30])
// We scan over all possible candidates for this channel ID. // We scan over all possible candidates for this channel ID.
@ -847,7 +845,7 @@ func (d *DB) FetchClosedChannelForID(cid lnwire.ChannelID) (
// the pending funds in a channel that has been forcibly closed have been // the pending funds in a channel that has been forcibly closed have been
// swept. // swept.
func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error { func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
return d.Update(func(tx *bbolt.Tx) error { return kvdb.Update(d, func(tx kvdb.RwTx) error {
var b bytes.Buffer var b bytes.Buffer
if err := writeOutpoint(&b, chanPoint); err != nil { if err := writeOutpoint(&b, chanPoint); err != nil {
return err return err
@ -855,7 +853,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
chanID := b.Bytes() chanID := b.Bytes()
closedChanBucket, err := tx.CreateBucketIfNotExists( closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket, closedChannelBucket,
) )
if err != nil { if err != nil {
@ -900,7 +898,7 @@ func (d *DB) MarkChanFullyClosed(chanPoint *wire.OutPoint) error {
// pruneLinkNode determines whether we should garbage collect a link node from // pruneLinkNode determines whether we should garbage collect a link node from
// the database due to no longer having any open channels with it. If there are // the database due to no longer having any open channels with it. If there are
// any left, then this acts as a no-op. // any left, then this acts as a no-op.
func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error { func (d *DB) pruneLinkNode(tx kvdb.RwTx, remotePub *btcec.PublicKey) error {
openChannels, err := d.fetchOpenChannels(tx, remotePub) openChannels, err := d.fetchOpenChannels(tx, remotePub)
if err != nil { if err != nil {
return fmt.Errorf("unable to fetch open channels for peer %x: "+ return fmt.Errorf("unable to fetch open channels for peer %x: "+
@ -920,7 +918,7 @@ func (d *DB) pruneLinkNode(tx *bbolt.Tx, remotePub *btcec.PublicKey) error {
// PruneLinkNodes attempts to prune all link nodes found within the databse with // PruneLinkNodes attempts to prune all link nodes found within the databse with
// whom we no longer have any open channels with. // whom we no longer have any open channels with.
func (d *DB) PruneLinkNodes() error { func (d *DB) PruneLinkNodes() error {
return d.Update(func(tx *bbolt.Tx) error { return kvdb.Update(d, func(tx kvdb.RwTx) error {
linkNodes, err := d.fetchAllLinkNodes(tx) linkNodes, err := d.fetchAllLinkNodes(tx)
if err != nil { if err != nil {
return err return err
@ -964,7 +962,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
defer chanGraph.cacheMu.Unlock() defer chanGraph.cacheMu.Unlock()
var chansRestored []uint64 var chansRestored []uint64
err := d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
for _, channelShell := range channelShells { for _, channelShell := range channelShells {
channel := channelShell.Chan channel := channelShell.Chan
@ -1001,7 +999,7 @@ func (d *DB) RestoreChannelShells(channelShells ...*ChannelShell) error {
Capacity: channel.Capacity, Capacity: channel.Capacity,
} }
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return ErrGraphNotFound return ErrGraphNotFound
} }
@ -1075,7 +1073,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
graphNode LightningNode graphNode LightningNode
) )
dbErr := d.View(func(tx *bbolt.Tx) error { dbErr := kvdb.View(d, func(tx kvdb.ReadTx) error {
var err error var err error
linkNode, err = fetchLinkNode(tx, nodePub) linkNode, err = fetchLinkNode(tx, nodePub)
@ -1086,7 +1084,7 @@ func (d *DB) AddrsForNode(nodePub *btcec.PublicKey) ([]net.Addr, error) {
// We'll also query the graph for this peer to see if they have // We'll also query the graph for this peer to see if they have
// any addresses that we don't currently have stored within the // any addresses that we don't currently have stored within the
// link node database. // link node database.
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return ErrGraphNotFound return ErrGraphNotFound
} }
@ -1213,7 +1211,7 @@ func (d *DB) syncVersions(versions []version) error {
migrations, migrationVersions := getMigrationsToApply( migrations, migrationVersions := getMigrationsToApply(
versions, meta.DbVersionNumber, versions, meta.DbVersionNumber,
) )
return d.Update(func(tx *bbolt.Tx) error { return kvdb.Update(d, func(tx kvdb.RwTx) error {
for i, migration := range migrations { for i, migration := range migrations {
if migration == nil { if migration == nil {
continue continue
@ -1261,12 +1259,12 @@ func getMigrationsToApply(versions []version, version uint32) ([]migration, []ui
// fetchHistoricalChanBucket returns a the channel bucket for a given outpoint // fetchHistoricalChanBucket returns a the channel bucket for a given outpoint
// from the historical channel bucket. If the bucket does not exist, // from the historical channel bucket. If the bucket does not exist,
// ErrNoHistoricalBucket is returned. // ErrNoHistoricalBucket is returned.
func fetchHistoricalChanBucket(tx *bbolt.Tx, func fetchHistoricalChanBucket(tx kvdb.ReadTx,
outPoint *wire.OutPoint) (*bbolt.Bucket, error) { outPoint *wire.OutPoint) (kvdb.ReadBucket, error) {
// First fetch the top level bucket which stores all data related to // First fetch the top level bucket which stores all data related to
// historically stored channels. // historically stored channels.
historicalChanBucket := tx.Bucket(historicalChannelBucket) historicalChanBucket := tx.ReadBucket(historicalChannelBucket)
if historicalChanBucket == nil { if historicalChanBucket == nil {
return nil, ErrNoHistoricalBucket return nil, ErrNoHistoricalBucket
} }
@ -1277,7 +1275,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
if err := writeOutpoint(&chanPointBuf, outPoint); err != nil { if err := writeOutpoint(&chanPointBuf, outPoint); err != nil {
return nil, err return nil, err
} }
chanBucket := historicalChanBucket.Bucket(chanPointBuf.Bytes()) chanBucket := historicalChanBucket.NestedReadBucket(chanPointBuf.Bytes())
if chanBucket == nil { if chanBucket == nil {
return nil, ErrChannelNotFound return nil, ErrChannelNotFound
} }
@ -1289,7 +1287,7 @@ func fetchHistoricalChanBucket(tx *bbolt.Tx,
// bucket. // bucket.
func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) { func (d *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, error) {
var channel *OpenChannel var channel *OpenChannel
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
chanBucket, err := fetchHistoricalChanBucket(tx, outPoint) chanBucket, err := fetchHistoricalChanBucket(tx, outPoint)
if err != nil { if err != nil {
return err return err

View File

@ -8,7 +8,7 @@ import (
"time" "time"
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/routing/route"
@ -61,7 +61,7 @@ type duplicateHTLCAttemptInfo struct {
// fetchDuplicatePaymentStatus fetches the payment status of the payment. If the // fetchDuplicatePaymentStatus fetches the payment status of the payment. If the
// payment isn't found, it will default to "StatusUnknown". // payment isn't found, it will default to "StatusUnknown".
func fetchDuplicatePaymentStatus(bucket *bbolt.Bucket) PaymentStatus { func fetchDuplicatePaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(duplicatePaymentSettleInfoKey) != nil { if bucket.Get(duplicatePaymentSettleInfoKey) != nil {
return StatusSucceeded return StatusSucceeded
} }
@ -129,7 +129,7 @@ func deserializeDuplicatePaymentCreationInfo(r io.Reader) (
return c, nil return c, nil
} }
func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) { func fetchDuplicatePayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(duplicatePaymentSequenceKey) seqBytes := bucket.Get(duplicatePaymentSequenceKey)
if seqBytes == nil { if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found") return nil, fmt.Errorf("sequence number not found")
@ -209,7 +209,7 @@ func fetchDuplicatePayment(bucket *bbolt.Bucket) (*MPPayment, error) {
return payment, nil return payment, nil
} }
func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment, func fetchDuplicatePayments(paymentHashBucket kvdb.ReadBucket) ([]*MPPayment,
error) { error) {
var payments []*MPPayment var payments []*MPPayment
@ -217,13 +217,13 @@ func fetchDuplicatePayments(paymentHashBucket *bbolt.Bucket) ([]*MPPayment,
// For older versions of lnd, duplicate payments to a payment has was // For older versions of lnd, duplicate payments to a payment has was
// possible. These will be found in a sub-bucket indexed by their // possible. These will be found in a sub-bucket indexed by their
// sequence number if available. // sequence number if available.
dup := paymentHashBucket.Bucket(duplicatePaymentsBucket) dup := paymentHashBucket.NestedReadBucket(duplicatePaymentsBucket)
if dup == nil { if dup == nil {
return nil, nil return nil, nil
} }
err := dup.ForEach(func(k, v []byte) error { err := dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k) subBucket := dup.NestedReadBucket(k)
if subBucket == nil { if subBucket == nil {
// We one bucket for each duplicate to be found. // We one bucket for each duplicate to be found.
return fmt.Errorf("non bucket element" + return fmt.Errorf("non bucket element" +

View File

@ -6,7 +6,7 @@ import (
"sort" "sort"
"time" "time"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -111,10 +111,10 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error {
var timestamp [8]byte var timestamp [8]byte
return f.db.Batch(func(tx *bbolt.Tx) error { return kvdb.Batch(f.db.Backend, func(tx kvdb.RwTx) error {
// First, we'll fetch the bucket that stores our time series // First, we'll fetch the bucket that stores our time series
// log. // log.
logBucket, err := tx.CreateBucketIfNotExists( logBucket, err := tx.CreateTopLevelBucket(
forwardingLogBucket, forwardingLogBucket,
) )
if err != nil { if err != nil {
@ -204,10 +204,10 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
recordsToSkip := q.IndexOffset recordsToSkip := q.IndexOffset
recordOffset := q.IndexOffset recordOffset := q.IndexOffset
err := f.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(f.db, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any events to // If the bucket wasn't found, then there aren't any events to
// be returned. // be returned.
logBucket := tx.Bucket(forwardingLogBucket) logBucket := tx.ReadBucket(forwardingLogBucket)
if logBucket == nil { if logBucket == nil {
return ErrNoForwardingEvents return ErrNoForwardingEvents
} }
@ -223,7 +223,7 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e
// our seek through the log in order to satisfy the query. // our seek through the log in order to satisfy the query.
// We'll continue until either we reach the end of the range, // We'll continue until either we reach the end of the range,
// or reach our max number of events. // or reach our max number of events.
logCursor := logBucket.Cursor() logCursor := logBucket.ReadCursor()
timestamp, events := logCursor.Seek(startTime[:]) timestamp, events := logCursor.Seek(startTime[:])
for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() { for ; timestamp != nil && bytes.Compare(timestamp, endTime[:]) <= 0; timestamp, events = logCursor.Next() {
// If our current return payload exceeds the max number // If our current return payload exceeds the max number

View File

@ -7,7 +7,7 @@ import (
"fmt" "fmt"
"io" "io"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -318,7 +318,7 @@ type SettleFailRef struct {
type SettleFailAcker interface { type SettleFailAcker interface {
// AckSettleFails atomically updates the settle-fail filters in *other* // AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages. // channels' forwarding packages.
AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error
} }
// GlobalFwdPkgReader is an interface used to retrieve the forwarding packages // GlobalFwdPkgReader is an interface used to retrieve the forwarding packages
@ -326,7 +326,7 @@ type SettleFailAcker interface {
type GlobalFwdPkgReader interface { type GlobalFwdPkgReader interface {
// LoadChannelFwdPkgs loads all known forwarding packages for the given // LoadChannelFwdPkgs loads all known forwarding packages for the given
// channel. // channel.
LoadChannelFwdPkgs(tx *bbolt.Tx, LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error) source lnwire.ShortChannelID) ([]*FwdPkg, error)
} }
@ -357,14 +357,14 @@ func NewSwitchPackager() *SwitchPackager {
// AckSettleFails atomically updates the settle-fail filters in *other* // AckSettleFails atomically updates the settle-fail filters in *other*
// channels' forwarding packages, to mark that the switch has received a settle // channels' forwarding packages, to mark that the switch has received a settle
// or fail residing in the forwarding package of a link. // or fail residing in the forwarding package of a link.
func (*SwitchPackager) AckSettleFails(tx *bbolt.Tx, func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx,
settleFailRefs ...SettleFailRef) error { settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs) return ackSettleFails(tx, settleFailRefs)
} }
// LoadChannelFwdPkgs loads all forwarding packages for a particular channel. // LoadChannelFwdPkgs loads all forwarding packages for a particular channel.
func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx, func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx,
source lnwire.ShortChannelID) ([]*FwdPkg, error) { source lnwire.ShortChannelID) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, source) return loadChannelFwdPkgs(tx, source)
@ -376,19 +376,19 @@ func (*SwitchPackager) LoadChannelFwdPkgs(tx *bbolt.Tx,
type FwdPackager interface { type FwdPackager interface {
// AddFwdPkg serializes and writes a FwdPkg for this channel at the // AddFwdPkg serializes and writes a FwdPkg for this channel at the
// remote commitment height included in the forwarding package. // remote commitment height included in the forwarding package.
AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error
// SetFwdFilter looks up the forwarding package at the remote `height` // SetFwdFilter looks up the forwarding package at the remote `height`
// and sets the `fwdFilter`, marking the Adds for which: // and sets the `fwdFilter`, marking the Adds for which:
// 1) We are not the exit node // 1) We are not the exit node
// 2) Passed all validation // 2) Passed all validation
// 3) Should be forwarded to the switch immediately after a failure // 3) Should be forwarded to the switch immediately after a failure
SetFwdFilter(tx *bbolt.Tx, height uint64, fwdFilter *PkgFilter) error SetFwdFilter(tx kvdb.RwTx, height uint64, fwdFilter *PkgFilter) error
// AckAddHtlcs atomically updates the add filters in this channel's // AckAddHtlcs atomically updates the add filters in this channel's
// forwarding packages to mark the resolution of an Add that was // forwarding packages to mark the resolution of an Add that was
// received from the remote party. // received from the remote party.
AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error
// SettleFailAcker allows a link to acknowledge settle/fail HTLCs // SettleFailAcker allows a link to acknowledge settle/fail HTLCs
// belonging to other channels. // belonging to other channels.
@ -396,11 +396,11 @@ type FwdPackager interface {
// LoadFwdPkgs loads all known forwarding packages owned by this // LoadFwdPkgs loads all known forwarding packages owned by this
// channel. // channel.
LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error)
// RemovePkg deletes a forwarding package owned by this channel at // RemovePkg deletes a forwarding package owned by this channel at
// the provided remote `height`. // the provided remote `height`.
RemovePkg(tx *bbolt.Tx, height uint64) error RemovePkg(tx kvdb.RwTx, height uint64) error
} }
// ChannelPackager is used by a channel to manage the lifecycle of its forwarding // ChannelPackager is used by a channel to manage the lifecycle of its forwarding
@ -420,8 +420,8 @@ func NewChannelPackager(source lnwire.ShortChannelID) *ChannelPackager {
} }
// AddFwdPkg writes a newly locked in forwarding package to disk. // AddFwdPkg writes a newly locked in forwarding package to disk.
func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error { func (*ChannelPackager) AddFwdPkg(tx kvdb.RwTx, fwdPkg *FwdPkg) error {
fwdPkgBkt, err := tx.CreateBucketIfNotExists(fwdPackagesKey) fwdPkgBkt, err := tx.CreateTopLevelBucket(fwdPackagesKey)
if err != nil { if err != nil {
return err return err
} }
@ -485,7 +485,7 @@ func (*ChannelPackager) AddFwdPkg(tx *bbolt.Tx, fwdPkg *FwdPkg) error {
} }
// putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key. // putLogUpdate writes an htlc to the provided `bkt`, using `index` as the key.
func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error { func putLogUpdate(bkt kvdb.RwBucket, idx uint16, htlc *LogUpdate) error {
var b bytes.Buffer var b bytes.Buffer
if err := htlc.Encode(&b); err != nil { if err := htlc.Encode(&b); err != nil {
return err return err
@ -497,19 +497,19 @@ func putLogUpdate(bkt *bbolt.Bucket, idx uint16, htlc *LogUpdate) error {
// LoadFwdPkgs scans the forwarding log for any packages that haven't been // LoadFwdPkgs scans the forwarding log for any packages that haven't been
// processed, and returns their deserialized log updates in a map indexed by the // processed, and returns their deserialized log updates in a map indexed by the
// remote commitment height at which the updates were locked in. // remote commitment height at which the updates were locked in.
func (p *ChannelPackager) LoadFwdPkgs(tx *bbolt.Tx) ([]*FwdPkg, error) { func (p *ChannelPackager) LoadFwdPkgs(tx kvdb.ReadTx) ([]*FwdPkg, error) {
return loadChannelFwdPkgs(tx, p.source) return loadChannelFwdPkgs(tx, p.source)
} }
// loadChannelFwdPkgs loads all forwarding packages owned by `source`. // loadChannelFwdPkgs loads all forwarding packages owned by `source`.
func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg, error) { func loadChannelFwdPkgs(tx kvdb.ReadTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) {
fwdPkgBkt := tx.Bucket(fwdPackagesKey) fwdPkgBkt := tx.ReadBucket(fwdPackagesKey)
if fwdPkgBkt == nil { if fwdPkgBkt == nil {
return nil, nil return nil, nil
} }
sourceKey := makeLogKey(source.ToUint64()) sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil { if sourceBkt == nil {
return nil, nil return nil, nil
} }
@ -543,23 +543,23 @@ func loadChannelFwdPkgs(tx *bbolt.Tx, source lnwire.ShortChannelID) ([]*FwdPkg,
// loadFwPkg reads the packager's fwd pkg at a given height, and determines the // loadFwPkg reads the packager's fwd pkg at a given height, and determines the
// appropriate FwdState. // appropriate FwdState.
func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID, func loadFwdPkg(fwdPkgBkt kvdb.ReadBucket, source lnwire.ShortChannelID,
height uint64) (*FwdPkg, error) { height uint64) (*FwdPkg, error) {
sourceKey := makeLogKey(source.ToUint64()) sourceKey := makeLogKey(source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) sourceBkt := fwdPkgBkt.NestedReadBucket(sourceKey[:])
if sourceBkt == nil { if sourceBkt == nil {
return nil, ErrCorruptedFwdPkg return nil, ErrCorruptedFwdPkg
} }
heightKey := makeLogKey(height) heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:]) heightBkt := sourceBkt.NestedReadBucket(heightKey[:])
if heightBkt == nil { if heightBkt == nil {
return nil, ErrCorruptedFwdPkg return nil, ErrCorruptedFwdPkg
} }
// Load ADDs from disk. // Load ADDs from disk.
addBkt := heightBkt.Bucket(addBucketKey) addBkt := heightBkt.NestedReadBucket(addBucketKey)
if addBkt == nil { if addBkt == nil {
return nil, ErrCorruptedFwdPkg return nil, ErrCorruptedFwdPkg
} }
@ -582,7 +582,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
} }
// Load SETTLE/FAILs from disk. // Load SETTLE/FAILs from disk.
failSettleBkt := heightBkt.Bucket(failSettleBucketKey) failSettleBkt := heightBkt.NestedReadBucket(failSettleBucketKey)
if failSettleBkt == nil { if failSettleBkt == nil {
return nil, ErrCorruptedFwdPkg return nil, ErrCorruptedFwdPkg
} }
@ -649,7 +649,7 @@ func loadFwdPkg(fwdPkgBkt *bbolt.Bucket, source lnwire.ShortChannelID,
// loadHtlcs retrieves all serialized htlcs in a bucket, returning // loadHtlcs retrieves all serialized htlcs in a bucket, returning
// them in order of the indexes they were written under. // them in order of the indexes they were written under.
func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) { func loadHtlcs(bkt kvdb.ReadBucket) ([]LogUpdate, error) {
var htlcs []LogUpdate var htlcs []LogUpdate
if err := bkt.ForEach(func(_, v []byte) error { if err := bkt.ForEach(func(_, v []byte) error {
var htlc LogUpdate var htlc LogUpdate
@ -674,22 +674,22 @@ func loadHtlcs(bkt *bbolt.Bucket) ([]LogUpdate, error) {
// leaving this channel. After a restart, we skip validation of these Adds, // leaving this channel. After a restart, we skip validation of these Adds,
// since they are assumed to have already been validated, and make the switch or // since they are assumed to have already been validated, and make the switch or
// outgoing link responsible for handling replays. // outgoing link responsible for handling replays.
func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64, func (p *ChannelPackager) SetFwdFilter(tx kvdb.RwTx, height uint64,
fwdFilter *PkgFilter) error { fwdFilter *PkgFilter) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey) fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil { if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
source := makeLogKey(p.source.ToUint64()) source := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(source[:]) sourceBkt := fwdPkgBkt.NestedReadWriteBucket(source[:])
if sourceBkt == nil { if sourceBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
heightKey := makeLogKey(height) heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:]) heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil { if heightBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
@ -713,18 +713,18 @@ func (p *ChannelPackager) SetFwdFilter(tx *bbolt.Tx, height uint64,
// AckAddHtlcs accepts a list of references to add htlcs, and updates the // AckAddHtlcs accepts a list of references to add htlcs, and updates the
// AckAddFilter of those forwarding packages to indicate that a settle or fail // AckAddFilter of those forwarding packages to indicate that a settle or fail
// has been received in response to the add. // has been received in response to the add.
func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error { func (p *ChannelPackager) AckAddHtlcs(tx kvdb.RwTx, addRefs ...AddRef) error {
if len(addRefs) == 0 { if len(addRefs) == 0 {
return nil return nil
} }
fwdPkgBkt := tx.Bucket(fwdPackagesKey) fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil { if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
sourceKey := makeLogKey(p.source.ToUint64()) sourceKey := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceKey[:]) sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceKey[:])
if sourceBkt == nil { if sourceBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
@ -753,11 +753,11 @@ func (p *ChannelPackager) AckAddHtlcs(tx *bbolt.Tx, addRefs ...AddRef) error {
// ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package // ackAddHtlcsAtHeight updates the AddAckFilter of a single forwarding package
// with a list of indexes, writing the resulting filter back in its place. // with a list of indexes, writing the resulting filter back in its place.
func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64, func ackAddHtlcsAtHeight(sourceBkt kvdb.RwBucket, height uint64,
indexes []uint16) error { indexes []uint16) error {
heightKey := makeLogKey(height) heightKey := makeLogKey(height)
heightBkt := sourceBkt.Bucket(heightKey[:]) heightBkt := sourceBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil { if heightBkt == nil {
// If the height bucket isn't found, this could be because the // If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to // forwarding package was already removed. We'll return nil to
@ -796,17 +796,17 @@ func ackAddHtlcsAtHeight(sourceBkt *bbolt.Bucket, height uint64,
// package. This should only be called after the source of the Add has locked in // package. This should only be called after the source of the Add has locked in
// the settle/fail, or it becomes otherwise safe to forgo retransmitting the // the settle/fail, or it becomes otherwise safe to forgo retransmitting the
// settle/fail after a restart. // settle/fail after a restart.
func (p *ChannelPackager) AckSettleFails(tx *bbolt.Tx, settleFailRefs ...SettleFailRef) error { func (p *ChannelPackager) AckSettleFails(tx kvdb.RwTx, settleFailRefs ...SettleFailRef) error {
return ackSettleFails(tx, settleFailRefs) return ackSettleFails(tx, settleFailRefs)
} }
// ackSettleFails persistently acknowledges a batch of settle fail references. // ackSettleFails persistently acknowledges a batch of settle fail references.
func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error { func ackSettleFails(tx kvdb.RwTx, settleFailRefs []SettleFailRef) error {
if len(settleFailRefs) == 0 { if len(settleFailRefs) == 0 {
return nil return nil
} }
fwdPkgBkt := tx.Bucket(fwdPackagesKey) fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil { if fwdPkgBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
@ -832,7 +832,7 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// settle/fail htlcs. // settle/fail htlcs.
for dest, destHeights := range destHeightDiffs { for dest, destHeights := range destHeightDiffs {
destKey := makeLogKey(dest.ToUint64()) destKey := makeLogKey(dest.ToUint64())
destBkt := fwdPkgBkt.Bucket(destKey[:]) destBkt := fwdPkgBkt.NestedReadWriteBucket(destKey[:])
if destBkt == nil { if destBkt == nil {
// If the destination bucket is not found, this is // If the destination bucket is not found, this is
// likely the result of the destination channel being // likely the result of the destination channel being
@ -855,11 +855,11 @@ func ackSettleFails(tx *bbolt.Tx, settleFailRefs []SettleFailRef) error {
// ackSettleFailsAtHeight given a destination bucket, acks the provided indexes // ackSettleFailsAtHeight given a destination bucket, acks the provided indexes
// at particular a height by updating the settle fail filter. // at particular a height by updating the settle fail filter.
func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64, func ackSettleFailsAtHeight(destBkt kvdb.RwBucket, height uint64,
indexes []uint16) error { indexes []uint16) error {
heightKey := makeLogKey(height) heightKey := makeLogKey(height)
heightBkt := destBkt.Bucket(heightKey[:]) heightBkt := destBkt.NestedReadWriteBucket(heightKey[:])
if heightBkt == nil { if heightBkt == nil {
// If the height bucket isn't found, this could be because the // If the height bucket isn't found, this could be because the
// forwarding package was already removed. We'll return nil to // forwarding package was already removed. We'll return nil to
@ -895,21 +895,21 @@ func ackSettleFailsAtHeight(destBkt *bbolt.Bucket, height uint64,
// RemovePkg deletes the forwarding package at the given height from the // RemovePkg deletes the forwarding package at the given height from the
// packager's source bucket. // packager's source bucket.
func (p *ChannelPackager) RemovePkg(tx *bbolt.Tx, height uint64) error { func (p *ChannelPackager) RemovePkg(tx kvdb.RwTx, height uint64) error {
fwdPkgBkt := tx.Bucket(fwdPackagesKey) fwdPkgBkt := tx.ReadWriteBucket(fwdPackagesKey)
if fwdPkgBkt == nil { if fwdPkgBkt == nil {
return nil return nil
} }
sourceBytes := makeLogKey(p.source.ToUint64()) sourceBytes := makeLogKey(p.source.ToUint64())
sourceBkt := fwdPkgBkt.Bucket(sourceBytes[:]) sourceBkt := fwdPkgBkt.NestedReadWriteBucket(sourceBytes[:])
if sourceBkt == nil { if sourceBkt == nil {
return ErrCorruptedFwdPkg return ErrCorruptedFwdPkg
} }
heightKey := makeLogKey(height) heightKey := makeLogKey(height)
return sourceBkt.DeleteBucket(heightKey[:]) return sourceBkt.DeleteNestedBucket(heightKey[:])
} }
// uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice. // uint16Key writes the provided 16-bit unsigned integer to a 2-byte slice.

View File

@ -8,8 +8,8 @@ import (
"testing" "testing"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -207,7 +207,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Next, create and write a new forwarding package with no htlcs. // Next, create and write a new forwarding package with no htlcs.
fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil) fwdPkg := channeldb.NewFwdPkg(shortChanID, 0, nil, nil)
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg) return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil { }); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err) t.Fatalf("unable to add fwd pkg: %v", err)
@ -226,7 +226,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
// Now, write the forwarding decision. In this case, its just an empty // Now, write the forwarding decision. In this case, its just an empty
// fwd filter. // fwd filter.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil { }); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err) t.Fatalf("unable to set fwdfiter: %v", err)
@ -244,7 +244,7 @@ func TestPackagerEmptyFwdPkg(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true) assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk. // Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height) return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err) t.Fatalf("unable to remove fwdpkg: %v", err)
@ -279,7 +279,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
nAdds := len(adds) nAdds := len(adds)
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg) return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil { }); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err) t.Fatalf("unable to add fwd pkg: %v", err)
@ -300,7 +300,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the // added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc // adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally. // was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil { }); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err) t.Fatalf("unable to set fwdfiter: %v", err)
@ -324,7 +324,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef) return packager.AckAddHtlcs(tx, addRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to ack add htlc: %v", err) t.Fatalf("unable to ack add htlc: %v", err)
@ -343,7 +343,7 @@ func TestPackagerOnlyAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true) assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk. // Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height) return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err) t.Fatalf("unable to remove fwdpkg: %v", err)
@ -381,7 +381,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
nSettleFails := len(settleFails) nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg) return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil { }); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err) t.Fatalf("unable to add fwd pkg: %v", err)
@ -402,7 +402,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the // added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc // adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally. // was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil { }); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err) t.Fatalf("unable to set fwdfiter: %v", err)
@ -428,7 +428,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef) return packager.AckSettleFails(tx, failSettleRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to ack add htlc: %v", err) t.Fatalf("unable to ack add htlc: %v", err)
@ -448,7 +448,7 @@ func TestPackagerOnlySettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true) assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk. // Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height) return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err) t.Fatalf("unable to remove fwdpkg: %v", err)
@ -486,7 +486,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
nAdds := len(adds) nAdds := len(adds)
nSettleFails := len(settleFails) nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg) return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil { }); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err) t.Fatalf("unable to add fwd pkg: %v", err)
@ -507,7 +507,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the // added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc // adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally. // was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil { }); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err) t.Fatalf("unable to set fwdfiter: %v", err)
@ -532,7 +532,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef) return packager.AckAddHtlcs(tx, addRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to ack add htlc: %v", err) t.Fatalf("unable to ack add htlc: %v", err)
@ -559,7 +559,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef) return packager.AckSettleFails(tx, failSettleRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err) t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -579,7 +579,7 @@ func TestPackagerAddsThenSettleFails(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true) assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk. // Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height) return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err) t.Fatalf("unable to remove fwdpkg: %v", err)
@ -619,7 +619,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
nAdds := len(adds) nAdds := len(adds)
nSettleFails := len(settleFails) nSettleFails := len(settleFails)
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AddFwdPkg(tx, fwdPkg) return packager.AddFwdPkg(tx, fwdPkg)
}); err != nil { }); err != nil {
t.Fatalf("unable to add fwd pkg: %v", err) t.Fatalf("unable to add fwd pkg: %v", err)
@ -640,7 +640,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
// added any adds to the fwdfilter, this would indicate that all of the // added any adds to the fwdfilter, this would indicate that all of the
// adds were 1) settled locally by this link (exit hop), or 2) the htlc // adds were 1) settled locally by this link (exit hop), or 2) the htlc
// was failed locally. // was failed locally.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter) return packager.SetFwdFilter(tx, fwdPkg.Height, fwdPkg.FwdFilter)
}); err != nil { }); err != nil {
t.Fatalf("unable to set fwdfiter: %v", err) t.Fatalf("unable to set fwdfiter: %v", err)
@ -669,7 +669,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckSettleFails(tx, failSettleRef) return packager.AckSettleFails(tx, failSettleRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove settle/fail htlc: %v", err) t.Fatalf("unable to remove settle/fail htlc: %v", err)
@ -696,7 +696,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
Index: uint16(i), Index: uint16(i),
} }
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.AckAddHtlcs(tx, addRef) return packager.AckAddHtlcs(tx, addRef)
}); err != nil { }); err != nil {
t.Fatalf("unable to ack add htlc: %v", err) t.Fatalf("unable to ack add htlc: %v", err)
@ -716,7 +716,7 @@ func TestPackagerSettleFailsThenAdds(t *testing.T) {
assertAckFilterIsFull(t, fwdPkgs[0], true) assertAckFilterIsFull(t, fwdPkgs[0], true)
// Lastly, remove the completed forwarding package from disk. // Lastly, remove the completed forwarding package from disk.
if err := db.Update(func(tx *bbolt.Tx) error { if err := kvdb.Update(db, func(tx kvdb.RwTx) error {
return packager.RemovePkg(tx, fwdPkg.Height) return packager.RemovePkg(tx, fwdPkg.Height)
}); err != nil { }); err != nil {
t.Fatalf("unable to remove fwdpkg: %v", err) t.Fatalf("unable to remove fwdpkg: %v", err)
@ -778,11 +778,11 @@ func assertSettleFailFilterIsFull(t *testing.T, fwdPkg *channeldb.FwdPkg, expect
// loadFwdPkgs is a helper method that reads all forwarding packages for a // loadFwdPkgs is a helper method that reads all forwarding packages for a
// particular packager. // particular packager.
func loadFwdPkgs(t *testing.T, db *bbolt.DB, func loadFwdPkgs(t *testing.T, db kvdb.Backend,
packager channeldb.FwdPackager) []*channeldb.FwdPkg { packager channeldb.FwdPackager) []*channeldb.FwdPkg {
var fwdPkgs []*channeldb.FwdPkg var fwdPkgs []*channeldb.FwdPkg
if err := db.View(func(tx *bbolt.Tx) error { if err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error var err error
fwdPkgs, err = packager.LoadFwdPkgs(tx) fwdPkgs, err = packager.LoadFwdPkgs(tx)
return err return err
@ -795,7 +795,7 @@ func loadFwdPkgs(t *testing.T, db *bbolt.DB,
// makeFwdPkgDB initializes a test database for forwarding packages. If the // makeFwdPkgDB initializes a test database for forwarding packages. If the
// provided path is an empty, it will create a temp dir/file to use. // provided path is an empty, it will create a temp dir/file to use.
func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB { func makeFwdPkgDB(t *testing.T, path string) kvdb.Backend {
if path == "" { if path == "" {
var err error var err error
path, err = ioutil.TempDir("", "fwdpkgdb") path, err = ioutil.TempDir("", "fwdpkgdb")
@ -806,10 +806,10 @@ func makeFwdPkgDB(t *testing.T, path string) *bbolt.DB {
path = filepath.Join(path, "fwdpkg.db") path = filepath.Join(path, "fwdpkg.db")
} }
db, err := bbolt.Open(path, 0600, nil) bdb, err := kvdb.Create(kvdb.BoltBackendName, path, true)
if err != nil { if err != nil {
t.Fatalf("unable to open boltdb: %v", err) t.Fatalf("unable to open boltdb: %v", err)
} }
return db return bdb
} }

File diff suppressed because it is too large Load Diff

View File

@ -17,8 +17,8 @@ import (
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/routing/route"
) )
@ -882,7 +882,7 @@ func TestGraphTraversal(t *testing.T) {
// Iterate over each node as returned by the graph, if all nodes are // Iterate over each node as returned by the graph, if all nodes are
// reached, then the map created above should be empty. // reached, then the map created above should be empty.
err = graph.ForEachNode(nil, func(_ *bbolt.Tx, node *LightningNode) error { err = graph.ForEachNode(nil, func(_ kvdb.ReadTx, node *LightningNode) error {
delete(nodeIndex, node.Alias) delete(nodeIndex, node.Alias)
return nil return nil
}) })
@ -978,7 +978,7 @@ func TestGraphTraversal(t *testing.T) {
// Finally, we want to test the ability to iterate over all the // Finally, we want to test the ability to iterate over all the
// outgoing channels for a particular node. // outgoing channels for a particular node.
numNodeChans := 0 numNodeChans := 0
err = firstNode.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo, err = firstNode.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error { outEdge, inEdge *ChannelEdgePolicy) error {
// All channels between first and second node should have fully // All channels between first and second node should have fully
@ -1051,7 +1051,7 @@ func assertNumChans(t *testing.T, graph *ChannelGraph, n int) {
func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) { func assertNumNodes(t *testing.T, graph *ChannelGraph, n int) {
numNodes := 0 numNodes := 0
err := graph.ForEachNode(nil, func(_ *bbolt.Tx, _ *LightningNode) error { err := graph.ForEachNode(nil, func(_ kvdb.ReadTx, _ *LightningNode) error {
numNodes++ numNodes++
return nil return nil
}) })
@ -2097,10 +2097,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
} }
// Ensure that channel is reported with unknown policies. // Ensure that channel is reported with unknown policies.
checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) { checkPolicies := func(node *LightningNode, expectedIn, expectedOut bool) {
calls := 0 calls := 0
node.ForEachChannel(nil, func(_ *bbolt.Tx, _ *ChannelEdgeInfo, err := node.ForEachChannel(nil, func(_ kvdb.ReadTx, _ *ChannelEdgeInfo,
outEdge, inEdge *ChannelEdgePolicy) error { outEdge, inEdge *ChannelEdgePolicy) error {
if !expectedOut && outEdge != nil { if !expectedOut && outEdge != nil {
@ -2123,6 +2122,9 @@ func TestIncompleteChannelPolicies(t *testing.T) {
return nil return nil
}) })
if err != nil {
t.Fatalf("unable to scan channels: %v", err)
}
if calls != 1 { if calls != 1 {
t.Fatalf("Expected only one callback call") t.Fatalf("Expected only one callback call")
@ -2233,17 +2235,27 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) {
timestampSet[t] = struct{}{} timestampSet[t] = struct{}{}
} }
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
edges := tx.Bucket(edgeBucket) edges := tx.ReadBucket(edgeBucket)
if edges == nil { if edges == nil {
return ErrGraphNoEdgesFound return ErrGraphNoEdgesFound
} }
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket) edgeUpdateIndex := edges.NestedReadBucket(
edgeUpdateIndexBucket,
)
if edgeUpdateIndex == nil { if edgeUpdateIndex == nil {
return ErrGraphNoEdgesFound return ErrGraphNoEdgesFound
} }
numEntries := edgeUpdateIndex.Stats().KeyN var numEntries int
err := edgeUpdateIndex.ForEach(func(k, v []byte) error {
numEntries++
return nil
})
if err != nil {
return err
}
expectedEntries := len(timestampSet) expectedEntries := len(timestampSet)
if numEntries != expectedEntries { if numEntries != expectedEntries {
return fmt.Errorf("expected %v entries in the "+ return fmt.Errorf("expected %v entries in the "+
@ -2832,8 +2844,8 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
// Attempting to deserialize these bytes should return an error. // Attempting to deserialize these bytes should return an error.
r := bytes.NewReader(stripped) r := bytes.NewReader(stripped)
err = db.View(func(tx *bbolt.Tx) error { err = kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return ErrGraphNotFound return ErrGraphNotFound
} }
@ -2852,13 +2864,13 @@ func TestEdgePolicyMissingMaxHtcl(t *testing.T) {
} }
// Put the stripped bytes in the DB. // Put the stripped bytes in the DB.
err = db.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(db, func(tx kvdb.RwTx) error {
edges := tx.Bucket(edgeBucket) edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil { if edges == nil {
return ErrEdgeNotFound return ErrEdgeNotFound
} }
edgeIndex := edges.Bucket(edgeIndexBucket) edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil { if edgeIndex == nil {
return ErrEdgeNotFound return ErrEdgeNotFound
} }

View File

@ -8,7 +8,7 @@ import (
"io" "io"
"time" "time"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/htlcswitch/hop"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
@ -401,8 +401,8 @@ func (d *DB) AddInvoice(newInvoice *Invoice, paymentHash lntypes.Hash) (
} }
var invoiceAddIndex uint64 var invoiceAddIndex uint64
err := d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil { if err != nil {
return err return err
} }
@ -479,13 +479,13 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
var startIndex [8]byte var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceAddIndex) byteOrder.PutUint64(startIndex[:], sinceAddIndex)
err := d.DB.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
addIndex := invoices.Bucket(addIndexBucket) addIndex := invoices.NestedReadBucket(addIndexBucket)
if addIndex == nil { if addIndex == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
@ -493,7 +493,7 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the add index starting // We'll now run through each entry in the add index starting
// at our starting index. We'll continue until we reach the // at our starting index. We'll continue until we reach the
// very end of the current key space. // very end of the current key space.
invoiceCursor := addIndex.Cursor() invoiceCursor := addIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the // We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since add index. // cursor in order to skip the entry with the since add index.
@ -534,12 +534,12 @@ func (d *DB) InvoicesAddedSince(sinceAddIndex uint64) ([]Invoice, error) {
// terms of the payment. // terms of the payment.
func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) { func (d *DB) LookupInvoice(paymentHash [32]byte) (Invoice, error) {
var invoice Invoice var invoice Invoice
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
invoiceIndex := invoices.Bucket(invoiceIndexBucket) invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil { if invoiceIndex == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
@ -589,13 +589,13 @@ func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) (
var result []InvoiceWithPaymentHash var result []InvoiceWithPaymentHash
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
invoiceIndex := invoices.Bucket(invoiceIndexBucket) invoiceIndex := invoices.NestedReadBucket(invoiceIndexBucket)
if invoiceIndex == nil { if invoiceIndex == nil {
// Mask the error if there's no invoice // Mask the error if there's no invoice
// index as that simply means there are no // index as that simply means there are no
@ -695,21 +695,21 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
InvoiceQuery: q, InvoiceQuery: q,
} }
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
// If the bucket wasn't found, then there aren't any invoices // If the bucket wasn't found, then there aren't any invoices
// within the database yet, so we can simply exit. // within the database yet, so we can simply exit.
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
invoiceAddIndex := invoices.Bucket(addIndexBucket) invoiceAddIndex := invoices.NestedReadBucket(addIndexBucket)
if invoiceAddIndex == nil { if invoiceAddIndex == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
// keyForIndex is a helper closure that retrieves the invoice // keyForIndex is a helper closure that retrieves the invoice
// key for the given add index of an invoice. // key for the given add index of an invoice.
keyForIndex := func(c *bbolt.Cursor, index uint64) []byte { keyForIndex := func(c kvdb.ReadCursor, index uint64) []byte {
var keyIndex [8]byte var keyIndex [8]byte
byteOrder.PutUint64(keyIndex[:], index) byteOrder.PutUint64(keyIndex[:], index)
_, invoiceKey := c.Seek(keyIndex[:]) _, invoiceKey := c.Seek(keyIndex[:])
@ -718,7 +718,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// nextKey is a helper closure to determine what the next // nextKey is a helper closure to determine what the next
// invoice key is when iterating over the invoice add index. // invoice key is when iterating over the invoice add index.
nextKey := func(c *bbolt.Cursor) ([]byte, []byte) { nextKey := func(c kvdb.ReadCursor) ([]byte, []byte) {
if q.Reversed { if q.Reversed {
return c.Prev() return c.Prev()
} }
@ -728,7 +728,7 @@ func (d *DB) QueryInvoices(q InvoiceQuery) (InvoiceSlice, error) {
// We'll be using a cursor to seek into the database and return // We'll be using a cursor to seek into the database and return
// a slice of invoices. We'll need to determine where to start // a slice of invoices. We'll need to determine where to start
// our cursor depending on the parameters set within the query. // our cursor depending on the parameters set within the query.
c := invoiceAddIndex.Cursor() c := invoiceAddIndex.ReadCursor()
invoiceKey := keyForIndex(c, q.IndexOffset+1) invoiceKey := keyForIndex(c, q.IndexOffset+1)
// If the query is specifying reverse iteration, then we must // If the query is specifying reverse iteration, then we must
@ -822,8 +822,8 @@ func (d *DB) UpdateInvoice(paymentHash lntypes.Hash,
callback InvoiceUpdateCallback) (*Invoice, error) { callback InvoiceUpdateCallback) (*Invoice, error) {
var updatedInvoice *Invoice var updatedInvoice *Invoice
err := d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil { if err != nil {
return err return err
} }
@ -877,13 +877,13 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
var startIndex [8]byte var startIndex [8]byte
byteOrder.PutUint64(startIndex[:], sinceSettleIndex) byteOrder.PutUint64(startIndex[:], sinceSettleIndex)
err := d.DB.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
settleIndex := invoices.Bucket(settleIndexBucket) settleIndex := invoices.NestedReadBucket(settleIndexBucket)
if settleIndex == nil { if settleIndex == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }
@ -891,7 +891,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
// We'll now run through each entry in the add index starting // We'll now run through each entry in the add index starting
// at our starting index. We'll continue until we reach the // at our starting index. We'll continue until we reach the
// very end of the current key space. // very end of the current key space.
invoiceCursor := settleIndex.Cursor() invoiceCursor := settleIndex.ReadCursor()
// We'll seek to the starting index, then manually advance the // We'll seek to the starting index, then manually advance the
// cursor in order to skip the entry with the since add index. // cursor in order to skip the entry with the since add index.
@ -919,7 +919,7 @@ func (d *DB) InvoicesSettledSince(sinceSettleIndex uint64) ([]Invoice, error) {
return settledInvoices, nil return settledInvoices, nil
} }
func putInvoice(invoices, invoiceIndex, addIndex *bbolt.Bucket, func putInvoice(invoices, invoiceIndex, addIndex kvdb.RwBucket,
i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) ( i *Invoice, invoiceNum uint32, paymentHash lntypes.Hash) (
uint64, error) { uint64, error) {
@ -1112,7 +1112,7 @@ func serializeHtlcs(w io.Writer, htlcs map[CircuitKey]*InvoiceHTLC) error {
return nil return nil
} }
func fetchInvoice(invoiceNum []byte, invoices *bbolt.Bucket) (Invoice, error) { func fetchInvoice(invoiceNum []byte, invoices kvdb.ReadBucket) (Invoice, error) {
invoiceBytes := invoices.Get(invoiceNum) invoiceBytes := invoices.Get(invoiceNum)
if invoiceBytes == nil { if invoiceBytes == nil {
return Invoice{}, ErrInvoiceNotFound return Invoice{}, ErrInvoiceNotFound
@ -1325,7 +1325,7 @@ func copyInvoice(src *Invoice) *Invoice {
// updateInvoice fetches the invoice, obtains the update descriptor from the // updateInvoice fetches the invoice, obtains the update descriptor from the
// callback and applies the updates in a single db transaction. // callback and applies the updates in a single db transaction.
func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex *bbolt.Bucket, func (d *DB) updateInvoice(hash lntypes.Hash, invoices, settleIndex kvdb.RwBucket,
invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, error) { invoiceNum []byte, callback InvoiceUpdateCallback) (*Invoice, error) {
invoice, err := fetchInvoice(invoiceNum, invoices) invoice, err := fetchInvoice(invoiceNum, invoices)
@ -1572,7 +1572,7 @@ func updateHtlc(resolveTime time.Time, htlc *InvoiceHTLC,
// setSettleMetaFields updates the metadata associated with settlement of an // setSettleMetaFields updates the metadata associated with settlement of an
// invoice. // invoice.
func setSettleMetaFields(settleIndex *bbolt.Bucket, invoiceNum []byte, func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte,
invoice *Invoice, now time.Time) error { invoice *Invoice, now time.Time) error {
// Now that we know the invoice hasn't already been settled, we'll // Now that we know the invoice hasn't already been settled, we'll

View File

@ -1,6 +1,8 @@
package channeldb package channeldb
import "github.com/coreos/bbolt" import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var ( var (
// metaBucket stores all the meta information concerning the state of // metaBucket stores all the meta information concerning the state of
@ -20,10 +22,10 @@ type Meta struct {
// FetchMeta fetches the meta data from boltdb and returns filled meta // FetchMeta fetches the meta data from boltdb and returns filled meta
// structure. // structure.
func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) { func (d *DB) FetchMeta(tx kvdb.ReadTx) (*Meta, error) {
meta := &Meta{} meta := &Meta{}
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
return fetchMeta(meta, tx) return fetchMeta(meta, tx)
}) })
if err != nil { if err != nil {
@ -36,8 +38,8 @@ func (d *DB) FetchMeta(tx *bbolt.Tx) (*Meta, error) {
// fetchMeta is an internal helper function used in order to allow callers to // fetchMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported FetchMeta method // re-use a database transaction. See the publicly exported FetchMeta method
// for more information. // for more information.
func fetchMeta(meta *Meta, tx *bbolt.Tx) error { func fetchMeta(meta *Meta, tx kvdb.ReadTx) error {
metaBucket := tx.Bucket(metaBucket) metaBucket := tx.ReadBucket(metaBucket)
if metaBucket == nil { if metaBucket == nil {
return ErrMetaNotFound return ErrMetaNotFound
} }
@ -54,7 +56,7 @@ func fetchMeta(meta *Meta, tx *bbolt.Tx) error {
// PutMeta writes the passed instance of the database met-data struct to disk. // PutMeta writes the passed instance of the database met-data struct to disk.
func (d *DB) PutMeta(meta *Meta) error { func (d *DB) PutMeta(meta *Meta) error {
return d.Update(func(tx *bbolt.Tx) error { return kvdb.Update(d, func(tx kvdb.RwTx) error {
return putMeta(meta, tx) return putMeta(meta, tx)
}) })
} }
@ -62,8 +64,8 @@ func (d *DB) PutMeta(meta *Meta) error {
// putMeta is an internal helper function used in order to allow callers to // putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for // re-use a database transaction. See the publicly exported PutMeta method for
// more information. // more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error { func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket) metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil { if err != nil {
return err return err
} }
@ -71,7 +73,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta) return putDbVersion(metaBucket, meta)
} }
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error { func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4) scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber) byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch) return metaBucket.Put(dbVersionKey, scratch)

View File

@ -5,8 +5,8 @@ import (
"io/ioutil" "io/ioutil"
"testing" "testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
// applyMigration is a helper test function that encapsulates the general steps // applyMigration is a helper test function that encapsulates the general steps
@ -121,11 +121,11 @@ func TestOrderOfMigrations(t *testing.T) {
versions := []version{ versions := []version{
{0, nil}, {0, nil},
{1, nil}, {1, nil},
{2, func(tx *bbolt.Tx) error { {2, func(tx kvdb.RwTx) error {
appliedMigration = 2 appliedMigration = 2
return nil return nil
}}, }},
{3, func(tx *bbolt.Tx) error { {3, func(tx kvdb.RwTx) error {
appliedMigration = 3 appliedMigration = 3
return nil return nil
}}, }},
@ -197,21 +197,23 @@ func TestMigrationWithPanic(t *testing.T) {
beforeMigrationFunc := func(d *DB) { beforeMigrationFunc := func(d *DB) {
// Insert data in database and in order then make sure that the // Insert data in database and in order then make sure that the
// key isn't changes in case of panic or fail. // key isn't changes in case of panic or fail.
d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
bucket.Put(keyPrefix, beforeMigration) return bucket.Put(keyPrefix, beforeMigration)
return nil
}) })
if err != nil {
t.Fatalf("unable to insert: %v", err)
}
} }
// Create migration function which changes the initially created data and // Create migration function which changes the initially created data and
// throw the panic, in this case we pretending that something goes. // throw the panic, in this case we pretending that something goes.
migrationWithPanic := func(tx *bbolt.Tx) error { migrationWithPanic := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -231,8 +233,8 @@ func TestMigrationWithPanic(t *testing.T) {
t.Fatal("migration panicked but version is changed") t.Fatal("migration panicked but version is changed")
} }
err = d.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -268,22 +270,24 @@ func TestMigrationWithFatal(t *testing.T) {
afterMigration := []byte("aftermigration") afterMigration := []byte("aftermigration")
beforeMigrationFunc := func(d *DB) { beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
bucket.Put(keyPrefix, beforeMigration) return bucket.Put(keyPrefix, beforeMigration)
return nil
}) })
if err != nil {
t.Fatalf("unable to insert pre migration key: %v", err)
}
} }
// Create migration function which changes the initially created data and // Create migration function which changes the initially created data and
// return the error, in this case we pretending that something goes // return the error, in this case we pretending that something goes
// wrong. // wrong.
migrationWithFatal := func(tx *bbolt.Tx) error { migrationWithFatal := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -303,8 +307,8 @@ func TestMigrationWithFatal(t *testing.T) {
t.Fatal("migration failed but version is changed") t.Fatal("migration failed but version is changed")
} }
err = d.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -341,8 +345,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
// Populate database with initial data. // Populate database with initial data.
beforeMigrationFunc := func(d *DB) { beforeMigrationFunc := func(d *DB) {
d.Update(func(tx *bbolt.Tx) error { kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -353,8 +357,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
} }
// Create migration function which changes the initially created data. // Create migration function which changes the initially created data.
migrationWithoutErrors := func(tx *bbolt.Tx) error { migrationWithoutErrors := func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -375,8 +379,8 @@ func TestMigrationWithoutErrors(t *testing.T) {
"successfully applied migration") "successfully applied migration")
} }
err = d.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
bucket, err := tx.CreateBucketIfNotExists(bucketPrefix) bucket, err := tx.CreateTopLevelBucket(bucketPrefix)
if err != nil { if err != nil {
return err return err
} }
@ -419,7 +423,7 @@ func TestMigrationReversion(t *testing.T) {
// Update the database metadata to point to one more than the highest // Update the database metadata to point to one more than the highest
// known version. // known version.
err = cdb.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
newMeta := &Meta{ newMeta := &Meta{
DbVersionNumber: getLatestDBVersion(dbVersions) + 1, DbVersionNumber: getLatestDBVersion(dbVersions) + 1,
} }

View File

@ -3,7 +3,7 @@ package migration12
import ( import (
"bytes" "bytes"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -12,11 +12,11 @@ var emptyFeatures = lnwire.NewFeatureVector(nil, nil)
// MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized // MigrateInvoiceTLV migrates all existing invoice bodies over to be serialized
// in a single TLV stream. In the process, we drop the Receipt field and add // in a single TLV stream. In the process, we drop the Receipt field and add
// PaymentAddr and Features to the invoice Terms. // PaymentAddr and Features to the invoice Terms.
func MigrateInvoiceTLV(tx *bbolt.Tx) error { func MigrateInvoiceTLV(tx kvdb.RwTx) error {
log.Infof("Migrating invoice bodies to TLV, " + log.Infof("Migrating invoice bodies to TLV, " +
"adding payment addresses and feature vectors.") "adding payment addresses and feature vectors.")
invoiceB := tx.Bucket(invoiceBucket) invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil { if invoiceB == nil {
return nil return nil
} }

View File

@ -5,7 +5,7 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migration12" "github.com/lightningnetwork/lnd/channeldb/migration12"
"github.com/lightningnetwork/lnd/channeldb/migtest" "github.com/lightningnetwork/lnd/channeldb/migtest"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
@ -121,15 +121,15 @@ var (
type migrationTest struct { type migrationTest struct {
name string name string
beforeMigration func(*bbolt.Tx) error beforeMigration func(kvdb.RwTx) error
afterMigration func(*bbolt.Tx) error afterMigration func(kvdb.RwTx) error
} }
var migrationTests = []migrationTest{ var migrationTests = []migrationTest{
{ {
name: "no invoices", name: "no invoices",
beforeMigration: func(*bbolt.Tx) error { return nil }, beforeMigration: func(kvdb.RwTx) error { return nil },
afterMigration: func(*bbolt.Tx) error { return nil }, afterMigration: func(kvdb.RwTx) error { return nil },
}, },
{ {
name: "zero htlcs", name: "zero htlcs",
@ -145,9 +145,9 @@ var migrationTests = []migrationTest{
// genBeforeMigration creates a closure that inserts an invoice serialized under // genBeforeMigration creates a closure that inserts an invoice serialized under
// the old format under the test payment hash. // the old format under the test payment hash.
func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error { func genBeforeMigration(beforeBytes []byte) func(kvdb.RwTx) error {
return func(tx *bbolt.Tx) error { return func(tx kvdb.RwTx) error {
invoices, err := tx.CreateBucketIfNotExists( invoices, err := tx.CreateTopLevelBucket(
invoiceBucket, invoiceBucket,
) )
if err != nil { if err != nil {
@ -162,9 +162,9 @@ func genBeforeMigration(beforeBytes []byte) func(*bbolt.Tx) error {
// succeeded, but comparing the resulting encoding of the invoice to the // succeeded, but comparing the resulting encoding of the invoice to the
// expected serialization. In addition, the decoded invoice is compared against // expected serialization. In addition, the decoded invoice is compared against
// the expected invoice for equality. // the expected invoice for equality.
func genAfterMigration(afterBytes []byte) func(*bbolt.Tx) error { func genAfterMigration(afterBytes []byte) func(kvdb.RwTx) error {
return func(tx *bbolt.Tx) error { return func(tx kvdb.RwTx) error {
invoices := tx.Bucket(invoiceBucket) invoices := tx.ReadWriteBucket(invoiceBucket)
if invoices == nil { if invoices == nil {
return fmt.Errorf("invoice bucket not found") return fmt.Errorf("invoice bucket not found")
} }

View File

@ -4,7 +4,7 @@ import (
"encoding/binary" "encoding/binary"
"fmt" "fmt"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
var ( var (
@ -48,13 +48,13 @@ var (
// MigrateMPP migrates the payments to a new structure that accommodates for mpp // MigrateMPP migrates the payments to a new structure that accommodates for mpp
// payments. // payments.
func MigrateMPP(tx *bbolt.Tx) error { func MigrateMPP(tx kvdb.RwTx) error {
log.Infof("Migrating payments to mpp structure") log.Infof("Migrating payments to mpp structure")
// Iterate over all payments and store their indexing keys. This is // Iterate over all payments and store their indexing keys. This is
// needed, because no modifications are allowed inside a Bucket.ForEach // needed, because no modifications are allowed inside a Bucket.ForEach
// loop. // loop.
paymentsBucket := tx.Bucket(paymentsRootBucket) paymentsBucket := tx.ReadWriteBucket(paymentsRootBucket)
if paymentsBucket == nil { if paymentsBucket == nil {
return nil return nil
} }
@ -70,7 +70,7 @@ func MigrateMPP(tx *bbolt.Tx) error {
// With all keys retrieved, start the migration. // With all keys retrieved, start the migration.
for _, k := range paymentKeys { for _, k := range paymentKeys {
bucket := paymentsBucket.Bucket(k) bucket := paymentsBucket.NestedReadWriteBucket(k)
// We only expect sub-buckets to be found in // We only expect sub-buckets to be found in
// this top-level bucket. // this top-level bucket.

View File

@ -3,7 +3,7 @@ package migration13
import ( import (
"testing" "testing"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/channeldb/migtest" "github.com/lightningnetwork/lnd/channeldb/migtest"
) )
@ -111,10 +111,10 @@ func TestMigrateMpp(t *testing.T) {
migtest.ApplyMigration( migtest.ApplyMigration(
t, t,
func(tx *bbolt.Tx) error { func(tx kvdb.RwTx) error {
return migtest.RestoreDB(tx, paymentsRootBucket, pre) return migtest.RestoreDB(tx, paymentsRootBucket, pre)
}, },
func(tx *bbolt.Tx) error { func(tx kvdb.RwTx) error {
return migtest.VerifyDB(tx, paymentsRootBucket, post) return migtest.VerifyDB(tx, paymentsRootBucket, post)
}, },
MigrateMPP, MigrateMPP,

View File

@ -8,7 +8,7 @@ import (
"path/filepath" "path/filepath"
"time" "time"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
const ( const (
@ -19,7 +19,7 @@ const (
// migration is a function which takes a prior outdated version of the database // migration is a function which takes a prior outdated version of the database
// instances and mutates the key/bucket structure to arrive at a more // instances and mutates the key/bucket structure to arrive at a more
// up-to-date version of the database. // up-to-date version of the database.
type migration func(tx *bbolt.Tx) error type migration func(tx kvdb.RwTx) error
var ( var (
// Big endian is the preferred byte order, due to cursor scans over // Big endian is the preferred byte order, due to cursor scans over
@ -31,7 +31,7 @@ var (
// information related to nodes, routing data, open/closed channels, fee // information related to nodes, routing data, open/closed channels, fee
// schedules, and reputation data. // schedules, and reputation data.
type DB struct { type DB struct {
*bbolt.DB kvdb.Backend
dbPath string dbPath string
graph *ChannelGraph graph *ChannelGraph
now func() time.Time now func() time.Time
@ -55,18 +55,13 @@ func Open(dbPath string, modifiers ...OptionModifier) (*DB, error) {
// Specify bbolt freelist options to reduce heap pressure in case the // Specify bbolt freelist options to reduce heap pressure in case the
// freelist grows to be very large. // freelist grows to be very large.
options := &bbolt.Options{ bdb, err := kvdb.Open(kvdb.BoltBackendName, path, opts.NoFreelistSync)
NoFreelistSync: opts.NoFreelistSync,
FreelistType: bbolt.FreelistMapType,
}
bdb, err := bbolt.Open(path, dbFilePermission, options)
if err != nil { if err != nil {
return nil, err return nil, err
} }
chanDB := &DB{ chanDB := &DB{
DB: bdb, Backend: bdb,
dbPath: dbPath, dbPath: dbPath,
now: time.Now, now: time.Now,
} }
@ -89,28 +84,28 @@ func createChannelDB(dbPath string) error {
} }
path := filepath.Join(dbPath, dbName) path := filepath.Join(dbPath, dbName)
bdb, err := bbolt.Open(path, dbFilePermission, nil) bdb, err := kvdb.Create(kvdb.BoltBackendName, path, false)
if err != nil { if err != nil {
return err return err
} }
err = bdb.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(bdb, func(tx kvdb.RwTx) error {
if _, err := tx.CreateBucket(openChannelBucket); err != nil { if _, err := tx.CreateTopLevelBucket(openChannelBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(closedChannelBucket); err != nil { if _, err := tx.CreateTopLevelBucket(closedChannelBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(invoiceBucket); err != nil { if _, err := tx.CreateTopLevelBucket(invoiceBucket); err != nil {
return err return err
} }
if _, err := tx.CreateBucket(paymentBucket); err != nil { if _, err := tx.CreateTopLevelBucket(paymentBucket); err != nil {
return err return err
} }
nodes, err := tx.CreateBucket(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -123,7 +118,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
edges, err := tx.CreateBucket(edgeBucket) edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -140,7 +135,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
graphMeta, err := tx.CreateBucket(graphMetaBucket) graphMeta, err := tx.CreateTopLevelBucket(graphMetaBucket)
if err != nil { if err != nil {
return err return err
} }
@ -149,7 +144,7 @@ func createChannelDB(dbPath string) error {
return err return err
} }
if _, err := tx.CreateBucket(metaBucket); err != nil { if _, err := tx.CreateTopLevelBucket(metaBucket); err != nil {
return err return err
} }
@ -185,8 +180,8 @@ func fileExists(path string) bool {
func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) { func (d *DB) FetchClosedChannels(pendingOnly bool) ([]*ChannelCloseSummary, error) {
var chanSummaries []*ChannelCloseSummary var chanSummaries []*ChannelCloseSummary
if err := d.View(func(tx *bbolt.Tx) error { if err := kvdb.View(d, func(tx kvdb.ReadTx) error {
closeBucket := tx.Bucket(closedChannelBucket) closeBucket := tx.ReadBucket(closedChannelBucket)
if closeBucket == nil { if closeBucket == nil {
return ErrNoClosedChannels return ErrNoClosedChannels
} }

View File

@ -13,7 +13,7 @@ import (
"github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -175,10 +175,10 @@ func newChannelGraph(db *DB, rejectCacheSize, chanCacheSize int) *ChannelGraph {
// node based off the source node. // node based off the source node.
func (c *ChannelGraph) SourceNode() (*LightningNode, error) { func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
var source *LightningNode var source *LightningNode
err := c.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(c.db, func(tx kvdb.ReadTx) error {
// First grab the nodes bucket which stores the mapping from // First grab the nodes bucket which stores the mapping from
// pubKey to node information. // pubKey to node information.
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return ErrGraphNotFound return ErrGraphNotFound
} }
@ -202,7 +202,7 @@ func (c *ChannelGraph) SourceNode() (*LightningNode, error) {
// of the graph. The source node is treated as the center node within a // of the graph. The source node is treated as the center node within a
// star-graph. This method may be used to kick off a path finding algorithm in // star-graph. This method may be used to kick off a path finding algorithm in
// order to explore the reachability of another node based off the source node. // order to explore the reachability of another node based off the source node.
func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) { func (c *ChannelGraph) sourceNode(nodes kvdb.ReadBucket) (*LightningNode, error) {
selfPub := nodes.Get(sourceKey) selfPub := nodes.Get(sourceKey)
if selfPub == nil { if selfPub == nil {
return nil, ErrSourceNodeNotSet return nil, ErrSourceNodeNotSet
@ -225,10 +225,10 @@ func (c *ChannelGraph) sourceNode(nodes *bbolt.Bucket) (*LightningNode, error) {
func (c *ChannelGraph) SetSourceNode(node *LightningNode) error { func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
nodePubBytes := node.PubKeyBytes[:] nodePubBytes := node.PubKeyBytes[:]
return c.db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(c.db, func(tx kvdb.RwTx) error {
// First grab the nodes bucket which stores the mapping from // First grab the nodes bucket which stores the mapping from
// pubKey to node information. // pubKey to node information.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -245,8 +245,8 @@ func (c *ChannelGraph) SetSourceNode(node *LightningNode) error {
}) })
} }
func addLightningNode(tx *bbolt.Tx, node *LightningNode) error { func addLightningNode(tx kvdb.RwTx, node *LightningNode) error {
nodes, err := tx.CreateBucketIfNotExists(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return err return err
} }
@ -270,17 +270,17 @@ func addLightningNode(tx *bbolt.Tx, node *LightningNode) error {
// buckets using an existing database transaction. The returned boolean will be // buckets using an existing database transaction. The returned boolean will be
// true if the updated policy belongs to node1, and false if the policy belonged // true if the updated policy belongs to node1, and false if the policy belonged
// to node2. // to node2.
func updateEdgePolicy(tx *bbolt.Tx, edge *ChannelEdgePolicy) (bool, error) { func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy) (bool, error) {
edges := tx.Bucket(edgeBucket) edges, err := tx.CreateTopLevelBucket(edgeBucket)
if edges == nil { if err != nil {
return false, ErrEdgeNotFound return false, ErrEdgeNotFound
} }
edgeIndex := edges.Bucket(edgeIndexBucket) edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil { if edgeIndex == nil {
return false, ErrEdgeNotFound return false, ErrEdgeNotFound
} }
nodes, err := tx.CreateBucketIfNotExists(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -551,8 +551,8 @@ func (c *ChannelEdgePolicy) IsDisabled() bool {
lnwire.ChanUpdateDisabled lnwire.ChanUpdateDisabled
} }
func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket, func putLightningNode(nodeBucket kvdb.RwBucket, aliasBucket kvdb.RwBucket,
updateIndex *bbolt.Bucket, node *LightningNode) error { updateIndex kvdb.RwBucket, node *LightningNode) error {
var ( var (
scratch [16]byte scratch [16]byte
@ -680,7 +680,7 @@ func putLightningNode(nodeBucket *bbolt.Bucket, aliasBucket *bbolt.Bucket,
return nodeBucket.Put(nodePub, b.Bytes()) return nodeBucket.Put(nodePub, b.Bytes())
} }
func fetchLightningNode(nodeBucket *bbolt.Bucket, func fetchLightningNode(nodeBucket kvdb.ReadBucket,
nodePub []byte) (LightningNode, error) { nodePub []byte) (LightningNode, error) {
nodeBytes := nodeBucket.Get(nodePub) nodeBytes := nodeBucket.Get(nodePub)
@ -863,7 +863,7 @@ func deserializeChanEdgeInfo(r io.Reader) (ChannelEdgeInfo, error) {
return edgeInfo, nil return edgeInfo, nil
} }
func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy, func putChanEdgePolicy(edges, nodes kvdb.RwBucket, edge *ChannelEdgePolicy,
from, to []byte) error { from, to []byte) error {
var edgeKey [33 + 8]byte var edgeKey [33 + 8]byte
@ -943,7 +943,7 @@ func putChanEdgePolicy(edges, nodes *bbolt.Bucket, edge *ChannelEdgePolicy,
// in this bucket. // in this bucket.
// Maintaining the bucket this way allows a fast retrieval of disabled // Maintaining the bucket this way allows a fast retrieval of disabled
// channels, for example when prune is needed. // channels, for example when prune is needed.
func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64, func updateEdgePolicyDisabledIndex(edges kvdb.RwBucket, chanID uint64,
direction bool, disabled bool) error { direction bool, disabled bool) error {
var disabledEdgeKey [8 + 1]byte var disabledEdgeKey [8 + 1]byte
@ -968,7 +968,7 @@ func updateEdgePolicyDisabledIndex(edges *bbolt.Bucket, chanID uint64,
// putChanEdgePolicyUnknown marks the edge policy as unknown // putChanEdgePolicyUnknown marks the edge policy as unknown
// in the edges bucket. // in the edges bucket.
func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64, func putChanEdgePolicyUnknown(edges kvdb.RwBucket, channelID uint64,
from []byte) error { from []byte) error {
var edgeKey [33 + 8]byte var edgeKey [33 + 8]byte
@ -983,8 +983,8 @@ func putChanEdgePolicyUnknown(edges *bbolt.Bucket, channelID uint64,
return edges.Put(edgeKey[:], unknownPolicy) return edges.Put(edgeKey[:], unknownPolicy)
} }
func fetchChanEdgePolicy(edges *bbolt.Bucket, chanID []byte, func fetchChanEdgePolicy(edges kvdb.ReadBucket, chanID []byte,
nodePub []byte, nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) { nodePub []byte, nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
var edgeKey [33 + 8]byte var edgeKey [33 + 8]byte
copy(edgeKey[:], nodePub) copy(edgeKey[:], nodePub)
@ -1084,7 +1084,7 @@ func serializeChanEdgePolicy(w io.Writer, edge *ChannelEdgePolicy,
} }
func deserializeChanEdgePolicy(r io.Reader, func deserializeChanEdgePolicy(r io.Reader,
nodes *bbolt.Bucket) (*ChannelEdgePolicy, error) { nodes kvdb.ReadBucket) (*ChannelEdgePolicy, error) {
edge := &ChannelEdgePolicy{} edge := &ChannelEdgePolicy{}

View File

@ -8,7 +8,7 @@ import (
"time" "time"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv" "github.com/lightningnetwork/lnd/tlv"
@ -252,8 +252,8 @@ func validateInvoice(i *Invoice) error {
func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) { func (d *DB) FetchAllInvoices(pendingOnly bool) ([]Invoice, error) {
var invoices []Invoice var invoices []Invoice
err := d.View(func(tx *bbolt.Tx) error { err := kvdb.View(d, func(tx kvdb.ReadTx) error {
invoiceB := tx.Bucket(invoiceBucket) invoiceB := tx.ReadBucket(invoiceBucket)
if invoiceB == nil { if invoiceB == nil {
return ErrNoInvoicesCreated return ErrNoInvoicesCreated
} }

View File

@ -1,6 +1,8 @@
package migration_01_to_11 package migration_01_to_11
import "github.com/coreos/bbolt" import (
"github.com/lightningnetwork/lnd/channeldb/kvdb"
)
var ( var (
// metaBucket stores all the meta information concerning the state of // metaBucket stores all the meta information concerning the state of
@ -21,8 +23,8 @@ type Meta struct {
// putMeta is an internal helper function used in order to allow callers to // putMeta is an internal helper function used in order to allow callers to
// re-use a database transaction. See the publicly exported PutMeta method for // re-use a database transaction. See the publicly exported PutMeta method for
// more information. // more information.
func putMeta(meta *Meta, tx *bbolt.Tx) error { func putMeta(meta *Meta, tx kvdb.RwTx) error {
metaBucket, err := tx.CreateBucketIfNotExists(metaBucket) metaBucket, err := tx.CreateTopLevelBucket(metaBucket)
if err != nil { if err != nil {
return err return err
} }
@ -30,7 +32,7 @@ func putMeta(meta *Meta, tx *bbolt.Tx) error {
return putDbVersion(metaBucket, meta) return putDbVersion(metaBucket, meta)
} }
func putDbVersion(metaBucket *bbolt.Bucket, meta *Meta) error { func putDbVersion(metaBucket kvdb.RwBucket, meta *Meta) error {
scratch := make([]byte, 4) scratch := make([]byte, 4)
byteOrder.PutUint32(scratch, meta.DbVersionNumber) byteOrder.PutUint32(scratch, meta.DbVersionNumber)
return metaBucket.Put(dbVersionKey, scratch) return metaBucket.Put(dbVersionKey, scratch)

View File

@ -3,8 +3,8 @@ package migration_01_to_11
import ( import (
"testing" "testing"
"github.com/coreos/bbolt"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
// applyMigration is a helper test function that encapsulates the general steps // applyMigration is a helper test function that encapsulates the general steps
@ -49,7 +49,7 @@ func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB),
}() }()
// Apply migration. // Apply migration.
err = cdb.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(cdb, func(tx kvdb.RwTx) error {
return migrationFunc(tx) return migrationFunc(tx)
}) })
if err != nil { if err != nil {

View File

@ -7,7 +7,7 @@ import (
"io" "io"
"sort" "sort"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -76,8 +76,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
} }
paymentBytes := b.Bytes() paymentBytes := b.Bytes()
return db.Batch(func(tx *bbolt.Tx) error { return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments, err := tx.CreateBucketIfNotExists(paymentBucket) payments, err := tx.CreateTopLevelBucket(paymentBucket)
if err != nil { if err != nil {
return err return err
} }
@ -104,8 +104,8 @@ func (db *DB) addPayment(payment *outgoingPayment) error {
func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) { func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
var payments []*outgoingPayment var payments []*outgoingPayment
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
bucket := tx.Bucket(paymentBucket) bucket := tx.ReadBucket(paymentBucket)
if bucket == nil { if bucket == nil {
return ErrNoPaymentsCreated return ErrNoPaymentsCreated
} }
@ -140,7 +140,7 @@ func (db *DB) fetchAllPayments() ([]*outgoingPayment, error) {
// NOTE: Deprecated. Kept around for migration purposes. // NOTE: Deprecated. Kept around for migration purposes.
func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) { func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
var paymentStatus = StatusUnknown var paymentStatus = StatusUnknown
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
var err error var err error
paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash) paymentStatus, err = fetchPaymentStatusTx(tx, paymentHash)
return err return err
@ -158,11 +158,11 @@ func (db *DB) fetchPaymentStatus(paymentHash [32]byte) (PaymentStatus, error) {
// can be composed into other atomic operations. // can be composed into other atomic operations.
// //
// NOTE: Deprecated. Kept around for migration purposes. // NOTE: Deprecated. Kept around for migration purposes.
func fetchPaymentStatusTx(tx *bbolt.Tx, paymentHash [32]byte) (PaymentStatus, error) { func fetchPaymentStatusTx(tx kvdb.ReadTx, paymentHash [32]byte) (PaymentStatus, error) {
// The default status for all payments that aren't recorded in database. // The default status for all payments that aren't recorded in database.
var paymentStatus = StatusUnknown var paymentStatus = StatusUnknown
bucket := tx.Bucket(paymentStatusBucket) bucket := tx.ReadBucket(paymentStatusBucket)
if bucket == nil { if bucket == nil {
return paymentStatus, nil return paymentStatus, nil
} }
@ -375,14 +375,14 @@ func deserializeHopMigration9(r io.Reader) (*Hop, error) {
func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) { func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
var payments []*Payment var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket) paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil { if paymentsBucket == nil {
return nil return nil
} }
return paymentsBucket.ForEach(func(k, v []byte) error { return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k) bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil { if bucket == nil {
// We only expect sub-buckets to be found in // We only expect sub-buckets to be found in
// this top-level bucket. // this top-level bucket.
@ -401,13 +401,13 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
// payment has was possible. These will be found in a // payment has was possible. These will be found in a
// sub-bucket indexed by their sequence number if // sub-bucket indexed by their sequence number if
// available. // available.
dup := bucket.Bucket(paymentDuplicateBucket) dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil { if dup == nil {
return nil return nil
} }
return dup.ForEach(func(k, v []byte) error { return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k) subBucket := dup.NestedReadBucket(k)
if subBucket == nil { if subBucket == nil {
// We one bucket for each duplicate to // We one bucket for each duplicate to
// be found. // be found.
@ -437,7 +437,7 @@ func (db *DB) fetchPaymentsMigration9() ([]*Payment, error) {
return payments, nil return payments, nil
} }
func fetchPaymentMigration9(bucket *bbolt.Bucket) (*Payment, error) { func fetchPaymentMigration9(bucket kvdb.ReadBucket) (*Payment, error) {
var ( var (
err error err error
p = &Payment{} p = &Payment{}

View File

@ -4,15 +4,15 @@ import (
"bytes" "bytes"
"io" "io"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
// MigrateRouteSerialization migrates the way we serialize routes across the // MigrateRouteSerialization migrates the way we serialize routes across the
// entire database. At the time of writing of this migration, this includes our // entire database. At the time of writing of this migration, this includes our
// payment attempts, as well as the payment results in mission control. // payment attempts, as well as the payment results in mission control.
func MigrateRouteSerialization(tx *bbolt.Tx) error { func MigrateRouteSerialization(tx kvdb.RwTx) error {
// First, we'll do all the payment attempts. // First, we'll do all the payment attempts.
rootPaymentBucket := tx.Bucket(paymentsRootBucket) rootPaymentBucket := tx.ReadWriteBucket(paymentsRootBucket)
if rootPaymentBucket == nil { if rootPaymentBucket == nil {
return nil return nil
} }
@ -36,7 +36,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we have all the payment hashes, we can carry out the // Now that we have all the payment hashes, we can carry out the
// migration itself. // migration itself.
for _, payHash := range payHashes { for _, payHash := range payHashes {
payHashBucket := rootPaymentBucket.Bucket(payHash) payHashBucket := rootPaymentBucket.NestedReadWriteBucket(payHash)
// First, we'll migrate the main (non duplicate) payment to // First, we'll migrate the main (non duplicate) payment to
// this hash. // this hash.
@ -47,7 +47,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now that we've migrated the main payment, we'll also check // Now that we've migrated the main payment, we'll also check
// for any duplicate payments to the same payment hash. // for any duplicate payments to the same payment hash.
dupBucket := payHashBucket.Bucket(paymentDuplicateBucket) dupBucket := payHashBucket.NestedReadWriteBucket(paymentDuplicateBucket)
// If there's no dup bucket, then we can move on to the next // If there's no dup bucket, then we can move on to the next
// payment. // payment.
@ -69,7 +69,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// Now in this second pass, we'll re-serialize their duplicate // Now in this second pass, we'll re-serialize their duplicate
// payment attempts under the new encoding. // payment attempts under the new encoding.
for _, seqNo := range dupSeqNos { for _, seqNo := range dupSeqNos {
dupPayHashBucket := dupBucket.Bucket(seqNo) dupPayHashBucket := dupBucket.NestedReadWriteBucket(seqNo)
err := migrateAttemptEncoding(tx, dupPayHashBucket) err := migrateAttemptEncoding(tx, dupPayHashBucket)
if err != nil { if err != nil {
return err return err
@ -83,8 +83,8 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
"existing data") "existing data")
resultsKey := []byte("missioncontrol-results") resultsKey := []byte("missioncontrol-results")
err = tx.DeleteBucket(resultsKey) err = tx.DeleteTopLevelBucket(resultsKey)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
@ -95,7 +95,7 @@ func MigrateRouteSerialization(tx *bbolt.Tx) error {
// migrateAttemptEncoding migrates payment attempts using the legacy format to // migrateAttemptEncoding migrates payment attempts using the legacy format to
// the new format. // the new format.
func migrateAttemptEncoding(tx *bbolt.Tx, payHashBucket *bbolt.Bucket) error { func migrateAttemptEncoding(tx kvdb.RwTx, payHashBucket kvdb.RwBucket) error {
payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey) payAttemptBytes := payHashBucket.Get(paymentAttemptInfoKey)
if payAttemptBytes == nil { if payAttemptBytes == nil {
return nil return nil

View File

@ -8,7 +8,7 @@ import (
bitcoinCfg "github.com/btcsuite/btcd/chaincfg" bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/zpay32" "github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
@ -16,10 +16,10 @@ import (
// MigrateInvoices adds invoice htlcs and a separate cltv delta field to the // MigrateInvoices adds invoice htlcs and a separate cltv delta field to the
// invoices. // invoices.
func MigrateInvoices(tx *bbolt.Tx) error { func MigrateInvoices(tx kvdb.RwTx) error {
log.Infof("Migrating invoices to new invoice format") log.Infof("Migrating invoices to new invoice format")
invoiceB := tx.Bucket(invoiceBucket) invoiceB := tx.ReadWriteBucket(invoiceBucket)
if invoiceB == nil { if invoiceB == nil {
return nil return nil
} }

View File

@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
bitcoinCfg "github.com/btcsuite/btcd/chaincfg" bitcoinCfg "github.com/btcsuite/btcd/chaincfg"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/zpay32" "github.com/lightningnetwork/lnd/zpay32"
litecoinCfg "github.com/ltcsuite/ltcd/chaincfg" litecoinCfg "github.com/ltcsuite/ltcd/chaincfg"
) )
@ -26,8 +26,8 @@ var (
// beforeMigrationFuncV11 insert the test invoices in the database. // beforeMigrationFuncV11 insert the test invoices in the database.
func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) { func beforeMigrationFuncV11(t *testing.T, d *DB, invoices []Invoice) {
err := d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
invoicesBucket, err := tx.CreateBucketIfNotExists( invoicesBucket, err := tx.CreateTopLevelBucket(
invoiceBucket, invoiceBucket,
) )
if err != nil { if err != nil {

View File

@ -7,7 +7,7 @@ import (
"fmt" "fmt"
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -16,11 +16,11 @@ import (
// (one for nodes and one for edges) to keep track of the last time a node or // (one for nodes and one for edges) to keep track of the last time a node or
// edge was updated on the network. These new indexes allow us to implement the // edge was updated on the network. These new indexes allow us to implement the
// new graph sync protocol added. // new graph sync protocol added.
func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error { func MigrateNodeAndEdgeUpdateIndex(tx kvdb.RwTx) error {
// First, we'll populating the node portion of the new index. Before we // First, we'll populating the node portion of the new index. Before we
// can add new values to the index, we'll first create the new bucket // can add new values to the index, we'll first create the new bucket
// where these items will be housed. // where these items will be housed.
nodes, err := tx.CreateBucketIfNotExists(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return fmt.Errorf("unable to create node bucket: %v", err) return fmt.Errorf("unable to create node bucket: %v", err)
} }
@ -64,7 +64,7 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// With the set of nodes updated, we'll now update all edges to have a // With the set of nodes updated, we'll now update all edges to have a
// corresponding entry in the edge update index. // corresponding entry in the edge update index.
edges, err := tx.CreateBucketIfNotExists(edgeBucket) edges, err := tx.CreateTopLevelBucket(edgeBucket)
if err != nil { if err != nil {
return fmt.Errorf("unable to create edge bucket: %v", err) return fmt.Errorf("unable to create edge bucket: %v", err)
} }
@ -121,8 +121,8 @@ func MigrateNodeAndEdgeUpdateIndex(tx *bbolt.Tx) error {
// invoices an index in the add and/or the settle index. Additionally, all // invoices an index in the add and/or the settle index. Additionally, all
// existing invoices will have their bytes padded out in order to encode the // existing invoices will have their bytes padded out in order to encode the
// add+settle index as well as the amount paid. // add+settle index as well as the amount paid.
func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error { func MigrateInvoiceTimeSeries(tx kvdb.RwTx) error {
invoices, err := tx.CreateBucketIfNotExists(invoiceBucket) invoices, err := tx.CreateTopLevelBucket(invoiceBucket)
if err != nil { if err != nil {
return err return err
} }
@ -258,8 +258,8 @@ func MigrateInvoiceTimeSeries(tx *bbolt.Tx) error {
// migrateInvoiceTimeSeries migration. As at the time of writing, the // migrateInvoiceTimeSeries migration. As at the time of writing, the
// OutgoingPayment struct embeddeds an instance of the Invoice struct. As a // OutgoingPayment struct embeddeds an instance of the Invoice struct. As a
// result, we also need to migrate the internal invoice to the new format. // result, we also need to migrate the internal invoice to the new format.
func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error { func MigrateInvoiceTimeSeriesOutgoingPayments(tx kvdb.RwTx) error {
payBucket := tx.Bucket(paymentBucket) payBucket := tx.ReadWriteBucket(paymentBucket)
if payBucket == nil { if payBucket == nil {
return nil return nil
} }
@ -339,18 +339,18 @@ func MigrateInvoiceTimeSeriesOutgoingPayments(tx *bbolt.Tx) error {
// bucket. It ensure that edges with unknown policies will also have an entry // bucket. It ensure that edges with unknown policies will also have an entry
// in the bucket. After the migration, there will be two edge entries for // in the bucket. After the migration, there will be two edge entries for
// every channel, regardless of whether the policies are known. // every channel, regardless of whether the policies are known.
func MigrateEdgePolicies(tx *bbolt.Tx) error { func MigrateEdgePolicies(tx kvdb.RwTx) error {
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return nil return nil
} }
edges := tx.Bucket(edgeBucket) edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil { if edges == nil {
return nil return nil
} }
edgeIndex := edges.Bucket(edgeIndexBucket) edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
if edgeIndex == nil { if edgeIndex == nil {
return nil return nil
} }
@ -411,10 +411,10 @@ func MigrateEdgePolicies(tx *bbolt.Tx) error {
// PaymentStatusesMigration is a database migration intended for adding payment // PaymentStatusesMigration is a database migration intended for adding payment
// statuses for each existing payment entity in bucket to be able control // statuses for each existing payment entity in bucket to be able control
// transitions of statuses and prevent cases such as double payment // transitions of statuses and prevent cases such as double payment
func PaymentStatusesMigration(tx *bbolt.Tx) error { func PaymentStatusesMigration(tx kvdb.RwTx) error {
// Get the bucket dedicated to storing statuses of payments, // Get the bucket dedicated to storing statuses of payments,
// where a key is payment hash, value is payment status. // where a key is payment hash, value is payment status.
paymentStatuses, err := tx.CreateBucketIfNotExists(paymentStatusBucket) paymentStatuses, err := tx.CreateTopLevelBucket(paymentStatusBucket)
if err != nil { if err != nil {
return err return err
} }
@ -422,7 +422,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Migrating database to support payment statuses") log.Infof("Migrating database to support payment statuses")
circuitAddKey := []byte("circuit-adds") circuitAddKey := []byte("circuit-adds")
circuits := tx.Bucket(circuitAddKey) circuits := tx.ReadWriteBucket(circuitAddKey)
if circuits != nil { if circuits != nil {
log.Infof("Marking all known circuits with status InFlight") log.Infof("Marking all known circuits with status InFlight")
@ -455,7 +455,7 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
log.Infof("Marking all existing payments with status Completed") log.Infof("Marking all existing payments with status Completed")
// Get the bucket dedicated to storing payments // Get the bucket dedicated to storing payments
bucket := tx.Bucket(paymentBucket) bucket := tx.ReadWriteBucket(paymentBucket)
if bucket == nil { if bucket == nil {
return nil return nil
} }
@ -498,14 +498,14 @@ func PaymentStatusesMigration(tx *bbolt.Tx) error {
// migration also fixes the case where the public keys within edge policies were // migration also fixes the case where the public keys within edge policies were
// being serialized with an extra byte, causing an even greater error when // being serialized with an extra byte, causing an even greater error when
// attempting to perform the offset calculation described earlier. // attempting to perform the offset calculation described earlier.
func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error { func MigratePruneEdgeUpdateIndex(tx kvdb.RwTx) error {
// To begin the migration, we'll retrieve the update index bucket. If it // To begin the migration, we'll retrieve the update index bucket. If it
// does not exist, we have nothing left to do so we can simply exit. // does not exist, we have nothing left to do so we can simply exit.
edges := tx.Bucket(edgeBucket) edges := tx.ReadWriteBucket(edgeBucket)
if edges == nil { if edges == nil {
return nil return nil
} }
edgeUpdateIndex := edges.Bucket(edgeUpdateIndexBucket) edgeUpdateIndex := edges.NestedReadWriteBucket(edgeUpdateIndexBucket)
if edgeUpdateIndex == nil { if edgeUpdateIndex == nil {
return nil return nil
} }
@ -521,7 +521,7 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
return fmt.Errorf("unable to create/fetch edge index " + return fmt.Errorf("unable to create/fetch edge index " +
"bucket") "bucket")
} }
nodes, err := tx.CreateBucketIfNotExists(nodeBucket) nodes, err := tx.CreateTopLevelBucket(nodeBucket)
if err != nil { if err != nil {
return fmt.Errorf("unable to make node bucket") return fmt.Errorf("unable to make node bucket")
} }
@ -612,8 +612,8 @@ func MigratePruneEdgeUpdateIndex(tx *bbolt.Tx) error {
// MigrateOptionalChannelCloseSummaryFields migrates the serialized format of // MigrateOptionalChannelCloseSummaryFields migrates the serialized format of
// ChannelCloseSummary to a format where optional fields' presence is indicated // ChannelCloseSummary to a format where optional fields' presence is indicated
// with boolean markers. // with boolean markers.
func MigrateOptionalChannelCloseSummaryFields(tx *bbolt.Tx) error { func MigrateOptionalChannelCloseSummaryFields(tx kvdb.RwTx) error {
closedChanBucket := tx.Bucket(closedChannelBucket) closedChanBucket := tx.ReadWriteBucket(closedChannelBucket)
if closedChanBucket == nil { if closedChanBucket == nil {
return nil return nil
} }
@ -671,11 +671,11 @@ var messageStoreBucket = []byte("message-store")
// MigrateGossipMessageStoreKeys migrates the key format for gossip messages // MigrateGossipMessageStoreKeys migrates the key format for gossip messages
// found in the message store to a new one that takes into consideration the of // found in the message store to a new one that takes into consideration the of
// the message being stored. // the message being stored.
func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error { func MigrateGossipMessageStoreKeys(tx kvdb.RwTx) error {
// We'll start by retrieving the bucket in which these messages are // We'll start by retrieving the bucket in which these messages are
// stored within. If there isn't one, there's nothing left for us to do // stored within. If there isn't one, there's nothing left for us to do
// so we can avoid the migration. // so we can avoid the migration.
messageStore := tx.Bucket(messageStoreBucket) messageStore := tx.ReadWriteBucket(messageStoreBucket)
if messageStore == nil { if messageStore == nil {
return nil return nil
} }
@ -747,10 +747,10 @@ func MigrateGossipMessageStoreKeys(tx *bbolt.Tx) error {
// InFlight (we have no PaymentAttemptInfo available for pre-migration // InFlight (we have no PaymentAttemptInfo available for pre-migration
// payments) we delete those statuses, so only Completed payments remain in the // payments) we delete those statuses, so only Completed payments remain in the
// new bucket structure. // new bucket structure.
func MigrateOutgoingPayments(tx *bbolt.Tx) error { func MigrateOutgoingPayments(tx kvdb.RwTx) error {
log.Infof("Migrating outgoing payments to new bucket structure") log.Infof("Migrating outgoing payments to new bucket structure")
oldPayments := tx.Bucket(paymentBucket) oldPayments := tx.ReadWriteBucket(paymentBucket)
// Return early if there are no payments to migrate. // Return early if there are no payments to migrate.
if oldPayments == nil { if oldPayments == nil {
@ -758,7 +758,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
return nil return nil
} }
newPayments, err := tx.CreateBucket(paymentsRootBucket) newPayments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil { if err != nil {
return err return err
} }
@ -767,7 +767,7 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// only attempt to fetch it if needed. // only attempt to fetch it if needed.
sourcePub := func() ([33]byte, error) { sourcePub := func() ([33]byte, error) {
var pub [33]byte var pub [33]byte
nodes := tx.Bucket(nodeBucket) nodes := tx.ReadWriteBucket(nodeBucket)
if nodes == nil { if nodes == nil {
return pub, ErrGraphNotFound return pub, ErrGraphNotFound
} }
@ -862,8 +862,8 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// from a database containing duplicate payments to a payment // from a database containing duplicate payments to a payment
// hash. To keep this information, we store such duplicate // hash. To keep this information, we store such duplicate
// payments in a sub-bucket. // payments in a sub-bucket.
if err == bbolt.ErrBucketExists { if err == kvdb.ErrBucketExists {
pHashBucket := newPayments.Bucket(paymentHash[:]) pHashBucket := newPayments.NestedReadWriteBucket(paymentHash[:])
// Create a bucket for duplicate payments within this // Create a bucket for duplicate payments within this
// payment hash's bucket. // payment hash's bucket.
@ -922,14 +922,14 @@ func MigrateOutgoingPayments(tx *bbolt.Tx) error {
// Now we delete the old buckets. Deleting the payment status buckets // Now we delete the old buckets. Deleting the payment status buckets
// deletes all payment statuses other than Complete. // deletes all payment statuses other than Complete.
err = tx.DeleteBucket(paymentStatusBucket) err = tx.DeleteTopLevelBucket(paymentStatusBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
// Finally delete the old payment bucket. // Finally delete the old payment bucket.
err = tx.DeleteBucket(paymentBucket) err = tx.DeleteTopLevelBucket(paymentBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }

View File

@ -11,9 +11,9 @@ import (
"time" "time"
"github.com/btcsuite/btcutil" "github.com/btcsuite/btcutil"
"github.com/coreos/bbolt"
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -59,8 +59,8 @@ func TestPaymentStatusesMigration(t *testing.T) {
// locally-sourced payment should end up with an InFlight // locally-sourced payment should end up with an InFlight
// status, while the other should remain unchanged, which // status, while the other should remain unchanged, which
// defaults to Grounded. // defaults to Grounded.
err = d.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
circuits, err := tx.CreateBucketIfNotExists( circuits, err := tx.CreateTopLevelBucket(
[]byte("circuit-adds"), []byte("circuit-adds"),
) )
if err != nil { if err != nil {
@ -377,8 +377,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
// Get the old serialization format for this test's // Get the old serialization format for this test's
// close summary, and it to the closed channel bucket. // close summary, and it to the closed channel bucket.
old := test.oldSerialization(test.closeSummary) old := test.oldSerialization(test.closeSummary)
err = d.Update(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
closedChanBucket, err := tx.CreateBucketIfNotExists( closedChanBucket, err := tx.CreateTopLevelBucket(
closedChannelBucket, closedChannelBucket,
) )
if err != nil { if err != nil {
@ -404,8 +404,8 @@ func TestMigrateOptionalChannelCloseSummaryFields(t *testing.T) {
newSerialization := b.Bytes() newSerialization := b.Bytes()
var dbSummary []byte var dbSummary []byte
err = d.View(func(tx *bbolt.Tx) error { err = kvdb.View(d, func(tx kvdb.ReadTx) error {
closedChanBucket := tx.Bucket(closedChannelBucket) closedChanBucket := tx.ReadBucket(closedChannelBucket)
if closedChanBucket == nil { if closedChanBucket == nil {
return errors.New("unable to find bucket") return errors.New("unable to find bucket")
} }
@ -482,8 +482,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
t.Fatalf("unable to serialize message: %v", err) t.Fatalf("unable to serialize message: %v", err)
} }
err := db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(db, func(tx kvdb.RwTx) error {
messageStore, err := tx.CreateBucketIfNotExists( messageStore, err := tx.CreateTopLevelBucket(
messageStoreBucket, messageStoreBucket,
) )
if err != nil { if err != nil {
@ -503,8 +503,8 @@ func TestMigrateGossipMessageStoreKeys(t *testing.T) {
// 3. The message matches the original. // 3. The message matches the original.
afterMigration := func(db *DB) { afterMigration := func(db *DB) {
var rawMsg []byte var rawMsg []byte
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
messageStore := tx.Bucket(messageStoreBucket) messageStore := tx.ReadBucket(messageStoreBucket)
if messageStore == nil { if messageStore == nil {
return errors.New("message store bucket not " + return errors.New("message store bucket not " +
"found") "found")
@ -666,8 +666,8 @@ func TestOutgoingPaymentsMigration(t *testing.T) {
// Finally, check that the payment sequence number is updated // Finally, check that the payment sequence number is updated
// to reflect the migrated payments. // to reflect the migrated payments.
err = d.View(func(tx *bbolt.Tx) error { err = kvdb.Update(d, func(tx kvdb.RwTx) error {
payments := tx.Bucket(paymentsRootBucket) payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil { if payments == nil {
return fmt.Errorf("payments bucket not found") return fmt.Errorf("payments bucket not found")
} }
@ -746,8 +746,8 @@ func TestPaymentRouteSerialization(t *testing.T) {
// We'll first add a series of fake payments, using the existing legacy // We'll first add a series of fake payments, using the existing legacy
// serialization format. // serialization format.
beforeMigrationFunc := func(d *DB) { beforeMigrationFunc := func(d *DB) {
err := d.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(d, func(tx kvdb.RwTx) error {
paymentsBucket, err := tx.CreateBucket( paymentsBucket, err := tx.CreateTopLevelBucket(
paymentsRootBucket, paymentsRootBucket,
) )
if err != nil { if err != nil {
@ -798,7 +798,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
// the proper bucket. If this is the duplicate // the proper bucket. If this is the duplicate
// payment, then we'll grab the dup bucket, // payment, then we'll grab the dup bucket,
// otherwise, we'll use the top level bucket. // otherwise, we'll use the top level bucket.
var payHashBucket *bbolt.Bucket var payHashBucket kvdb.RwBucket
if i < numPayments-1 { if i < numPayments-1 {
payHashBucket, err = paymentsBucket.CreateBucket( payHashBucket, err = paymentsBucket.CreateBucket(
payInfo.PaymentHash[:], payInfo.PaymentHash[:],
@ -807,7 +807,7 @@ func TestPaymentRouteSerialization(t *testing.T) {
t.Fatalf("unable to create payments bucket: %v", err) t.Fatalf("unable to create payments bucket: %v", err)
} }
} else { } else {
payHashBucket = paymentsBucket.Bucket( payHashBucket = paymentsBucket.NestedReadWriteBucket(
payInfo.PaymentHash[:], payInfo.PaymentHash[:],
) )
dupPayBucket, err := payHashBucket.CreateBucket( dupPayBucket, err := payHashBucket.CreateBucket(

View File

@ -1,12 +1,10 @@
package migration_01_to_11 package migration_01_to_11
import ( import "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/coreos/bbolt"
)
// fetchPaymentStatus fetches the payment status of the payment. If the payment // fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown". // isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) PaymentStatus { func fetchPaymentStatus(bucket kvdb.ReadBucket) PaymentStatus {
if bucket.Get(paymentSettleInfoKey) != nil { if bucket.Get(paymentSettleInfoKey) != nil {
return StatusSucceeded return StatusSucceeded
} }

View File

@ -11,7 +11,7 @@ import (
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/tlv" "github.com/lightningnetwork/lnd/tlv"
@ -254,14 +254,14 @@ type Payment struct {
func (db *DB) FetchPayments() ([]*Payment, error) { func (db *DB) FetchPayments() ([]*Payment, error) {
var payments []*Payment var payments []*Payment
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket) paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil { if paymentsBucket == nil {
return nil return nil
} }
return paymentsBucket.ForEach(func(k, v []byte) error { return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k) bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil { if bucket == nil {
// We only expect sub-buckets to be found in // We only expect sub-buckets to be found in
// this top-level bucket. // this top-level bucket.
@ -280,13 +280,13 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
// payment has was possible. These will be found in a // payment has was possible. These will be found in a
// sub-bucket indexed by their sequence number if // sub-bucket indexed by their sequence number if
// available. // available.
dup := bucket.Bucket(paymentDuplicateBucket) dup := bucket.NestedReadBucket(paymentDuplicateBucket)
if dup == nil { if dup == nil {
return nil return nil
} }
return dup.ForEach(func(k, v []byte) error { return dup.ForEach(func(k, v []byte) error {
subBucket := dup.Bucket(k) subBucket := dup.NestedReadBucket(k)
if subBucket == nil { if subBucket == nil {
// We one bucket for each duplicate to // We one bucket for each duplicate to
// be found. // be found.
@ -316,7 +316,7 @@ func (db *DB) FetchPayments() ([]*Payment, error) {
return payments, nil return payments, nil
} }
func fetchPayment(bucket *bbolt.Bucket) (*Payment, error) { func fetchPayment(bucket kvdb.ReadBucket) (*Payment, error) {
var ( var (
err error err error
p = &Payment{} p = &Payment{}

View File

@ -6,13 +6,13 @@ import (
"os" "os"
"testing" "testing"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
// MakeDB creates a new instance of the ChannelDB for testing purposes. A // MakeDB creates a new instance of the ChannelDB for testing purposes. A
// callback which cleans up the created temporary directories is also returned // callback which cleans up the created temporary directories is also returned
// and intended to be executed after the test completes. // and intended to be executed after the test completes.
func MakeDB() (*bbolt.DB, func(), error) { func MakeDB() (kvdb.Backend, func(), error) {
// Create temporary database for mission control. // Create temporary database for mission control.
file, err := ioutil.TempFile("", "*.db") file, err := ioutil.TempFile("", "*.db")
if err != nil { if err != nil {
@ -20,7 +20,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
} }
dbPath := file.Name() dbPath := file.Name()
db, err := bbolt.Open(dbPath, 0600, nil) db, err := kvdb.Open(kvdb.BoltBackendName, dbPath, true)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }
@ -36,7 +36,7 @@ func MakeDB() (*bbolt.DB, func(), error) {
// ApplyMigration is a helper test function that encapsulates the general steps // ApplyMigration is a helper test function that encapsulates the general steps
// which are needed to properly check the result of applying migration function. // which are needed to properly check the result of applying migration function.
func ApplyMigration(t *testing.T, func ApplyMigration(t *testing.T,
beforeMigration, afterMigration, migrationFunc func(tx *bbolt.Tx) error, beforeMigration, afterMigration, migrationFunc func(tx kvdb.RwTx) error,
shouldFail bool) { shouldFail bool) {
cdb, cleanUp, err := MakeDB() cdb, cleanUp, err := MakeDB()
@ -47,7 +47,7 @@ func ApplyMigration(t *testing.T,
// beforeMigration usually used for populating the database // beforeMigration usually used for populating the database
// with test data. // with test data.
err = cdb.Update(beforeMigration) err = kvdb.Update(cdb, beforeMigration)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
@ -65,14 +65,14 @@ func ApplyMigration(t *testing.T,
// afterMigration usually used for checking the database state and // afterMigration usually used for checking the database state and
// throwing the error if something went wrong. // throwing the error if something went wrong.
err = cdb.Update(afterMigration) err = kvdb.Update(cdb, afterMigration)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
}() }()
// Apply migration. // Apply migration.
err = cdb.Update(migrationFunc) err = kvdb.Update(cdb, migrationFunc)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }

View File

@ -7,7 +7,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
// DumpDB dumps go code describing the contents of the database to stdout. This // DumpDB dumps go code describing the contents of the database to stdout. This
@ -21,8 +21,8 @@ import (
// hex("1111"): hex("5783492373"), // hex("1111"): hex("5783492373"),
// }, // },
// } // }
func DumpDB(tx *bbolt.Tx, rootKey []byte) error { func DumpDB(tx kvdb.ReadTx, rootKey []byte) error {
bucket := tx.Bucket(rootKey) bucket := tx.ReadBucket(rootKey)
if bucket == nil { if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey)) return fmt.Errorf("bucket %v not found", string(rootKey))
} }
@ -30,13 +30,13 @@ func DumpDB(tx *bbolt.Tx, rootKey []byte) error {
return dumpBucket(bucket) return dumpBucket(bucket)
} }
func dumpBucket(bucket *bbolt.Bucket) error { func dumpBucket(bucket kvdb.ReadBucket) error {
fmt.Printf("map[string]interface{} {\n") fmt.Printf("map[string]interface{} {\n")
err := bucket.ForEach(func(k, v []byte) error { err := bucket.ForEach(func(k, v []byte) error {
key := toString(k) key := toString(k)
fmt.Printf("%v: ", key) fmt.Printf("%v: ", key)
subBucket := bucket.Bucket(k) subBucket := bucket.NestedReadBucket(k)
if subBucket != nil { if subBucket != nil {
err := dumpBucket(subBucket) err := dumpBucket(subBucket)
if err != nil { if err != nil {
@ -58,8 +58,8 @@ func dumpBucket(bucket *bbolt.Bucket) error {
} }
// RestoreDB primes the database with the given data set. // RestoreDB primes the database with the given data set.
func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error { func RestoreDB(tx kvdb.RwTx, rootKey []byte, data map[string]interface{}) error {
bucket, err := tx.CreateBucket(rootKey) bucket, err := tx.CreateTopLevelBucket(rootKey)
if err != nil { if err != nil {
return err return err
} }
@ -67,7 +67,7 @@ func RestoreDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error
return restoreDB(bucket, data) return restoreDB(bucket, data)
} }
func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error { func restoreDB(bucket kvdb.RwBucket, data map[string]interface{}) error {
for k, v := range data { for k, v := range data {
key := []byte(k) key := []byte(k)
@ -100,8 +100,8 @@ func restoreDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
} }
// VerifyDB verifies the database against the given data set. // VerifyDB verifies the database against the given data set.
func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error { func VerifyDB(tx kvdb.ReadTx, rootKey []byte, data map[string]interface{}) error {
bucket := tx.Bucket(rootKey) bucket := tx.ReadBucket(rootKey)
if bucket == nil { if bucket == nil {
return fmt.Errorf("bucket %v not found", string(rootKey)) return fmt.Errorf("bucket %v not found", string(rootKey))
} }
@ -109,7 +109,7 @@ func VerifyDB(tx *bbolt.Tx, rootKey []byte, data map[string]interface{}) error {
return verifyDB(bucket, data) return verifyDB(bucket, data)
} }
func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error { func verifyDB(bucket kvdb.ReadBucket, data map[string]interface{}) error {
for k, v := range data { for k, v := range data {
key := []byte(k) key := []byte(k)
@ -126,7 +126,7 @@ func verifyDB(bucket *bbolt.Bucket, data map[string]interface{}) error {
// Key contains a sub-bucket. // Key contains a sub-bucket.
case map[string]interface{}: case map[string]interface{}:
subBucket := bucket.Bucket(key) subBucket := bucket.NestedReadBucket(key)
if subBucket == nil { if subBucket == nil {
return fmt.Errorf("bucket %v not found", k) return fmt.Errorf("bucket %v not found", k)
} }

View File

@ -8,7 +8,7 @@ import (
"github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/btcec"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
) )
var ( var (
@ -101,8 +101,8 @@ func (l *LinkNode) Sync() error {
// Finally update the database by storing the link node and updating // Finally update the database by storing the link node and updating
// any relevant indexes. // any relevant indexes.
return l.db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(l.db, func(tx kvdb.RwTx) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket) nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil { if nodeMetaBucket == nil {
return ErrLinkNodesNotFound return ErrLinkNodesNotFound
} }
@ -114,7 +114,7 @@ func (l *LinkNode) Sync() error {
// putLinkNode serializes then writes the encoded version of the passed link // putLinkNode serializes then writes the encoded version of the passed link
// node into the nodeMetaBucket. This function is provided in order to allow // node into the nodeMetaBucket. This function is provided in order to allow
// the ability to re-use a database transaction across many operations. // the ability to re-use a database transaction across many operations.
func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error { func putLinkNode(nodeMetaBucket kvdb.RwBucket, l *LinkNode) error {
// First serialize the LinkNode into its raw-bytes encoding. // First serialize the LinkNode into its raw-bytes encoding.
var b bytes.Buffer var b bytes.Buffer
if err := serializeLinkNode(&b, l); err != nil { if err := serializeLinkNode(&b, l); err != nil {
@ -130,13 +130,13 @@ func putLinkNode(nodeMetaBucket *bbolt.Bucket, l *LinkNode) error {
// DeleteLinkNode removes the link node with the given identity from the // DeleteLinkNode removes the link node with the given identity from the
// database. // database.
func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error { func (db *DB) DeleteLinkNode(identity *btcec.PublicKey) error {
return db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(db, func(tx kvdb.RwTx) error {
return db.deleteLinkNode(tx, identity) return db.deleteLinkNode(tx, identity)
}) })
} }
func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error { func (db *DB) deleteLinkNode(tx kvdb.RwTx, identity *btcec.PublicKey) error {
nodeMetaBucket := tx.Bucket(nodeInfoBucket) nodeMetaBucket := tx.ReadWriteBucket(nodeInfoBucket)
if nodeMetaBucket == nil { if nodeMetaBucket == nil {
return ErrLinkNodesNotFound return ErrLinkNodesNotFound
} }
@ -150,7 +150,7 @@ func (db *DB) deleteLinkNode(tx *bbolt.Tx, identity *btcec.PublicKey) error {
// key cannot be found, then ErrNodeNotFound if returned. // key cannot be found, then ErrNodeNotFound if returned.
func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) { func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
var linkNode *LinkNode var linkNode *LinkNode
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
node, err := fetchLinkNode(tx, identity) node, err := fetchLinkNode(tx, identity)
if err != nil { if err != nil {
return err return err
@ -163,10 +163,10 @@ func (db *DB) FetchLinkNode(identity *btcec.PublicKey) (*LinkNode, error) {
return linkNode, err return linkNode, err
} }
func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error) { func fetchLinkNode(tx kvdb.ReadTx, targetPub *btcec.PublicKey) (*LinkNode, error) {
// First fetch the bucket for storing node metadata, bailing out early // First fetch the bucket for storing node metadata, bailing out early
// if it hasn't been created yet. // if it hasn't been created yet.
nodeMetaBucket := tx.Bucket(nodeInfoBucket) nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil { if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound return nil, ErrLinkNodesNotFound
} }
@ -191,7 +191,7 @@ func fetchLinkNode(tx *bbolt.Tx, targetPub *btcec.PublicKey) (*LinkNode, error)
// whom we have active channels with. // whom we have active channels with.
func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) { func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
var linkNodes []*LinkNode var linkNodes []*LinkNode
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
nodes, err := db.fetchAllLinkNodes(tx) nodes, err := db.fetchAllLinkNodes(tx)
if err != nil { if err != nil {
return err return err
@ -209,8 +209,8 @@ func (db *DB) FetchAllLinkNodes() ([]*LinkNode, error) {
// fetchAllLinkNodes uses an existing database transaction to fetch all nodes // fetchAllLinkNodes uses an existing database transaction to fetch all nodes
// with whom we have active channels with. // with whom we have active channels with.
func (db *DB) fetchAllLinkNodes(tx *bbolt.Tx) ([]*LinkNode, error) { func (db *DB) fetchAllLinkNodes(tx kvdb.ReadTx) ([]*LinkNode, error) {
nodeMetaBucket := tx.Bucket(nodeInfoBucket) nodeMetaBucket := tx.ReadBucket(nodeInfoBucket)
if nodeMetaBucket == nil { if nodeMetaBucket == nil {
return nil, ErrLinkNodesNotFound return nil, ErrLinkNodesNotFound
} }

View File

@ -6,7 +6,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
) )
@ -65,7 +65,7 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
infoBytes := b.Bytes() infoBytes := b.Bytes()
var updateErr error var updateErr error
err := p.db.Batch(func(tx *bbolt.Tx) error { err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error // Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction. // from a previous execution of the batched db transaction.
updateErr = nil updateErr = nil
@ -130,8 +130,8 @@ func (p *PaymentControl) InitPayment(paymentHash lntypes.Hash,
// We'll delete any lingering HTLCs to start with, in case we // We'll delete any lingering HTLCs to start with, in case we
// are initializing a payment that was attempted earlier, but // are initializing a payment that was attempted earlier, but
// left in a state where we could retry. // left in a state where we could retry.
err = bucket.DeleteBucket(paymentHtlcsBucket) err = bucket.DeleteNestedBucket(paymentHtlcsBucket)
if err != nil && err != bbolt.ErrBucketNotFound { if err != nil && err != kvdb.ErrBucketNotFound {
return err return err
} }
@ -162,9 +162,8 @@ func (p *PaymentControl) RegisterAttempt(paymentHash lntypes.Hash,
htlcIDBytes := make([]byte, 8) htlcIDBytes := make([]byte, 8)
binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID) binary.BigEndian.PutUint64(htlcIDBytes, attempt.AttemptID)
return p.db.Update(func(tx *bbolt.Tx) error { return kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Get the payment bucket to register this new attempt in. bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
bucket, err := fetchPaymentBucket(tx, paymentHash)
if err != nil { if err != nil {
return err return err
} }
@ -234,10 +233,10 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
binary.BigEndian.PutUint64(htlcIDBytes, attemptID) binary.BigEndian.PutUint64(htlcIDBytes, attemptID)
var payment *MPPayment var payment *MPPayment
err := p.db.Batch(func(tx *bbolt.Tx) error { err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Fetch bucket that contains all information for the payment payment = nil
// with this hash.
bucket, err := fetchPaymentBucket(tx, paymentHash) bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err != nil { if err != nil {
return err return err
} }
@ -247,12 +246,12 @@ func (p *PaymentControl) updateHtlcKey(paymentHash lntypes.Hash,
return err return err
} }
htlcsBucket := bucket.Bucket(paymentHtlcsBucket) htlcsBucket := bucket.NestedReadWriteBucket(paymentHtlcsBucket)
if htlcsBucket == nil { if htlcsBucket == nil {
return fmt.Errorf("htlcs bucket not found") return fmt.Errorf("htlcs bucket not found")
} }
htlcBucket := htlcsBucket.Bucket(htlcIDBytes) htlcBucket := htlcsBucket.NestedReadWriteBucket(htlcIDBytes)
if htlcBucket == nil { if htlcBucket == nil {
return fmt.Errorf("HTLC with ID %v not registered", return fmt.Errorf("HTLC with ID %v not registered",
attemptID) attemptID)
@ -286,13 +285,13 @@ func (p *PaymentControl) Fail(paymentHash lntypes.Hash,
updateErr error updateErr error
payment *MPPayment payment *MPPayment
) )
err := p.db.Batch(func(tx *bbolt.Tx) error { err := kvdb.Batch(p.db.Backend, func(tx kvdb.RwTx) error {
// Reset the update error, to avoid carrying over an error // Reset the update error, to avoid carrying over an error
// from a previous execution of the batched db transaction. // from a previous execution of the batched db transaction.
updateErr = nil updateErr = nil
payment = nil payment = nil
bucket, err := fetchPaymentBucket(tx, paymentHash) bucket, err := fetchPaymentBucketUpdate(tx, paymentHash)
if err == ErrPaymentNotInitiated { if err == ErrPaymentNotInitiated {
updateErr = ErrPaymentNotInitiated updateErr = ErrPaymentNotInitiated
return nil return nil
@ -341,7 +340,7 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
*MPPayment, error) { *MPPayment, error) {
var payment *MPPayment var payment *MPPayment
err := p.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
bucket, err := fetchPaymentBucket(tx, paymentHash) bucket, err := fetchPaymentBucket(tx, paymentHash)
if err != nil { if err != nil {
return err return err
@ -360,10 +359,10 @@ func (p *PaymentControl) FetchPayment(paymentHash lntypes.Hash) (
// createPaymentBucket creates or fetches the sub-bucket assigned to this // createPaymentBucket creates or fetches the sub-bucket assigned to this
// payment hash. // payment hash.
func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( func createPaymentBucket(tx kvdb.RwTx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) { kvdb.RwBucket, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket) payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -373,15 +372,34 @@ func createPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If // fetchPaymentBucket fetches the sub-bucket assigned to this payment hash. If
// the bucket does not exist, it returns ErrPaymentNotInitiated. // the bucket does not exist, it returns ErrPaymentNotInitiated.
func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) ( func fetchPaymentBucket(tx kvdb.ReadTx, paymentHash lntypes.Hash) (
*bbolt.Bucket, error) { kvdb.ReadBucket, error) {
payments := tx.Bucket(paymentsRootBucket) payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil { if payments == nil {
return nil, ErrPaymentNotInitiated return nil, ErrPaymentNotInitiated
} }
bucket := payments.Bucket(paymentHash[:]) bucket := payments.NestedReadBucket(paymentHash[:])
if bucket == nil {
return nil, ErrPaymentNotInitiated
}
return bucket, nil
}
// fetchPaymentBucketUpdate is identical to fetchPaymentBucket, but it returns a
// bucket that can be written to.
func fetchPaymentBucketUpdate(tx kvdb.RwTx, paymentHash lntypes.Hash) (
kvdb.RwBucket, error) {
payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil {
return nil, ErrPaymentNotInitiated
}
bucket := payments.NestedReadWriteBucket(paymentHash[:])
if bucket == nil { if bucket == nil {
return nil, ErrPaymentNotInitiated return nil, ErrPaymentNotInitiated
} }
@ -391,8 +409,8 @@ func fetchPaymentBucket(tx *bbolt.Tx, paymentHash lntypes.Hash) (
// nextPaymentSequence returns the next sequence number to store for a new // nextPaymentSequence returns the next sequence number to store for a new
// payment. // payment.
func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) { func nextPaymentSequence(tx kvdb.RwTx) ([]byte, error) {
payments, err := tx.CreateBucketIfNotExists(paymentsRootBucket) payments, err := tx.CreateTopLevelBucket(paymentsRootBucket)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -409,8 +427,8 @@ func nextPaymentSequence(tx *bbolt.Tx) ([]byte, error) {
// fetchPaymentStatus fetches the payment status of the payment. If the payment // fetchPaymentStatus fetches the payment status of the payment. If the payment
// isn't found, it will default to "StatusUnknown". // isn't found, it will default to "StatusUnknown".
func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) { func fetchPaymentStatus(bucket kvdb.ReadBucket) (PaymentStatus, error) {
htlcsBucket := bucket.Bucket(paymentHtlcsBucket) htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil { if htlcsBucket != nil {
htlcs, err := fetchHtlcAttempts(htlcsBucket) htlcs, err := fetchHtlcAttempts(htlcsBucket)
if err != nil { if err != nil {
@ -424,7 +442,6 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
return StatusSucceeded, nil return StatusSucceeded, nil
} }
} }
} }
if bucket.Get(paymentFailInfoKey) != nil { if bucket.Get(paymentFailInfoKey) != nil {
@ -441,7 +458,7 @@ func fetchPaymentStatus(bucket *bbolt.Bucket) (PaymentStatus, error) {
// ensureInFlight checks whether the payment found in the given bucket has // ensureInFlight checks whether the payment found in the given bucket has
// status InFlight, and returns an error otherwise. This should be used to // status InFlight, and returns an error otherwise. This should be used to
// ensure we only mark in-flight payments as succeeded or failed. // ensure we only mark in-flight payments as succeeded or failed.
func ensureInFlight(bucket *bbolt.Bucket) error { func ensureInFlight(bucket kvdb.ReadBucket) error {
paymentStatus, err := fetchPaymentStatus(bucket) paymentStatus, err := fetchPaymentStatus(bucket)
if err != nil { if err != nil {
return err return err
@ -486,14 +503,14 @@ type InFlightPayment struct {
// FetchInFlightPayments returns all payments with status InFlight. // FetchInFlightPayments returns all payments with status InFlight.
func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) { func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
var inFlights []*InFlightPayment var inFlights []*InFlightPayment
err := p.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(p.db, func(tx kvdb.ReadTx) error {
payments := tx.Bucket(paymentsRootBucket) payments := tx.ReadBucket(paymentsRootBucket)
if payments == nil { if payments == nil {
return nil return nil
} }
return payments.ForEach(func(k, _ []byte) error { return payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k) bucket := payments.NestedReadBucket(k)
if bucket == nil { if bucket == nil {
return fmt.Errorf("non bucket element") return fmt.Errorf("non bucket element")
} }
@ -523,7 +540,9 @@ func (p *PaymentControl) FetchInFlightPayments() ([]*InFlightPayment, error) {
return err return err
} }
htlcsBucket := bucket.Bucket(paymentHtlcsBucket) htlcsBucket := bucket.NestedReadBucket(
paymentHtlcsBucket,
)
if htlcsBucket == nil { if htlcsBucket == nil {
return nil return nil
} }

View File

@ -9,7 +9,7 @@ import (
"time" "time"
"github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcd/wire"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
"github.com/lightningnetwork/lnd/record" "github.com/lightningnetwork/lnd/record"
@ -200,14 +200,14 @@ type PaymentCreationInfo struct {
func (db *DB) FetchPayments() ([]*MPPayment, error) { func (db *DB) FetchPayments() ([]*MPPayment, error) {
var payments []*MPPayment var payments []*MPPayment
err := db.View(func(tx *bbolt.Tx) error { err := kvdb.View(db, func(tx kvdb.ReadTx) error {
paymentsBucket := tx.Bucket(paymentsRootBucket) paymentsBucket := tx.ReadBucket(paymentsRootBucket)
if paymentsBucket == nil { if paymentsBucket == nil {
return nil return nil
} }
return paymentsBucket.ForEach(func(k, v []byte) error { return paymentsBucket.ForEach(func(k, v []byte) error {
bucket := paymentsBucket.Bucket(k) bucket := paymentsBucket.NestedReadBucket(k)
if bucket == nil { if bucket == nil {
// We only expect sub-buckets to be found in // We only expect sub-buckets to be found in
// this top-level bucket. // this top-level bucket.
@ -232,7 +232,6 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
} }
payments = append(payments, duplicatePayments...) payments = append(payments, duplicatePayments...)
return nil return nil
}) })
}) })
@ -248,7 +247,7 @@ func (db *DB) FetchPayments() ([]*MPPayment, error) {
return payments, nil return payments, nil
} }
func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) { func fetchPayment(bucket kvdb.ReadBucket) (*MPPayment, error) {
seqBytes := bucket.Get(paymentSequenceKey) seqBytes := bucket.Get(paymentSequenceKey)
if seqBytes == nil { if seqBytes == nil {
return nil, fmt.Errorf("sequence number not found") return nil, fmt.Errorf("sequence number not found")
@ -276,7 +275,7 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
} }
var htlcs []HTLCAttempt var htlcs []HTLCAttempt
htlcsBucket := bucket.Bucket(paymentHtlcsBucket) htlcsBucket := bucket.NestedReadBucket(paymentHtlcsBucket)
if htlcsBucket != nil { if htlcsBucket != nil {
// Get the payment attempts. This can be empty. // Get the payment attempts. This can be empty.
htlcs, err = fetchHtlcAttempts(htlcsBucket) htlcs, err = fetchHtlcAttempts(htlcsBucket)
@ -304,12 +303,12 @@ func fetchPayment(bucket *bbolt.Bucket) (*MPPayment, error) {
// fetchHtlcAttempts retrives all htlc attempts made for the payment found in // fetchHtlcAttempts retrives all htlc attempts made for the payment found in
// the given bucket. // the given bucket.
func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) { func fetchHtlcAttempts(bucket kvdb.ReadBucket) ([]HTLCAttempt, error) {
htlcs := make([]HTLCAttempt, 0) htlcs := make([]HTLCAttempt, 0)
err := bucket.ForEach(func(k, _ []byte) error { err := bucket.ForEach(func(k, _ []byte) error {
aid := byteOrder.Uint64(k) aid := byteOrder.Uint64(k)
htlcBucket := bucket.Bucket(k) htlcBucket := bucket.NestedReadBucket(k)
attemptInfo, err := fetchHtlcAttemptInfo( attemptInfo, err := fetchHtlcAttemptInfo(
htlcBucket, htlcBucket,
@ -347,7 +346,7 @@ func fetchHtlcAttempts(bucket *bbolt.Bucket) ([]HTLCAttempt, error) {
// fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the // fetchHtlcAttemptInfo fetches the payment attempt info for this htlc from the
// bucket. // bucket.
func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) { func fetchHtlcAttemptInfo(bucket kvdb.ReadBucket) (*HTLCAttemptInfo, error) {
b := bucket.Get(htlcAttemptInfoKey) b := bucket.Get(htlcAttemptInfoKey)
if b == nil { if b == nil {
return nil, errNoAttemptInfo return nil, errNoAttemptInfo
@ -359,7 +358,7 @@ func fetchHtlcAttemptInfo(bucket *bbolt.Bucket) (*HTLCAttemptInfo, error) {
// fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't // fetchHtlcSettleInfo retrieves the settle info for the htlc. If the htlc isn't
// settled, nil is returned. // settled, nil is returned.
func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) { func fetchHtlcSettleInfo(bucket kvdb.ReadBucket) (*HTLCSettleInfo, error) {
b := bucket.Get(htlcSettleInfoKey) b := bucket.Get(htlcSettleInfoKey)
if b == nil { if b == nil {
// Settle info is optional. // Settle info is optional.
@ -372,7 +371,7 @@ func fetchHtlcSettleInfo(bucket *bbolt.Bucket) (*HTLCSettleInfo, error) {
// fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't // fetchHtlcFailInfo retrieves the failure info for the htlc. If the htlc hasn't
// failed, nil is returned. // failed, nil is returned.
func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) { func fetchHtlcFailInfo(bucket kvdb.ReadBucket) (*HTLCFailInfo, error) {
b := bucket.Get(htlcFailInfoKey) b := bucket.Get(htlcFailInfoKey)
if b == nil { if b == nil {
// Fail info is optional. // Fail info is optional.
@ -385,15 +384,15 @@ func fetchHtlcFailInfo(bucket *bbolt.Bucket) (*HTLCFailInfo, error) {
// DeletePayments deletes all completed and failed payments from the DB. // DeletePayments deletes all completed and failed payments from the DB.
func (db *DB) DeletePayments() error { func (db *DB) DeletePayments() error {
return db.Update(func(tx *bbolt.Tx) error { return kvdb.Update(db, func(tx kvdb.RwTx) error {
payments := tx.Bucket(paymentsRootBucket) payments := tx.ReadWriteBucket(paymentsRootBucket)
if payments == nil { if payments == nil {
return nil return nil
} }
var deleteBuckets [][]byte var deleteBuckets [][]byte
err := payments.ForEach(func(k, _ []byte) error { err := payments.ForEach(func(k, _ []byte) error {
bucket := payments.Bucket(k) bucket := payments.NestedReadWriteBucket(k)
if bucket == nil { if bucket == nil {
// We only expect sub-buckets to be found in // We only expect sub-buckets to be found in
// this top-level bucket. // this top-level bucket.
@ -420,7 +419,7 @@ func (db *DB) DeletePayments() error {
} }
for _, k := range deleteBuckets { for _, k := range deleteBuckets {
if err := payments.DeleteBucket(k); err != nil { if err := payments.DeleteNestedBucket(k); err != nil {
return err return err
} }
} }

View File

@ -8,8 +8,8 @@ import (
"bytes" "bytes"
"github.com/coreos/bbolt"
"github.com/go-errors/errors" "github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/lnwire"
) )
@ -61,12 +61,12 @@ func (s *WaitingProofStore) Add(proof *WaitingProof) error {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
err := s.db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
var err error var err error
var b bytes.Buffer var b bytes.Buffer
// Get or create the bucket. // Get or create the bucket.
bucket, err := tx.CreateBucketIfNotExists(waitingProofsBucketKey) bucket, err := tx.CreateTopLevelBucket(waitingProofsBucketKey)
if err != nil { if err != nil {
return err return err
} }
@ -100,9 +100,9 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
return ErrWaitingProofNotFound return ErrWaitingProofNotFound
} }
err := s.db.Update(func(tx *bbolt.Tx) error { err := kvdb.Update(s.db, func(tx kvdb.RwTx) error {
// Get or create the top bucket. // Get or create the top bucket.
bucket := tx.Bucket(waitingProofsBucketKey) bucket := tx.ReadWriteBucket(waitingProofsBucketKey)
if bucket == nil { if bucket == nil {
return ErrWaitingProofNotFound return ErrWaitingProofNotFound
} }
@ -123,8 +123,8 @@ func (s *WaitingProofStore) Remove(key WaitingProofKey) error {
// ForAll iterates thought all waiting proofs and passing the waiting proof // ForAll iterates thought all waiting proofs and passing the waiting proof
// in the given callback. // in the given callback.
func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error { func (s *WaitingProofStore) ForAll(cb func(*WaitingProof) error) error {
return s.db.View(func(tx *bbolt.Tx) error { return kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.Bucket(waitingProofsBucketKey) bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil { if bucket == nil {
return ErrWaitingProofNotFound return ErrWaitingProofNotFound
} }
@ -158,8 +158,8 @@ func (s *WaitingProofStore) Get(key WaitingProofKey) (*WaitingProof, error) {
return nil, ErrWaitingProofNotFound return nil, ErrWaitingProofNotFound
} }
err := s.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(s.db, func(tx kvdb.ReadTx) error {
bucket := tx.Bucket(waitingProofsBucketKey) bucket := tx.ReadBucket(waitingProofsBucketKey)
if bucket == nil { if bucket == nil {
return ErrWaitingProofNotFound return ErrWaitingProofNotFound
} }

View File

@ -3,7 +3,7 @@ package channeldb
import ( import (
"fmt" "fmt"
"github.com/coreos/bbolt" "github.com/lightningnetwork/lnd/channeldb/kvdb"
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lntypes"
) )
@ -106,8 +106,8 @@ func (w *WitnessCache) addWitnessEntries(wType WitnessType,
return nil return nil
} }
return w.db.Batch(func(tx *bbolt.Tx) error { return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey) witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil { if err != nil {
return err return err
} }
@ -150,8 +150,8 @@ func (w *WitnessCache) LookupSha256Witness(hash lntypes.Hash) (lntypes.Preimage,
// will be returned. // will be returned.
func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) { func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]byte, error) {
var witness []byte var witness []byte
err := w.db.View(func(tx *bbolt.Tx) error { err := kvdb.View(w.db, func(tx kvdb.ReadTx) error {
witnessBucket := tx.Bucket(witnessBucketKey) witnessBucket := tx.ReadBucket(witnessBucketKey)
if witnessBucket == nil { if witnessBucket == nil {
return ErrNoWitnesses return ErrNoWitnesses
} }
@ -160,7 +160,7 @@ func (w *WitnessCache) lookupWitness(wType WitnessType, witnessKey []byte) ([]by
if err != nil { if err != nil {
return err return err
} }
witnessTypeBucket := witnessBucket.Bucket(witnessTypeBucketKey) witnessTypeBucket := witnessBucket.NestedReadBucket(witnessTypeBucketKey)
if witnessTypeBucket == nil { if witnessTypeBucket == nil {
return ErrNoWitnesses return ErrNoWitnesses
} }
@ -189,8 +189,8 @@ func (w *WitnessCache) DeleteSha256Witness(hash lntypes.Hash) error {
// deleteWitness attempts to delete a particular witness from the database. // deleteWitness attempts to delete a particular witness from the database.
func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error { func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error {
return w.db.Batch(func(tx *bbolt.Tx) error { return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey) witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil { if err != nil {
return err return err
} }
@ -213,8 +213,8 @@ func (w *WitnessCache) deleteWitness(wType WitnessType, witnessKey []byte) error
// DeleteWitnessClass attempts to delete an *entire* class of witnesses. After // DeleteWitnessClass attempts to delete an *entire* class of witnesses. After
// this function return with a non-nil error, // this function return with a non-nil error,
func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error { func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return w.db.Batch(func(tx *bbolt.Tx) error { return kvdb.Batch(w.db.Backend, func(tx kvdb.RwTx) error {
witnessBucket, err := tx.CreateBucketIfNotExists(witnessBucketKey) witnessBucket, err := tx.CreateTopLevelBucket(witnessBucketKey)
if err != nil { if err != nil {
return err return err
} }
@ -224,6 +224,6 @@ func (w *WitnessCache) DeleteWitnessClass(wType WitnessType) error {
return err return err
} }
return witnessBucket.DeleteBucket(witnessTypeBucketKey) return witnessBucket.DeleteNestedBucket(witnessTypeBucketKey)
}) })
} }

5
go.mod
View File

@ -45,8 +45,9 @@ require (
github.com/rogpeppe/fastuuid v1.2.0 // indirect github.com/rogpeppe/fastuuid v1.2.0 // indirect
github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02 github.com/tv42/zbase32 v0.0.0-20160707012821-501572607d02
github.com/urfave/cli v1.18.0 github.com/urfave/cli v1.18.0
golang.org/x/crypto v0.0.0-20200109152110-61a87790db17 go.etcd.io/bbolt v1.3.3
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67
golang.org/x/net v0.0.0-20190206173232-65e2d4e15006
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2
google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922 google.golang.org/genproto v0.0.0-20190201180003-4b09977fb922
google.golang.org/grpc v1.19.0 google.golang.org/grpc v1.19.0