diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a68c02890..a4acdeef4 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -214,6 +214,7 @@ jobs: matrix: unit_type: - btcd unit-cover + - unit tags=kvdb_etcd - travis-race steps: - name: git checkout diff --git a/.golangci.yml b/.golangci.yml index c0057fce9..ee9246b2a 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,6 +1,6 @@ run: # timeout for analysis - deadline: 4m + deadline: 10m # Skip autogenerated files for mobile. skip-files: @@ -65,3 +65,11 @@ issues: - path: _test\.go linters: - gosec + + # Fix false positives because of build flags in itest directory. + - path: lntest/itest/.* + linters: + - unused + - deadcode + - unparam + - govet diff --git a/.travis.yml b/.travis.yml index 9456d2085..ad87c7993 100644 --- a/.travis.yml +++ b/.travis.yml @@ -28,6 +28,25 @@ sudo: required jobs: include: + - stage: Sanity Check + name: Lint and compile + before_script: + # Install the RPC tools as a before step so Travis collapses the output + # after it's done. + - ./scripts/install_travis_proto.sh + + script: + # Step 1: Make sure no diff is produced when compiling with the correct + # version. + - make rpc-check + + # Step 2: Make sure the unit tests compile, but don't run them. They run + # in a GitHub Workflow. + - make unit pkg=... case=_NONE_ + + # Step 3: Lint go code. Invoke GC more often to reduce memory usage. + - GOGC=30 make lint + - stage: Integration Test name: Btcd Integration script: @@ -49,7 +68,32 @@ jobs: - GOARM=7 GOARCH=arm GOOS=linux CGO_ENABLED=0 make itest-only arch: arm64 -after_script: - - LOG_FILES=./lntest/itest/*.log - - echo "Uploading to termbin.com..." && find $LOG_FILES | xargs -I{} sh -c "cat {} | nc termbin.com 9999 | xargs -r0 printf '{} uploaded to %s'" - - echo "Uploading to file.io..." 
&& tar -zcvO $LOG_FILES | curl -s -F 'file=@-;filename=logs.tar.gz' https://file.io | xargs -r0 printf 'logs.tar.gz uploaded to %s\n' + - name: Btcd Integration Windows + script: + - make itest-windows + os: windows + before_install: + - choco upgrade --no-progress -y make netcat curl findutils + - export MAKE=mingw32-make + after_failure: + - |- + case $TRAVIS_OS_NAME in + windows) + echo "Uploading to termbin.com..." + for f in ./lntest/itest/*.log; do cat $f | nc termbin.com 9999 | xargs -r0 printf "$f"' uploaded to %s'; done + ;; + esac + +after_failure: + - |- + case $TRAVIS_OS_NAME in + windows) + # Needs other commands, see after_script of the Windows build + ;; + + *) + LOG_FILES=./lntest/itest/*.log + echo "Uploading to termbin.com..." && find $LOG_FILES | xargs -I{} sh -c "cat {} | nc termbin.com 9999 | xargs -r0 printf '{} uploaded to %s'" + echo "Uploading to file.io..." && tar -zcvO $LOG_FILES | curl -s -F 'file=@-;filename=logs.tar.gz' https://file.io | xargs -r0 printf 'logs.tar.gz uploaded to %s\n' + ;; + esac diff --git a/Makefile b/Makefile index e474fbe5a..e94ca3580 100644 --- a/Makefile +++ b/Makefile @@ -43,8 +43,6 @@ GOTEST := GO111MODULE=on go test GOVERSION := $(shell go version | awk '{print $$3}') GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") -GOLIST := go list -deps $(PKG)/... | grep '$(PKG)'| grep -v '/vendor/' -GOLISTCOVER := $(shell go list -deps -f '{{.ImportPath}}' ./... 
| grep '$(PKG)' | sed -e 's/^$(ESCPKG)/./') RM := rm -f CP := cp @@ -132,6 +130,11 @@ build-itest: $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lnd $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest $(ITEST_LDFLAGS) $(PKG)/cmd/lncli +build-itest-windows: + @$(call print, "Building itest lnd and lncli.") + $(GOBUILD) -tags="$(ITEST_TAGS)" -o lnd-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lnd + $(GOBUILD) -tags="$(ITEST_TAGS)" -o lncli-itest.exe $(ITEST_LDFLAGS) $(PKG)/cmd/lncli + install: @$(call print, "Installing lnd and lncli.") $(GOINSTALL) -tags="${tags}" $(LDFLAGS) $(PKG)/cmd/lnd @@ -158,6 +161,8 @@ itest-only: itest: btcd build-itest itest-only +itest-windows: btcd build-itest-windows itest-only + unit: btcd @$(call print, "Running unit tests.") $(UNIT) diff --git a/README.md b/README.md index 9356ad733..16a2e53ce 100644 --- a/README.md +++ b/README.md @@ -90,7 +90,7 @@ any issues regarding security or privacy, please disclose the information responsibly by sending an email to security at lightning dot engineering, preferably [encrypted using our designated PGP key (`91FE464CD75101DA6B6BAB60555C6465E5BCB3AF`) which can be found -[here](https://gist.githubusercontent.com/Roasbeef/6fb5b52886183239e4aa558f83d085d3/raw/5ef96c426e3cf20a2443dc9d3c7d6877576da9ca/security@lightning.engineering). +[here](https://gist.githubusercontent.com/Roasbeef/6fb5b52886183239e4aa558f83d085d3/raw/5fa96010af201628bcfa61e9309d9b13d23d220f/security@lightning.engineering). 
## Further reading * [Step-by-step send payment guide with docker](https://github.com/lightningnetwork/lnd/tree/master/docker) diff --git a/breacharbiter.go b/breacharbiter.go index 6924682f2..b504d1b83 100644 --- a/breacharbiter.go +++ b/breacharbiter.go @@ -20,6 +20,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" ) @@ -566,7 +567,8 @@ justiceTxBroadcast: // We'll now attempt to broadcast the transaction which finalized the // channel's retribution against the cheating counter party. - err = b.cfg.PublishTransaction(finalTx, "") + label := labels.MakeLabel(labels.LabelTypeJusticeTransaction, nil) + err = b.cfg.PublishTransaction(finalTx, label) if err != nil { brarLog.Errorf("Unable to broadcast justice tx: %v", err) @@ -912,6 +914,11 @@ func (bo *breachedOutput) HeightHint() uint32 { return bo.confHeight } +// UnconfParent returns information about a possibly unconfirmed parent tx. +func (bo *breachedOutput) UnconfParent() *input.TxInfo { + return nil +} + // Add compile-time constraint ensuring breachedOutput implements the Input // interface. 
var _ input.Input = (*breachedOutput)(nil) diff --git a/breacharbiter_test.go b/breacharbiter_test.go index 01d9daf75..03c1cdb6f 100644 --- a/breacharbiter_test.go +++ b/breacharbiter_test.go @@ -29,6 +29,7 @@ import ( "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" @@ -1433,8 +1434,8 @@ func testBreachSpends(t *testing.T, test breachTest) { // Notify that the breaching transaction is confirmed, to trigger the // retribution logic. - notifier := brar.cfg.Notifier.(*mockSpendNotifier) - notifier.confChannel <- &chainntnfs.TxConfirmation{} + notifier := brar.cfg.Notifier.(*mock.SpendNotifier) + notifier.ConfChan <- &chainntnfs.TxConfirmation{} // The breach arbiter should attempt to sweep all outputs on the // breached commitment. We'll pretend that the HTLC output has been @@ -1510,7 +1511,7 @@ func testBreachSpends(t *testing.T, test breachTest) { // Deliver confirmation of sweep if the test expects it. if test.sendFinalConf { - notifier.confChannel <- &chainntnfs.TxConfirmation{} + notifier.ConfChan <- &chainntnfs.TxConfirmation{} } // Assert that the channel is fully resolved. @@ -1670,10 +1671,10 @@ func createTestArbiter(t *testing.T, contractBreaches chan *ContractBreachEvent, aliceKeyPriv, _ := btcec.PrivKeyFromBytes(btcec.S256(), alicesPrivKey) - signer := &mockSigner{key: aliceKeyPriv} + signer := &mock.SingleSigner{Privkey: aliceKeyPriv} // Assemble our test arbiter. 
- notifier := makeMockSpendNotifier() + notifier := mock.MakeMockSpendNotifier() ba := newBreachArbiter(&BreachConfig{ CloseLink: func(_ *wire.OutPoint, _ htlcswitch.ChannelCloseType) {}, DB: db, @@ -1897,8 +1898,8 @@ func createInitChannels(revocationWindow int) (*lnwallet.LightningChannel, *lnwa Packager: channeldb.NewChannelPackager(shortChanID), } - aliceSigner := &mockSigner{aliceKeyPriv} - bobSigner := &mockSigner{bobKeyPriv} + aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv} + bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv} alicePool := lnwallet.NewSigPool(1, aliceSigner) channelAlice, err := lnwallet.NewLightningChannel( diff --git a/brontide/conn.go b/brontide/conn.go index 33b550b83..c64d8a643 100644 --- a/brontide/conn.go +++ b/brontide/conn.go @@ -10,6 +10,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tor" ) // Conn is an implementation of net.Conn which enforces an authenticated key @@ -34,12 +35,12 @@ var _ net.Conn = (*Conn)(nil) // public key. In the case of a handshake failure, the connection is closed and // a non-nil error is returned. 
func Dial(local keychain.SingleKeyECDH, netAddr *lnwire.NetAddress, - dialer func(string, string) (net.Conn, error)) (*Conn, error) { + timeout time.Duration, dialer tor.DialFunc) (*Conn, error) { ipAddr := netAddr.Address.String() var conn net.Conn var err error - conn, err = dialer("tcp", ipAddr) + conn, err = dialer("tcp", ipAddr, timeout) if err != nil { return nil, err } diff --git a/brontide/noise_test.go b/brontide/noise_test.go index ed2229c1f..dd0882cea 100644 --- a/brontide/noise_test.go +++ b/brontide/noise_test.go @@ -13,6 +13,7 @@ import ( "github.com/btcsuite/btcd/btcec" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/tor" ) type maybeNetConn struct { @@ -66,7 +67,10 @@ func establishTestConnection() (net.Conn, net.Conn, func(), error) { // successful. remoteConnChan := make(chan maybeNetConn, 1) go func() { - remoteConn, err := Dial(remoteKeyECDH, netAddr, net.Dial) + remoteConn, err := Dial( + remoteKeyECDH, netAddr, + tor.DefaultConnTimeout, net.DialTimeout, + ) remoteConnChan <- maybeNetConn{remoteConn, err} }() @@ -196,7 +200,10 @@ func TestConcurrentHandshakes(t *testing.T) { remoteKeyECDH := &keychain.PrivKeyECDH{PrivKey: remotePriv} go func() { - remoteConn, err := Dial(remoteKeyECDH, netAddr, net.Dial) + remoteConn, err := Dial( + remoteKeyECDH, netAddr, + tor.DefaultConnTimeout, net.DialTimeout, + ) connChan <- maybeNetConn{remoteConn, err} }() diff --git a/build/log_shutdown.go b/build/log_shutdown.go new file mode 100644 index 000000000..d83ff99ec --- /dev/null +++ b/build/log_shutdown.go @@ -0,0 +1,53 @@ +package build + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/signal" +) + +// ShutdownLogger wraps an existing logger with a shutdown function which will +// be called on Critical/Criticalf to prompt shutdown. 
+type ShutdownLogger struct { + btclog.Logger +} + +// NewShutdownLogger creates a shutdown logger for the log provided which will +// use the signal package to request shutdown on critical errors. +func NewShutdownLogger(logger btclog.Logger) *ShutdownLogger { + return &ShutdownLogger{ + Logger: logger, + } +} + +// Criticalf formats message according to format specifier and writes to +// log with LevelCritical. It will then call the shutdown logger's shutdown +// function to prompt safe shutdown. +// +// Note: it is part of the btclog.Logger interface. +func (s *ShutdownLogger) Criticalf(format string, params ...interface{}) { + s.Logger.Criticalf(format, params...) + s.shutdown() +} + +// Critical formats message using the default formats for its operands +// and writes to log with LevelCritical. It will then call the shutdown +// logger's shutdown function to prompt safe shutdown. +// +// Note: it is part of the btclog.Logger interface. +func (s *ShutdownLogger) Critical(v ...interface{}) { + s.Logger.Critical(v) + s.shutdown() +} + +// shutdown checks whether we are listening for interrupts, since a shutdown +// request to the signal package will block if it is not running, and requests +// shutdown if possible. 
+func (s *ShutdownLogger) shutdown() { + if !signal.Listening() { + s.Logger.Info("Request for shutdown ignored") + return + } + + s.Logger.Info("Sending request for shutdown") + signal.RequestShutdown() +} diff --git a/build/logrotator.go b/build/logrotator.go index 734a3bd7e..2474d2d8c 100644 --- a/build/logrotator.go +++ b/build/logrotator.go @@ -39,7 +39,10 @@ func NewRotatingLogWriter() *RotatingLogWriter { logWriter := &LogWriter{} backendLog := btclog.NewBackend(logWriter) return &RotatingLogWriter{ - GenSubLogger: backendLog.Logger, + GenSubLogger: func(tag string) btclog.Logger { + logger := backendLog.Logger(tag) + return NewShutdownLogger(logger) + }, logWriter: logWriter, backendLog: backendLog, subsystemLoggers: SubLoggers{}, diff --git a/build/version.go b/build/version.go index 444673ecf..755e80df4 100644 --- a/build/version.go +++ b/build/version.go @@ -41,7 +41,7 @@ const ( AppMajor uint = 0 // AppMinor defines the minor version of this binary. - AppMinor uint = 10 + AppMinor uint = 11 // AppPatch defines the application patch for this binary. 
AppPatch uint = 99 diff --git a/cert/go.mod b/cert/go.mod index 29aa7fe7c..858cafcea 100644 --- a/cert/go.mod +++ b/cert/go.mod @@ -1,3 +1,5 @@ module github.com/lightningnetwork/lnd/cert go 1.13 + +require github.com/stretchr/testify v1.5.1 diff --git a/cert/go.sum b/cert/go.sum new file mode 100644 index 000000000..331fa6982 --- /dev/null +++ b/cert/go.sum @@ -0,0 +1,11 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/cert/selfsigned.go b/cert/selfsigned.go index bcb7cb9ca..9a41b13d7 100644 --- a/cert/selfsigned.go +++ b/cert/selfsigned.go @@ -32,8 +32,9 @@ var ( ) // ipAddresses returns the parserd IP addresses to use when creating the TLS -// certificate. -func ipAddresses(tlsExtraIPs []string) ([]net.IP, error) { +// certificate. If tlsDisableAutofill is true, we don't include interface +// addresses to protect users privacy. +func ipAddresses(tlsExtraIPs []string, tlsDisableAutofill bool) ([]net.IP, error) { // Collect the host's IP addresses, including loopback, in a slice. 
ipAddresses := []net.IP{net.ParseIP("127.0.0.1"), net.ParseIP("::1")} @@ -47,15 +48,20 @@ func ipAddresses(tlsExtraIPs []string) ([]net.IP, error) { ipAddresses = append(ipAddresses, ipAddr) } - // Add all the interface IPs that aren't already in the slice. - addrs, err := net.InterfaceAddrs() - if err != nil { - return nil, err - } - for _, a := range addrs { - ipAddr, _, err := net.ParseCIDR(a.String()) - if err == nil { - addIP(ipAddr) + // To protect their privacy, some users might not want to have all + // their network addresses include in the certificate as this could + // leak sensitive information. + if !tlsDisableAutofill { + // Add all the interface IPs that aren't already in the slice. + addrs, err := net.InterfaceAddrs() + if err != nil { + return nil, err + } + for _, a := range addrs { + ipAddr, _, err := net.ParseCIDR(a.String()) + if err == nil { + addIP(ipAddr) + } } } @@ -72,10 +78,14 @@ func ipAddresses(tlsExtraIPs []string) ([]net.IP, error) { // dnsNames returns the host and DNS names to use when creating the TLS // ceftificate. -func dnsNames(tlsExtraDomains []string) (string, []string) { +func dnsNames(tlsExtraDomains []string, tlsDisableAutofill bool) (string, []string) { // Collect the host's names into a slice. host, err := os.Hostname() - if err != nil { + + // To further protect their privacy, some users might not want + // to have their hostname include in the certificate as this could + // leak sensitive information. + if err != nil || tlsDisableAutofill { // Nothing much we can do here, other than falling back to // localhost as fallback. A hostname can still be provided with // the tlsExtraDomain parameter if the problem persists on a @@ -89,6 +99,13 @@ func dnsNames(tlsExtraDomains []string) (string, []string) { } dnsNames = append(dnsNames, tlsExtraDomains...) 
+ // Because we aren't including the hostname in the certificate when + // tlsDisableAutofill is set, we will use the first extra domain + // specified by the user, if it's set, as the Common Name. + if tlsDisableAutofill && len(tlsExtraDomains) > 0 { + host = tlsExtraDomains[0] + } + // Also add fake hostnames for unix sockets, otherwise hostname // verification will fail in the client. dnsNames = append(dnsNames, "unix", "unixpacket") @@ -104,10 +121,10 @@ func dnsNames(tlsExtraDomains []string) (string, []string) { // and domains given. The certificate is considered up to date if it was // created with _exactly_ the IPs and domains given. func IsOutdated(cert *x509.Certificate, tlsExtraIPs, - tlsExtraDomains []string) (bool, error) { + tlsExtraDomains []string, tlsDisableAutofill bool) (bool, error) { // Parse the slice of IP strings. - ips, err := ipAddresses(tlsExtraIPs) + ips, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill) if err != nil { return false, err } @@ -147,7 +164,7 @@ func IsOutdated(cert *x509.Certificate, tlsExtraIPs, } // Get the full list of DNS names to use. - _, dnsNames := dnsNames(tlsExtraDomains) + _, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill) // We do the same kind of deduplication for the DNS names. dns1 := make(map[string]struct{}) @@ -186,7 +203,8 @@ func IsOutdated(cert *x509.Certificate, tlsExtraIPs, // This function is adapted from https://github.com/btcsuite/btcd and // https://github.com/btcsuite/btcutil func GenCertPair(org, certFile, keyFile string, tlsExtraIPs, - tlsExtraDomains []string, certValidity time.Duration) error { + tlsExtraDomains []string, tlsDisableAutofill bool, + certValidity time.Duration) error { now := time.Now() validUntil := now.Add(certValidity) @@ -204,8 +222,8 @@ func GenCertPair(org, certFile, keyFile string, tlsExtraIPs, // Get all DNS names and IP addresses to use when creating the // certificate. 
- host, dnsNames := dnsNames(tlsExtraDomains) - ipAddresses, err := ipAddresses(tlsExtraIPs) + host, dnsNames := dnsNames(tlsExtraDomains, tlsDisableAutofill) + ipAddresses, err := ipAddresses(tlsExtraIPs, tlsDisableAutofill) if err != nil { return err } diff --git a/cert/selfsigned_test.go b/cert/selfsigned_test.go index 0a5888a96..b14283490 100644 --- a/cert/selfsigned_test.go +++ b/cert/selfsigned_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/lightningnetwork/lnd/cert" + "github.com/stretchr/testify/require" ) var ( @@ -26,7 +27,7 @@ func TestIsOutdatedCert(t *testing.T) { // Generate TLS files with two extra IPs and domains. err = cert.GenCertPair( "lnd autogenerated cert", certPath, keyPath, extraIPs[:2], - extraDomains[:2], cert.DefaultAutogenValidity, + extraDomains[:2], false, cert.DefaultAutogenValidity, ) if err != nil { t.Fatal(err) @@ -48,7 +49,7 @@ func TestIsOutdatedCert(t *testing.T) { // above. outdated, err := cert.IsOutdated( parsedCert, extraIPs[:numIPs], - extraDomains[:numDomains], + extraDomains[:numDomains], false, ) if err != nil { t.Fatal(err) @@ -81,7 +82,7 @@ func TestIsOutdatedPermutation(t *testing.T) { // Generate TLS files from the IPs and domains. 
err = cert.GenCertPair( "lnd autogenerated cert", certPath, keyPath, extraIPs[:], - extraDomains[:], cert.DefaultAutogenValidity, + extraDomains[:], false, cert.DefaultAutogenValidity, ) if err != nil { t.Fatal(err) @@ -102,7 +103,7 @@ func TestIsOutdatedPermutation(t *testing.T) { dupDNS[i] = extraDomains[i/2] } - outdated, err := cert.IsOutdated(parsedCert, dupIPs, dupDNS) + outdated, err := cert.IsOutdated(parsedCert, dupIPs, dupDNS, false) if err != nil { t.Fatal(err) } @@ -123,7 +124,7 @@ func TestIsOutdatedPermutation(t *testing.T) { revDNS[i] = extraDomains[len(extraDomains)-1-i] } - outdated, err = cert.IsOutdated(parsedCert, revIPs, revDNS) + outdated, err = cert.IsOutdated(parsedCert, revIPs, revDNS, false) if err != nil { t.Fatal(err) } @@ -133,3 +134,59 @@ func TestIsOutdatedPermutation(t *testing.T) { "considered outdated") } } + +// TestTLSDisableAutofill checks that setting the --tlsdisableautofill flag +// does not add interface ip addresses or hostnames to the cert. +func TestTLSDisableAutofill(t *testing.T) { + tempDir, err := ioutil.TempDir("", "certtest") + if err != nil { + t.Fatal(err) + } + + certPath := tempDir + "/tls.cert" + keyPath := tempDir + "/tls.key" + + // Generate TLS files with two extra IPs and domains and no interface IPs. + err = cert.GenCertPair( + "lnd autogenerated cert", certPath, keyPath, extraIPs[:2], + extraDomains[:2], true, cert.DefaultAutogenValidity, + ) + require.NoError( + t, err, + "unable to generate tls certificate pair", + ) + + _, parsedCert, err := cert.LoadCert( + certPath, keyPath, + ) + require.NoError( + t, err, + "unable to load tls certificate pair", + ) + + // Check if the TLS cert is outdated while still preventing + // interface IPs from being used. 
Should not be outdated + shouldNotBeOutdated, err := cert.IsOutdated( + parsedCert, extraIPs[:2], + extraDomains[:2], true, + ) + require.NoError(t, err) + + require.Equal( + t, false, shouldNotBeOutdated, + "TLS Certificate was marked as outdated when it should not be", + ) + + // Check if the TLS cert is outdated while allowing for + // interface IPs to be used. Should report as outdated. + shouldBeOutdated, err := cert.IsOutdated( + parsedCert, extraIPs[:2], + extraDomains[:2], false, + ) + require.NoError(t, err) + + require.Equal( + t, true, shouldBeOutdated, + "TLS Certificate was not marked as outdated when it should be", + ) +} diff --git a/chainntnfs/interface.go b/chainntnfs/interface.go index c224181c8..4017d8530 100644 --- a/chainntnfs/interface.go +++ b/chainntnfs/interface.go @@ -482,7 +482,7 @@ func GetClientMissedBlocks(chainConn ChainConn, clientBestBlock *BlockEpoch, return missedBlocks, nil } -// RewindChain handles internal state updates for the notifier's TxNotifier It +// RewindChain handles internal state updates for the notifier's TxNotifier. It // has no effect if given a height greater than or equal to our current best // known height. It returns the new best block for the notifier. func RewindChain(chainConn ChainConn, txNotifier *TxNotifier, diff --git a/chainntnfs/txnotifier.go b/chainntnfs/txnotifier.go index 6a3aecd70..2e527cfa4 100644 --- a/chainntnfs/txnotifier.go +++ b/chainntnfs/txnotifier.go @@ -262,7 +262,7 @@ type HistoricalConfDispatch struct { // inclusion of within the chain. ConfRequest - // StartHeight specifies the block height at which to being the + // StartHeight specifies the block height at which to begin the // historical rescan. 
StartHeight uint32 @@ -763,9 +763,7 @@ func (n *TxNotifier) CancelConf(confRequest ConfRequest, confID uint64) { if confSet.details != nil { confHeight := confSet.details.BlockHeight + ntfn.NumConfirmations - 1 - if confHeight <= n.currentHeight { - delete(n.ntfnsByConfirmHeight[confHeight], ntfn) - } + delete(n.ntfnsByConfirmHeight[confHeight], ntfn) } } @@ -1959,19 +1957,21 @@ func (n *TxNotifier) TearDown() { defer n.Unlock() for _, confSet := range n.confNotifications { - for _, ntfn := range confSet.ntfns { + for confID, ntfn := range confSet.ntfns { close(ntfn.Event.Confirmed) close(ntfn.Event.Updates) close(ntfn.Event.NegativeConf) close(ntfn.Event.Done) + delete(confSet.ntfns, confID) } } for _, spendSet := range n.spendNotifications { - for _, ntfn := range spendSet.ntfns { + for spendID, ntfn := range spendSet.ntfns { close(ntfn.Event.Spend) close(ntfn.Event.Reorg) close(ntfn.Event.Done) + delete(spendSet.ntfns, spendID) } } } diff --git a/chainntnfs/txnotifier_test.go b/chainntnfs/txnotifier_test.go index b2262d4a9..40fc052d8 100644 --- a/chainntnfs/txnotifier_test.go +++ b/chainntnfs/txnotifier_test.go @@ -4,6 +4,7 @@ import ( "bytes" "sync" "testing" + "time" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" @@ -1138,7 +1139,7 @@ func TestTxNotifierCancelConf(t *testing.T) { hintCache := newMockHintCache() n := chainntnfs.NewTxNotifier(startingHeight, 100, hintCache, hintCache) - // We'll register three notification requests. The last two will be + // We'll register four notification requests. The last three will be // canceled. tx1 := wire.NewMsgTx(1) tx1.AddTxOut(&wire.TxOut{PkScript: testRawScript}) @@ -1159,7 +1160,12 @@ func TestTxNotifierCancelConf(t *testing.T) { if err != nil { t.Fatalf("unable to register spend ntfn: %v", err) } - cancelConfRequest := ntfn2.HistoricalDispatch.ConfRequest + + // This request will have a three block num confs. 
+ ntfn4, err := n.RegisterConf(&tx2Hash, testRawScript, 3, 1) + if err != nil { + t.Fatalf("unable to register spend ntfn: %v", err) + } // Extend the chain with a block that will confirm both transactions. // This will queue confirmation notifications to dispatch once their @@ -1175,7 +1181,7 @@ func TestTxNotifierCancelConf(t *testing.T) { } // Cancel the second notification before connecting the block. - n.CancelConf(cancelConfRequest, 2) + ntfn2.Event.Cancel() err = n.ConnectTip(block.Hash(), startingHeight+1, block.Transactions()) if err != nil { @@ -1184,7 +1190,7 @@ func TestTxNotifierCancelConf(t *testing.T) { // Cancel the third notification before notifying to ensure its queued // confirmation notification gets removed as well. - n.CancelConf(cancelConfRequest, 3) + ntfn3.Event.Cancel() if err := n.NotifyHeight(startingHeight + 1); err != nil { t.Fatalf("unable to dispatch notifications: %v", err) @@ -1218,6 +1224,54 @@ func TestTxNotifierCancelConf(t *testing.T) { default: t.Fatal("expected Confirmed channel to be closed") } + + // Connect yet another block. + block1 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{}, + }) + + err = n.ConnectTip(block1.Hash(), startingHeight+2, block1.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + if err := n.NotifyHeight(startingHeight + 2); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } + + // Since neither it reached the set confirmation height or was + // canceled, nothing should happen to ntfn4 in this block. + select { + case <-ntfn4.Event.Confirmed: + t.Fatal("expected nothing to happen") + case <-time.After(10 * time.Millisecond): + } + + // Now cancel the notification. 
+ ntfn4.Event.Cancel() + select { + case _, ok := <-ntfn4.Event.Confirmed: + if ok { + t.Fatal("expected Confirmed channel to be closed") + } + default: + t.Fatal("expected Confirmed channel to be closed") + } + + // Finally, confirm a block that would trigger ntfn4 confirmation + // hadn't it already been canceled. + block2 := btcutil.NewBlock(&wire.MsgBlock{ + Transactions: []*wire.MsgTx{}, + }) + + err = n.ConnectTip(block2.Hash(), startingHeight+3, block2.Transactions()) + if err != nil { + t.Fatalf("unable to connect block: %v", err) + } + + if err := n.NotifyHeight(startingHeight + 3); err != nil { + t.Fatalf("unable to dispatch notifications: %v", err) + } } // TestTxNotifierCancelSpend ensures that a spend notification after a client diff --git a/chainparams.go b/chainparams.go index a119fea8e..6f3f2fe16 100644 --- a/chainparams.go +++ b/chainparams.go @@ -10,10 +10,6 @@ import ( litecoinWire "github.com/ltcsuite/ltcd/wire" ) -// activeNetParams is a pointer to the parameters specific to the currently -// active bitcoin network. -var activeNetParams = bitcoinTestNetParams - // bitcoinNetParams couples the p2p parameters of a network with the // corresponding RPC port of a daemon running on the particular network. type bitcoinNetParams struct { diff --git a/chainregistry.go b/chainregistry.go index 3d750a3e6..c534b4e7f 100644 --- a/chainregistry.go +++ b/chainregistry.go @@ -2,6 +2,7 @@ package lnd import ( "encoding/hex" + "errors" "fmt" "io/ioutil" "net" @@ -161,7 +162,7 @@ type chainControl struct { // full-node, another backed by a running bitcoind full-node, and the other // backed by a running neutrino light client instance. When running with a // neutrino light client instance, `neutrinoCS` must be non-nil. 
-func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, +func newChainControlFromConfig(cfg *Config, localDB, remoteDB *channeldb.DB, privateWalletPw, publicWalletPw []byte, birthday time.Time, recoveryWindow uint32, wallet *wallet.Wallet, neutrinoCS *neutrino.ChainService) (*chainControl, error) { @@ -212,8 +213,8 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, Birthday: birthday, RecoveryWindow: recoveryWindow, DataDir: homeChainConfig.ChainDir, - NetParams: activeNetParams.Params, - CoinType: activeNetParams.CoinType, + NetParams: cfg.ActiveNetParams.Params, + CoinType: cfg.ActiveNetParams.CoinType, Wallet: wallet, } @@ -225,8 +226,9 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, if cfg.HeightHintCacheQueryDisable { ltndLog.Infof("Height Hint Cache Queries disabled") } + // Initialize the height hint cache within the chain directory. - hintCache, err := chainntnfs.NewHeightHintCache(heightHintCacheConfig, chanDB) + hintCache, err := chainntnfs.NewHeightHintCache(heightHintCacheConfig, localDB) if err != nil { return nil, fmt.Errorf("unable to initialize height hint "+ "cache: %v", err) @@ -248,25 +250,19 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, return nil, err } - // If the user provided an API for fee estimation, activate it now. + // Map the deprecated neutrino feeurl flag to the general fee + // url. 
if cfg.NeutrinoMode.FeeURL != "" { - ltndLog.Infof("Using API fee estimator!") - - estimator := chainfee.NewWebAPIEstimator( - chainfee.SparseConfFeeSource{ - URL: cfg.NeutrinoMode.FeeURL, - }, - defaultBitcoinStaticFeePerKW, - ) - - if err := estimator.Start(); err != nil { - return nil, err + if cfg.FeeURL != "" { + return nil, errors.New("feeurl and " + + "neutrino.feeurl are mutually exclusive") } - cc.feeEstimator = estimator + + cfg.FeeURL = cfg.NeutrinoMode.FeeURL } walletConfig.ChainSource = chain.NewNeutrinoClient( - activeNetParams.Params, neutrinoCS, + cfg.ActiveNetParams.Params, neutrinoCS, ) case "bitcoind", "litecoind": @@ -290,7 +286,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, // btcd, which picks a different port so that btcwallet // can use the same RPC port as bitcoind. We convert // this back to the btcwallet/bitcoind port. - rpcPort, err := strconv.Atoi(activeNetParams.rpcPort) + rpcPort, err := strconv.Atoi(cfg.ActiveNetParams.rpcPort) if err != nil { return nil, err } @@ -318,7 +314,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, // Establish the connection to bitcoind and create the clients // required for our relevant subsystems. 
bitcoindConn, err := chain.NewBitcoindConn( - activeNetParams.Params, bitcoindHost, + cfg.ActiveNetParams.Params, bitcoindHost, bitcoindMode.RPCUser, bitcoindMode.RPCPass, bitcoindMode.ZMQPubRawBlock, bitcoindMode.ZMQPubRawTx, 5*time.Second, @@ -333,7 +329,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, } cc.chainNotifier = bitcoindnotify.New( - bitcoindConn, activeNetParams.Params, hintCache, hintCache, + bitcoindConn, cfg.ActiveNetParams.Params, hintCache, hintCache, ) cc.chainView = chainview.NewBitcoindFilteredChainView(bitcoindConn) walletConfig.ChainSource = bitcoindConn.NewBitcoindClient() @@ -365,9 +361,6 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, if err != nil { return nil, err } - if err := cc.feeEstimator.Start(); err != nil { - return nil, err - } } else if cfg.Litecoin.Active && !cfg.Litecoin.RegTest { ltndLog.Infof("Initializing litecoind backed fee estimator in "+ "%s mode", bitcoindMode.EstimateMode) @@ -384,9 +377,6 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, if err != nil { return nil, err } - if err := cc.feeEstimator.Start(); err != nil { - return nil, err - } } case "btcd", "ltcd": // Otherwise, we'll be speaking directly via RPC to a node. 
@@ -431,7 +421,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, btcdHost = btcdMode.RPCHost } else { btcdHost = fmt.Sprintf("%v:%v", btcdMode.RPCHost, - activeNetParams.rpcPort) + cfg.ActiveNetParams.rpcPort) } btcdUser := btcdMode.RPCUser @@ -447,7 +437,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, DisableAutoReconnect: false, } cc.chainNotifier, err = btcdnotify.New( - rpcConfig, activeNetParams.Params, hintCache, hintCache, + rpcConfig, cfg.ActiveNetParams.Params, hintCache, hintCache, ) if err != nil { return nil, err @@ -463,7 +453,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, // Create a special websockets rpc client for btcd which will be used // by the wallet for notifications, calls, etc. - chainRPC, err := chain.NewRPCClient(activeNetParams.Params, btcdHost, + chainRPC, err := chain.NewRPCClient(cfg.ActiveNetParams.Params, btcdHost, btcdUser, btcdPass, rpcCert, false, 20) if err != nil { return nil, err @@ -489,15 +479,34 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, if err != nil { return nil, err } - if err := cc.feeEstimator.Start(); err != nil { - return nil, err - } } default: return nil, fmt.Errorf("unknown node type: %s", homeChainConfig.Node) } + // Override default fee estimator if an external service is specified. + if cfg.FeeURL != "" { + // Do not cache fees on regtest to make it easier to execute + // manual or automated test cases. + cacheFees := !cfg.Bitcoin.RegTest + + ltndLog.Infof("Using external fee estimator %v: cached=%v", + cfg.FeeURL, cacheFees) + + cc.feeEstimator = chainfee.NewWebAPIEstimator( + chainfee.SparseConfFeeSource{ + URL: cfg.FeeURL, + }, + !cacheFees, + ) + } + + // Start fee estimator. 
+ if err := cc.feeEstimator.Start(); err != nil { + return nil, err + } + wc, err := btcwallet.New(*walletConfig) if err != nil { fmt.Printf("unable to create wallet controller: %v\n", err) @@ -516,14 +525,14 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, } keyRing := keychain.NewBtcWalletKeyRing( - wc.InternalWallet(), activeNetParams.CoinType, + wc.InternalWallet(), cfg.ActiveNetParams.CoinType, ) cc.keyRing = keyRing // Create, and start the lnwallet, which handles the core payment // channel logic, and exposes control via proxy state machines. walletCfg := lnwallet.Config{ - Database: chanDB, + Database: remoteDB, Notifier: cc.chainNotifier, WalletController: wc, Signer: cc.signer, @@ -531,7 +540,7 @@ func newChainControlFromConfig(cfg *Config, chanDB *channeldb.DB, SecretKeyRing: keyRing, ChainIO: cc.chainIO, DefaultConstraints: channelConstraints, - NetParams: *activeNetParams.Params, + NetParams: *cfg.ActiveNetParams.Params, } lnWallet, err := lnwallet.NewLightningWallet(walletCfg) if err != nil { @@ -733,7 +742,7 @@ func initNeutrinoBackend(cfg *Config, chainDir string) (*neutrino.ChainService, // match the behavior of btcwallet. dbPath := filepath.Join( chainDir, - normalizeNetwork(activeNetParams.Name), + normalizeNetwork(cfg.ActiveNetParams.Name), ) // Ensure that the neutrino db path exists. 
@@ -762,11 +771,14 @@ func initNeutrinoBackend(cfg *Config, chainDir string) (*neutrino.ChainService, config := neutrino.Config{ DataDir: dbPath, Database: db, - ChainParams: *activeNetParams.Params, + ChainParams: *cfg.ActiveNetParams.Params, AddPeers: cfg.NeutrinoMode.AddPeers, ConnectPeers: cfg.NeutrinoMode.ConnectPeers, Dialer: func(addr net.Addr) (net.Conn, error) { - return cfg.net.Dial(addr.Network(), addr.String()) + return cfg.net.Dial( + addr.Network(), addr.String(), + cfg.ConnectionTimeout, + ) }, NameResolver: func(host string) ([]net.IP, error) { addrs, err := cfg.net.LookupHost(host) diff --git a/chanfitness/chanevent.go b/chanfitness/chanevent.go index 53048ee3b..0dc4b29da 100644 --- a/chanfitness/chanevent.go +++ b/chanfitness/chanevent.go @@ -5,7 +5,7 @@ import ( "time" "github.com/btcsuite/btcd/wire" - "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/clock" ) type eventType int @@ -28,74 +28,244 @@ func (e eventType) String() string { return "unknown" } -// channelEvent is a a timestamped event which is observed on a per channel -// basis. -type channelEvent struct { +type event struct { timestamp time.Time eventType eventType } -// chanEventLog stores all events that have occurred over a channel's lifetime. -type chanEventLog struct { - // channelPoint is the outpoint for the channel's funding transaction. - channelPoint wire.OutPoint +// peerLog tracks events for a peer and its channels. If we currently have no +// channels with the peer, it will simply track its current online state. If we +// do have channels open with the peer, it will track the peer's online and +// offline events so that we can calculate uptime for our channels. A single +// event log is used for these online and offline events, and uptime for a +// channel is calculated by examining a subsection of this log. +type peerLog struct { + // online stores whether the peer is currently online. 
+ online bool - // peer is the compressed public key of the peer being monitored. - peer route.Vertex + // onlineEvents is a log of timestamped events observed for the peer + // that we have committed to allocating memory to. + onlineEvents []*event - // events is a log of timestamped events observed for the channel. - events []*channelEvent + // stagedEvent represents an event that is pending addition to the + // events list. It has not yet been added because we rate limit the + // frequency that we store events at. We need to store this value + // in the log (rather than just ignore events) so that we can flush the + // aggregate outcome to our event log once the rate limiting period has + // ended. + // + // Take the following example: + // - Peer online event recorded + // - Peer offline event, not recorded due to rate limit + // - No more events, we incorrectly believe our peer to be online + // Instead of skipping events, we stage the most recent event during the + // rate limited period so that we know what happened (on aggregate) + // while we were rate limiting events. + // + // Note that we currently only store offline/online events so we can + // use this field to track our online state. With the addition of other + // event types, we need to only stage online/offline events, or split + // them out. + stagedEvent *event - // now is expected to return the current time. It is supplied as an - // external function to enable deterministic unit tests. - now func() time.Time + // flapCount is the number of times this peer has been observed as + // going offline. + flapCount int - // openedAt tracks the first time this channel was seen. This is not - // necessarily the time that it confirmed on chain because channel events - // are not persisted at present. - openedAt time.Time + // lastFlap is the timestamp of the last flap we recorded for the peer. + // This value will be nil if we have never recorded a flap for the peer. 
+ lastFlap *time.Time - // closedAt is the time that the channel was closed. If the channel has not - // been closed yet, it is zero. - closedAt time.Time + // clock allows creation of deterministic unit tests. + clock clock.Clock + + // channels contains a set of currently open channels. Channels will be + // added and removed from this map as they are opened and closed. + channels map[wire.OutPoint]*channelInfo } -// newEventLog creates an event log for a channel with the openedAt time set. -func newEventLog(channelPoint wire.OutPoint, peer route.Vertex, - now func() time.Time) *chanEventLog { +// newPeerLog creates a log for a peer, taking its historical flap count and +// last flap time as parameters. These values may be zero/nil if we have no +// record of historical flap count for the peer. +func newPeerLog(clock clock.Clock, flapCount int, + lastFlap *time.Time) *peerLog { - eventlog := &chanEventLog{ - channelPoint: channelPoint, - peer: peer, - now: now, - openedAt: now(), + return &peerLog{ + clock: clock, + flapCount: flapCount, + lastFlap: lastFlap, + channels: make(map[wire.OutPoint]*channelInfo), + } +} + +// channelInfo contains information about a channel. +type channelInfo struct { + // openedAt tracks the first time this channel was seen. This is not + // necessarily the time that it confirmed on chain because channel + // events are not persisted at present. + openedAt time.Time +} + +func newChannelInfo(openedAt time.Time) *channelInfo { + return &channelInfo{ + openedAt: openedAt, + } +} + +// onlineEvent records a peer online or offline event in the log and increments +// the peer's flap count. +func (p *peerLog) onlineEvent(online bool) { + eventTime := p.clock.Now() + + // If we have a non-nil last flap time, potentially apply a cooldown + // factor to the peer's flap count before we rate limit it. This allows + // us to decrease the penalty for historical flaps over time, provided + // the peer has not flapped for a while. 
+ if p.lastFlap != nil { + p.flapCount = cooldownFlapCount( + p.clock.Now(), p.flapCount, *p.lastFlap, + ) } - return eventlog -} + // Record flap count information and online state regardless of whether + // we have any channels open with this peer. + p.flapCount++ + p.lastFlap = &eventTime + p.online = online -// close sets the closing time for an event log. -func (e *chanEventLog) close() { - e.closedAt = e.now() -} - -// add appends an event with the given type and current time to the event log. -// The open time for the eventLog will be set to the event's timestamp if it is -// not set yet. -func (e *chanEventLog) add(eventType eventType) { - // If the channel is already closed, return early without adding an event. - if !e.closedAt.IsZero() { + // If we have no channels currently open with the peer, we do not want + // to commit resources to tracking their online state beyond a simple + // online boolean, so we exit early. + if p.channelCount() == 0 { return } - // Add the event to the eventLog with the current timestamp. - event := &channelEvent{ - timestamp: e.now(), + p.addEvent(online, eventTime) +} + +// addEvent records an online or offline event in our event log. and increments +// the peer's flap count. +func (p *peerLog) addEvent(online bool, time time.Time) { + eventType := peerOnlineEvent + if !online { + eventType = peerOfflineEvent + } + + event := &event{ + timestamp: time, eventType: eventType, } - e.events = append(e.events, event) - log.Debugf("Channel %v recording event: %v", e.channelPoint, eventType) + // If we have no staged events, we can just stage this event and return. + if p.stagedEvent == nil { + p.stagedEvent = event + return + } + + // We get the amount of time we require between events according to + // peer flap count. 
+ aggregation := getRateLimit(p.flapCount) + nextRecordTime := p.stagedEvent.timestamp.Add(aggregation) + flushEvent := nextRecordTime.Before(event.timestamp) + + // If enough time has passed since our last staged event, we add our + // event to our in-memory list. + if flushEvent { + p.onlineEvents = append(p.onlineEvents, p.stagedEvent) + } + + // Finally, we replace our staged event with the new event we received. + p.stagedEvent = event +} + +// addChannel adds a channel to our log. If we have not tracked any online +// events for our peer yet, we create one with our peer's current online state +// so that we know the state that the peer had at channel start, which is +// required to calculate uptime over the channel's lifetime. +func (p *peerLog) addChannel(channelPoint wire.OutPoint) error { + _, ok := p.channels[channelPoint] + if ok { + return fmt.Errorf("channel: %v already present", channelPoint) + } + + openTime := p.clock.Now() + p.channels[channelPoint] = newChannelInfo(openTime) + + // If we do not have any online events tracked for our peer (which is + // the case when we have no other channels open with the peer), we add + // an event with the peer's current online state so that we know that + // starting state for this peer when a channel was connected (which + // allows us to calculate uptime over the lifetime of the channel). + if len(p.onlineEvents) == 0 { + p.addEvent(p.online, openTime) + } + + return nil +} + +// removeChannel removes a channel from our log. If we have no more channels +// with the peer after removing this one, we clear our list of events. +func (p *peerLog) removeChannel(channelPoint wire.OutPoint) error { + _, ok := p.channels[channelPoint] + if !ok { + return fmt.Errorf("channel: %v not present", channelPoint) + } + + delete(p.channels, channelPoint) + + // If we have no more channels in our event log, we can discard all of + // our online events in memory, since we don't need them anymore. 
+ // TODO(carla): this could be done on a per channel basis. + if p.channelCount() == 0 { + p.onlineEvents = nil + p.stagedEvent = nil + } + + return nil +} + +// channelCount returns the number of channels that we currently have +// with the peer. +func (p *peerLog) channelCount() int { + return len(p.channels) +} + +// channelUptime looks up a channel and returns the amount of time that the +// channel has been monitored for and its uptime over this period. +func (p *peerLog) channelUptime(channelPoint wire.OutPoint) (time.Duration, + time.Duration, error) { + + channel, ok := p.channels[channelPoint] + if !ok { + return 0, 0, ErrChannelNotFound + } + + now := p.clock.Now() + + uptime, err := p.uptime(channel.openedAt, now) + if err != nil { + return 0, 0, err + } + + return now.Sub(channel.openedAt), uptime, nil +} + +// getFlapCount returns the peer's flap count and the timestamp that we last +// recorded a flap. +func (p *peerLog) getFlapCount() (int, *time.Time) { + return p.flapCount, p.lastFlap +} + +// listEvents returns all of the events that our event log has tracked, +// including events that are staged for addition to our set of events but have +// not yet been committed to (because we rate limit and store only the aggregate +// outcome over a period). +func (p *peerLog) listEvents() []*event { + if p.stagedEvent == nil { + return p.onlineEvents + } + + return append(p.onlineEvents, p.stagedEvent) } // onlinePeriod represents a period of time over which a peer was online. @@ -106,17 +276,25 @@ type onlinePeriod struct { // getOnlinePeriods returns a list of all the periods that the event log has // recorded the remote peer as being online. In the unexpected case where there // are no events, the function returns early. Online periods are defined as a -// peer online event which is terminated by a peer offline event. This function -// expects the event log provided to be ordered by ascending timestamp. 
-func (e *chanEventLog) getOnlinePeriods() []*onlinePeriod { +// peer online event which is terminated by a peer offline event. If the event +// log ends on a peer online event, it appends a final period which is +// calculated until the present. This function expects the event log provided +// to be ordered by ascending timestamp, and can tolerate multiple consecutive +// online or offline events. +func (p *peerLog) getOnlinePeriods() []*onlinePeriod { + events := p.listEvents() + // Return early if there are no events, there are no online periods. - if len(e.events) == 0 { + if len(events) == 0 { return nil } var ( - lastOnline time.Time - offline bool + // lastEvent tracks the last event that we had that was of + // a different type to our own. It is used to determine the + // start time of our online periods when we experience an + // offline event, and to track our last recorded state. + lastEvent *event onlinePeriods []*onlinePeriod ) @@ -126,57 +304,77 @@ func (e *chanEventLog) getOnlinePeriods() []*onlinePeriod { // the online event and the present is not tracked. The type of the most // recent event is tracked using the offline bool so that we can add a // final online period if necessary. - for _, event := range e.events { - + for _, event := range events { switch event.eventType { case peerOnlineEvent: - lastOnline = event.timestamp - offline = false - - case peerOfflineEvent: - offline = true - - // Do not add to uptime if there is no previous online timestamp, - // the event log has started with an offline event - if lastOnline.IsZero() { - continue + // If our previous event is nil, we just set it and + // break out of the switch. + if lastEvent == nil { + lastEvent = event + break } - // The eventLog has recorded an offline event, having previously - // been online so we add an online period to to set of online periods. 
- onlinePeriods = append(onlinePeriods, &onlinePeriod{ - start: lastOnline, - end: event.timestamp, - }) + // If our previous event was an offline event, we update + // it to this event. We do not do this if it was an + // online event because duplicate online events would + // progress our online timestamp forward (rather than + // keep it at our earliest online event timestamp). + if lastEvent.eventType == peerOfflineEvent { + lastEvent = event + } + + case peerOfflineEvent: + // If our previous event is nil, we just set it and + // break out of the switch since we cannot record an + // online period from this single event. + if lastEvent == nil { + lastEvent = event + break + } + + // If the last event we saw was an online event, we + // add an online period to our set and progress our + // previous event to this offline event. We do not + // do this if we have had duplicate offline events + // because we would be tracking the most recent offline + // event (rather than keep it at our earliest offline + // event timestamp). + if lastEvent.eventType == peerOnlineEvent { + onlinePeriods = append( + onlinePeriods, &onlinePeriod{ + start: lastEvent.timestamp, + end: event.timestamp, + }, + ) + + lastEvent = event + } } } - // If the last event was an peer offline event, we do not need to calculate - // a final online period and can return online periods as is. - if offline { + // If the last event was an peer offline event, we do not need to + // calculate a final online period and can return online periods as is. + if lastEvent.eventType == peerOfflineEvent { return onlinePeriods } - // The log ended on an online event, so we need to add a final online event. - // If the channel is closed, this period is until channel closure. It it is - // still open, we calculate it until the present. 
- endTime := e.closedAt - if endTime.IsZero() { - endTime = e.now() + // The log ended on an online event, so we need to add a final online + // period which terminates at the present. + finalEvent := &onlinePeriod{ + start: lastEvent.timestamp, + end: p.clock.Now(), } // Add the final online period to the set and return. - return append(onlinePeriods, &onlinePeriod{ - start: lastOnline, - end: endTime, - }) + return append(onlinePeriods, finalEvent) } -// uptime calculates the total uptime we have recorded for a channel over the +// uptime calculates the total uptime we have recorded for a peer over the // inclusive range specified. An error is returned if the end of the range is // before the start or a zero end time is returned. -func (e *chanEventLog) uptime(start, end time.Time) (time.Duration, error) { - // Error if we are provided with an invalid range to calculate uptime for. +func (p *peerLog) uptime(start, end time.Time) (time.Duration, error) { + // Error if we are provided with an invalid range to calculate uptime + // for. if end.Before(start) { return 0, fmt.Errorf("end time: %v before start time: %v", end, start) @@ -187,26 +385,28 @@ func (e *chanEventLog) uptime(start, end time.Time) (time.Duration, error) { var uptime time.Duration - for _, p := range e.getOnlinePeriods() { - // The online period ends before the range we're looking at, so we can - // skip over it. + for _, p := range p.getOnlinePeriods() { + // The online period ends before the range we're looking at, so + // we can skip over it. if p.end.Before(start) { continue } - // The online period starts after the range we're looking at, so can - // stop calculating uptime. + // The online period starts after the range we're looking at, so + // can stop calculating uptime. if p.start.After(end) { break } - // If the online period starts before our range, shift the start time up - // so that we only calculate uptime from the start of our range. 
+ // If the online period starts before our range, shift the start + // time up so that we only calculate uptime from the start of + // our range. if p.start.Before(start) { p.start = start } - // If the online period ends before our range, shift the end time - // forward so that we only calculate uptime until the end of the range. + // If the online period ends before our range, shift the end + // time forward so that we only calculate uptime until the end + // of the range. if p.end.After(end) { p.end = end } diff --git a/chanfitness/chanevent_test.go b/chanfitness/chanevent_test.go index 72733dbef..111c8e4e4 100644 --- a/chanfitness/chanevent_test.go +++ b/chanfitness/chanevent_test.go @@ -3,77 +3,269 @@ package chanfitness import ( "testing" "time" + + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/clock" + "github.com/stretchr/testify/require" ) -// TestAdd tests adding events to an event log. It tests the case where the -// channel is open, and should have an event added, and the case where it is -// closed and the event should not be added. -func TestAdd(t *testing.T) { - tests := []struct { - name string - eventLog *chanEventLog - event eventType - expected []eventType - }{ - { - name: "Channel open", - eventLog: &chanEventLog{ - now: time.Now, - }, - event: peerOnlineEvent, - expected: []eventType{peerOnlineEvent}, - }, - { - name: "Channel closed, event not added", - eventLog: &chanEventLog{ - now: time.Now, - }, - event: peerOnlineEvent, - expected: []eventType{}, - }, +// TestPeerLog tests the functionality of the peer log struct. +func TestPeerLog(t *testing.T) { + clock := clock.NewTestClock(testNow) + peerLog := newPeerLog(clock, 0, nil) + + // assertFlapCount is a helper that asserts that our peer's flap count + // and timestamp is set to expected values. 
+ assertFlapCount := func(expectedCount int, expectedTs *time.Time) { + flapCount, flapTs := peerLog.getFlapCount() + require.Equal(t, expectedCount, flapCount) + require.Equal(t, expectedTs, flapTs) } - for _, test := range tests { - test := test + require.Zero(t, peerLog.channelCount()) + require.False(t, peerLog.online) + assertFlapCount(0, nil) - t.Run(test.name, func(t *testing.T) { - test.eventLog.add(test.event) + // Test that looking up an unknown channel fails. + _, _, err := peerLog.channelUptime(wire.OutPoint{Index: 1}) + require.Error(t, err) - for i, e := range test.expected { - if test.eventLog.events[i].eventType != e { - t.Fatalf("Expected event type: %v, got: %v", - e, test.eventLog.events[i].eventType) - } - } - }) + lastFlap := clock.Now() + + // Add an offline event, since we have no channels, we do not expect + // to have any online periods recorded for our peer. However, we should + // increment our flap count for the peer. + peerLog.onlineEvent(false) + require.Len(t, peerLog.getOnlinePeriods(), 0) + assertFlapCount(1, &lastFlap) + + // Bump our test clock's time by an hour so that we can create an online + // event with a distinct time. + lastFlap = testNow.Add(time.Hour) + clock.SetTime(lastFlap) + + // Likewise, if we have an online event, nothing beyond the online state + // of our peer log should change, but our flap count should change. + peerLog.onlineEvent(true) + require.Len(t, peerLog.getOnlinePeriods(), 0) + assertFlapCount(2, &lastFlap) + + // Add a channel and assert that we have one channel listed. Since this + // is the first channel we track for the peer, we expect an online + // event to be added, however, our flap count should not change because + // this is not a new online event, we are just copying one into our log + // for our purposes. 
+ chan1 := wire.OutPoint{ + Index: 1, } + require.NoError(t, peerLog.addChannel(chan1)) + require.Equal(t, 1, peerLog.channelCount()) + assertFlapCount(2, &lastFlap) + + // Assert that we can now successfully get our added channel. + _, _, err = peerLog.channelUptime(chan1) + require.NoError(t, err) + + // Bump our test clock's time so that our current time is different to + // channel open time. + lastFlap = clock.Now().Add(time.Hour) + clock.SetTime(lastFlap) + + // Now that we have added a channel and an hour has passed, we expect + // our uptime and lifetime to both equal an hour. + lifetime, uptime, err := peerLog.channelUptime(chan1) + require.NoError(t, err) + require.Equal(t, time.Hour, lifetime) + require.Equal(t, time.Hour, uptime) + + // Add an offline event for our peer and assert that our flap count is + // incremented. + peerLog.onlineEvent(false) + assertFlapCount(3, &lastFlap) + + // Now we add another channel to our store and assert that we now report + // two channels for this peer. + chan2 := wire.OutPoint{ + Index: 2, + } + require.NoError(t, peerLog.addChannel(chan2)) + require.Equal(t, 2, peerLog.channelCount()) + + // Progress our time again, so that our peer has now been offline for + // two hours. + now := lastFlap.Add(time.Hour * 2) + clock.SetTime(now) + + // Our first channel should report as having been monitored for three + // hours, but only online for one of those hours. + lifetime, uptime, err = peerLog.channelUptime(chan1) + require.NoError(t, err) + require.Equal(t, time.Hour*3, lifetime) + require.Equal(t, time.Hour, uptime) + + // Remove our first channel and check that we can still correctly query + // uptime for the second channel. + require.NoError(t, peerLog.removeChannel(chan1)) + require.Equal(t, 1, peerLog.channelCount()) + + // Our second channel, which was created when our peer was offline, + // should report as having been monitored for two hours, but have zero + // uptime. 
+ lifetime, uptime, err = peerLog.channelUptime(chan2) + require.NoError(t, err) + require.Equal(t, time.Hour*2, lifetime) + require.Equal(t, time.Duration(0), uptime) + + // Finally, remove our second channel and assert that our peer cleans + // up its in memory set of events but keeps its flap count record. + require.NoError(t, peerLog.removeChannel(chan2)) + require.Equal(t, 0, peerLog.channelCount()) + require.Len(t, peerLog.onlineEvents, 0) + assertFlapCount(3, &lastFlap) + + require.Len(t, peerLog.listEvents(), 0) + require.Nil(t, peerLog.stagedEvent) +} + +// TestRateLimitAdd tests the addition of events to the event log with rate +// limiting in place. +func TestRateLimitAdd(t *testing.T) { + // Create a mock clock specifically for this test so that we can + // progress time without affecting the other tests. + mockedClock := clock.NewTestClock(testNow) + + // Create a new peer log. + peerLog := newPeerLog(mockedClock, 0, nil) + require.Nil(t, peerLog.stagedEvent) + + // Create a channel for our peer log, otherwise it will not track online + // events. + require.NoError(t, peerLog.addChannel(wire.OutPoint{})) + + // First, we add an event to the event log. Since we have no previous + // events, we expect this event to staged immediately. + peerEvent := &event{ + timestamp: testNow, + eventType: peerOfflineEvent, + } + + peerLog.onlineEvent(false) + require.Equal(t, peerEvent, peerLog.stagedEvent) + + // We immediately add another event to our event log. We expect our + // staged event to be replaced with this new event, because insufficient + // time has passed since our last event. + peerEvent = &event{ + timestamp: testNow, + eventType: peerOnlineEvent, + } + + peerLog.onlineEvent(true) + require.Equal(t, peerEvent, peerLog.stagedEvent) + + // We get the amount of time that we need to pass before we record an + // event from our rate limiting tiers. We then progress our test clock + // to just after this point. 
+ delta := getRateLimit(peerLog.flapCount) + newNow := testNow.Add(delta + 1) + mockedClock.SetTime(newNow) + + // Now, when we add an event, we expect our staged event to be added + // to our events list and for our new event to be staged. + newEvent := &event{ + timestamp: newNow, + eventType: peerOfflineEvent, + } + peerLog.onlineEvent(false) + + require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents) + require.Equal(t, newEvent, peerLog.stagedEvent) + + // Now, we test the case where we add many events to our log. We expect + // our set of events to be untouched, but for our staged event to be + // updated. + nextEvent := &event{ + timestamp: newNow, + eventType: peerOnlineEvent, + } + + for i := 0; i < 5; i++ { + // We flip the kind of event for each type so that we can check + // that our staged event is definitely changing each time. + if i%2 == 0 { + nextEvent.eventType = peerOfflineEvent + } else { + nextEvent.eventType = peerOnlineEvent + } + + online := nextEvent.eventType == peerOnlineEvent + + peerLog.onlineEvent(online) + require.Equal(t, []*event{peerEvent}, peerLog.onlineEvents) + require.Equal(t, nextEvent, peerLog.stagedEvent) + } + + // Now, we test the case where a peer's flap count is cooled down + // because it has not flapped for a while. Set our peer's flap count so + // that we fall within our second rate limiting tier and assert that we + // are at this level. + peerLog.flapCount = rateLimitScale + 1 + rateLimit := getRateLimit(peerLog.flapCount) + require.Equal(t, rateLimits[1], rateLimit) + + // Progress our clock to the point where we will have our flap count + // cooled. + newNow = mockedClock.Now().Add(flapCountCooldownPeriod) + mockedClock.SetTime(newNow) + + // Add an online event, and expect it to be staged. 
+ onlineEvent := &event{ + timestamp: newNow, + eventType: peerOnlineEvent, + } + peerLog.onlineEvent(true) + require.Equal(t, onlineEvent, peerLog.stagedEvent) + + // Progress our clock by the rate limit level that we will be on if + // our flap rate is cooled down to a lower level. + newNow = mockedClock.Now().Add(rateLimits[0] + 1) + mockedClock.SetTime(newNow) + + // Add another event. We expect this event to be staged and our previous + // event to be flushed to the event log (because our cooldown has been + // applied). + offlineEvent := &event{ + timestamp: newNow, + eventType: peerOfflineEvent, + } + peerLog.onlineEvent(false) + require.Equal(t, offlineEvent, peerLog.stagedEvent) + + flushedEventIdx := len(peerLog.onlineEvents) - 1 + require.Equal( + t, onlineEvent, peerLog.onlineEvents[flushedEventIdx], + ) } // TestGetOnlinePeriod tests the getOnlinePeriod function. It tests the case // where no events present, and the case where an additional online period // must be added because the event log ends on an online event. func TestGetOnlinePeriod(t *testing.T) { - // Set time for consistent testing. 
- now := time.Now() - - fourHoursAgo := now.Add(time.Hour * -4) - threeHoursAgo := now.Add(time.Hour * -3) - twoHoursAgo := now.Add(time.Hour * -2) - oneHourAgo := now.Add(time.Hour * -1) + fourHoursAgo := testNow.Add(time.Hour * -4) + threeHoursAgo := testNow.Add(time.Hour * -3) + twoHoursAgo := testNow.Add(time.Hour * -2) tests := []struct { name string - events []*channelEvent + events []*event expectedOnline []*onlinePeriod - openedAt time.Time - closedAt time.Time }{ { - name: "No events", + name: "no events", }, { - name: "Start on online period", - events: []*channelEvent{ + name: "start on online period", + events: []*event{ { timestamp: threeHoursAgo, eventType: peerOnlineEvent, @@ -91,8 +283,8 @@ func TestGetOnlinePeriod(t *testing.T) { }, }, { - name: "Start on offline period", - events: []*channelEvent{ + name: "start on offline period", + events: []*event{ { timestamp: fourHoursAgo, eventType: peerOfflineEvent, @@ -100,8 +292,8 @@ func TestGetOnlinePeriod(t *testing.T) { }, }, { - name: "End on an online period, channel not closed", - events: []*channelEvent{ + name: "end on an online period", + events: []*event{ { timestamp: fourHoursAgo, eventType: peerOnlineEvent, @@ -110,25 +302,88 @@ func TestGetOnlinePeriod(t *testing.T) { expectedOnline: []*onlinePeriod{ { start: fourHoursAgo, - end: now, + end: testNow, }, }, }, { - name: "End on an online period, channel closed", - events: []*channelEvent{ + name: "duplicate online events", + events: []*event{ { timestamp: fourHoursAgo, eventType: peerOnlineEvent, }, + { + timestamp: threeHoursAgo, + eventType: peerOnlineEvent, + }, }, expectedOnline: []*onlinePeriod{ { start: fourHoursAgo, - end: oneHourAgo, + end: testNow, + }, + }, + }, + { + name: "duplicate offline events", + events: []*event{ + { + timestamp: fourHoursAgo, + eventType: peerOfflineEvent, + }, + { + timestamp: threeHoursAgo, + eventType: peerOfflineEvent, + }, + }, + expectedOnline: nil, + }, + { + name: "duplicate online then offline", + 
events: []*event{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: threeHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: twoHoursAgo, + eventType: peerOfflineEvent, + }, + }, + expectedOnline: []*onlinePeriod{ + { + start: fourHoursAgo, + end: twoHoursAgo, + }, + }, + }, + { + name: "duplicate offline then online", + events: []*event{ + { + timestamp: fourHoursAgo, + eventType: peerOfflineEvent, + }, + { + timestamp: threeHoursAgo, + eventType: peerOfflineEvent, + }, + { + timestamp: twoHoursAgo, + eventType: peerOnlineEvent, + }, + }, + expectedOnline: []*onlinePeriod{ + { + start: twoHoursAgo, + end: testNow, }, }, - closedAt: oneHourAgo, }, } @@ -136,33 +391,16 @@ func TestGetOnlinePeriod(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - score := &chanEventLog{ - events: test.events, - now: func() time.Time { - return now - }, - openedAt: test.openedAt, - closedAt: test.closedAt, + t.Parallel() + + score := &peerLog{ + onlineEvents: test.events, + clock: clock.NewTestClock(testNow), } online := score.getOnlinePeriods() - if len(online) != len(test.expectedOnline) { - t.Fatalf("Expectd: %v online periods, got: %v", - len(test.expectedOnline), len(online)) - } - - for i, o := range test.expectedOnline { - if online[i].start != o.start { - t.Errorf("Expected start: %v, got %v", o.start, - online[i].start) - } - - if online[i].end != o.end { - t.Errorf("Expected end: %v, got %v", o.end, - online[i].end) - } - } + require.Equal(t, test.expectedOnline, online) }) } @@ -170,48 +408,39 @@ func TestGetOnlinePeriod(t *testing.T) { // TestUptime tests channel uptime calculation based on its event log. func TestUptime(t *testing.T) { - // Set time for consistent testing. 
- now := time.Now() - - fourHoursAgo := now.Add(time.Hour * -4) - threeHoursAgo := now.Add(time.Hour * -3) - twoHoursAgo := now.Add(time.Hour * -2) - oneHourAgo := now.Add(time.Hour * -1) + fourHoursAgo := testNow.Add(time.Hour * -4) + threeHoursAgo := testNow.Add(time.Hour * -3) + twoHoursAgo := testNow.Add(time.Hour * -2) + oneHourAgo := testNow.Add(time.Hour * -1) tests := []struct { name string - // opened at is the time the channel was recorded as being open, and is - // never expected to be zero. - openedAt time.Time + // events is the set of event log that we are calculating uptime + // for. + events []*event - // closed at is the tim the channel was recorded as being closed, and - // can have a zero value if the. - closedAt time.Time - - // events is the set of event log that we are calculating uptime for. - events []*channelEvent - - // startTime is the beginning of the period that we are calculating - // uptime for, it cannot have a zero value. + // startTime is the beginning of the period that we are + // calculating uptime for, it cannot have a zero value. startTime time.Time - // endTime is the end of the period that we are calculating uptime for, - // it cannot have a zero value. + // endTime is the end of the period that we are calculating + // uptime for, it cannot have a zero value. endTime time.Time - // expectedUptime is the amount of uptime we expect to be calculated - // over the period specified by startTime and endTime. + // expectedUptime is the amount of uptime we expect to be + // calculated over the period specified by startTime and + // endTime. expectedUptime time.Duration - // expectErr is set to true if we expect an error to be returned when - // calling the uptime function + // expectErr is set to true if we expect an error to be returned + // when calling the uptime function. 
expectErr bool }{ { name: "End before start", endTime: threeHoursAgo, - startTime: now, + startTime: testNow, expectErr: true, }, { @@ -219,64 +448,20 @@ func TestUptime(t *testing.T) { expectErr: true, }, { - name: "Online event and closed", - openedAt: fourHoursAgo, - closedAt: oneHourAgo, - events: []*channelEvent{ + name: "online event and no offline", + events: []*event{ { timestamp: fourHoursAgo, eventType: peerOnlineEvent, }, }, startTime: fourHoursAgo, - endTime: now, - expectedUptime: time.Hour * 3, - }, - { - name: "Online event and not closed", - openedAt: fourHoursAgo, - events: []*channelEvent{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - }, - startTime: fourHoursAgo, - endTime: now, + endTime: testNow, expectedUptime: time.Hour * 4, }, { - name: "Offline event and closed", - openedAt: fourHoursAgo, - closedAt: threeHoursAgo, - events: []*channelEvent{ - { - timestamp: fourHoursAgo, - eventType: peerOfflineEvent, - }, - }, - startTime: fourHoursAgo, - endTime: now, - }, - { - name: "Online event before close", - openedAt: fourHoursAgo, - closedAt: oneHourAgo, - events: []*channelEvent{ - { - timestamp: twoHoursAgo, - eventType: peerOnlineEvent, - }, - }, - startTime: fourHoursAgo, - endTime: now, - expectedUptime: time.Hour, - }, - { - name: "Online then offline event", - openedAt: fourHoursAgo, - closedAt: oneHourAgo, - events: []*channelEvent{ + name: "online then offline event", + events: []*event{ { timestamp: threeHoursAgo, eventType: peerOnlineEvent, @@ -287,44 +472,40 @@ func TestUptime(t *testing.T) { }, }, startTime: fourHoursAgo, - endTime: now, + endTime: testNow, expectedUptime: time.Hour, }, { - name: "Online event before uptime period", - openedAt: fourHoursAgo, - closedAt: oneHourAgo, - events: []*channelEvent{ + name: "online event before uptime period", + events: []*event{ { timestamp: threeHoursAgo, eventType: peerOnlineEvent, }, }, startTime: twoHoursAgo, - endTime: now, - expectedUptime: time.Hour, - }, - { - 
name: "Offline event after uptime period", - openedAt: fourHoursAgo, - events: []*channelEvent{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: now.Add(time.Hour), - eventType: peerOfflineEvent, - }, - }, - startTime: twoHoursAgo, - endTime: now, + endTime: testNow, expectedUptime: time.Hour * 2, }, { - name: "All events within period", - openedAt: fourHoursAgo, - events: []*channelEvent{ + name: "offline event after uptime period", + events: []*event{ + { + timestamp: fourHoursAgo, + eventType: peerOnlineEvent, + }, + { + timestamp: testNow.Add(time.Hour), + eventType: peerOfflineEvent, + }, + }, + startTime: twoHoursAgo, + endTime: testNow, + expectedUptime: time.Hour * 2, + }, + { + name: "all events within period", + events: []*event{ { timestamp: twoHoursAgo, eventType: peerOnlineEvent, @@ -335,31 +516,30 @@ func TestUptime(t *testing.T) { expectedUptime: time.Hour, }, { - name: "Multiple online and offline", - openedAt: now.Add(time.Hour * -8), - events: []*channelEvent{ + name: "multiple online and offline", + events: []*event{ { - timestamp: now.Add(time.Hour * -7), + timestamp: testNow.Add(time.Hour * -7), eventType: peerOnlineEvent, }, { - timestamp: now.Add(time.Hour * -6), + timestamp: testNow.Add(time.Hour * -6), eventType: peerOfflineEvent, }, { - timestamp: now.Add(time.Hour * -5), + timestamp: testNow.Add(time.Hour * -5), eventType: peerOnlineEvent, }, { - timestamp: now.Add(time.Hour * -4), + timestamp: testNow.Add(time.Hour * -4), eventType: peerOfflineEvent, }, { - timestamp: now.Add(time.Hour * -3), + timestamp: testNow.Add(time.Hour * -3), eventType: peerOnlineEvent, }, }, - startTime: now.Add(time.Hour * -8), + startTime: testNow.Add(time.Hour * -8), endTime: oneHourAgo, expectedUptime: time.Hour * 4, }, @@ -369,27 +549,16 @@ func TestUptime(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - score := &chanEventLog{ - events: test.events, - now: func() time.Time { - return now - }, - 
openedAt: test.openedAt, - closedAt: test.closedAt, + score := &peerLog{ + onlineEvents: test.events, + clock: clock.NewTestClock(testNow), } - uptime, err := score.uptime(test.startTime, test.endTime) - if test.expectErr && err == nil { - t.Fatal("Expected an error, got nil") - } - if !test.expectErr && err != nil { - t.Fatalf("Expcted no error, got: %v", err) - } - - if uptime != test.expectedUptime { - t.Errorf("Expected uptime: %v, got: %v", - test.expectedUptime, uptime) - } + uptime, err := score.uptime( + test.startTime, test.endTime, + ) + require.Equal(t, test.expectErr, err != nil) + require.Equal(t, test.expectedUptime, uptime) }) } } diff --git a/chanfitness/chaneventstore.go b/chanfitness/chaneventstore.go index b54409bfc..03b23f1db 100644 --- a/chanfitness/chaneventstore.go +++ b/chanfitness/chaneventstore.go @@ -18,19 +18,31 @@ import ( "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/peernotifier" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/subscribe" + "github.com/lightningnetwork/lnd/ticker" +) + +const ( + // FlapCountFlushRate determines how often we write peer total flap + // count to disk. + FlapCountFlushRate = time.Hour ) var ( - // errShuttingDown is returned when the store cannot respond to a query because - // it has received the shutdown signal. + // errShuttingDown is returned when the store cannot respond to a query + // because it has received the shutdown signal. errShuttingDown = errors.New("channel event store shutting down") - // ErrChannelNotFound is returned when a query is made for a channel that - // the event store does not have knowledge of. + // ErrChannelNotFound is returned when a query is made for a channel + // that the event store does not have knowledge of. 
ErrChannelNotFound = errors.New("channel not found in event store") + + // ErrPeerNotFound is returned when a query is made for a channel + // that has a peer that the event store is not currently tracking. + ErrPeerNotFound = errors.New("peer not found in event store") ) // ChannelEventStore maintains a set of event logs for the node's channels to @@ -38,18 +50,14 @@ var ( type ChannelEventStore struct { cfg *Config - // channels maps channel points to event logs. - channels map[wire.OutPoint]*chanEventLog + // peers tracks all of our currently monitored peers and their channels. + peers map[route.Vertex]peerMonitor - // peers tracks the current online status of peers based on online/offline - // events. - peers map[route.Vertex]bool + // chanInfoRequests serves requests for information about our channel. + chanInfoRequests chan channelInfoRequest - // lifespanRequests serves requests for the lifespan of channels. - lifespanRequests chan lifespanRequest - - // uptimeRequests serves requests for the uptime of channels. - uptimeRequests chan uptimeRequest + // peerRequests serves requests for information about a peer. + peerRequests chan peerRequest quit chan struct{} @@ -60,49 +68,58 @@ type ChannelEventStore struct { // activity. All elements of the config must be non-nil for the event store to // operate. type Config struct { - // SubscribeChannelEvents provides a subscription client which provides a - // stream of channel events. - SubscribeChannelEvents func() (*subscribe.Client, error) + // SubscribeChannelEvents provides a subscription client which provides + // a stream of channel events. + SubscribeChannelEvents func() (subscribe.Subscription, error) // SubscribePeerEvents provides a subscription client which provides a // stream of peer online/offline events. 
- SubscribePeerEvents func() (*subscribe.Client, error) + SubscribePeerEvents func() (subscribe.Subscription, error) - // GetOpenChannels provides a list of existing open channels which is used - // to populate the ChannelEventStore with a set of channels on startup. + // GetOpenChannels provides a list of existing open channels which is + // used to populate the ChannelEventStore with a set of channels on + // startup. GetOpenChannels func() ([]*channeldb.OpenChannel, error) + + // Clock is the time source that the subsystem uses, provided here + // for ease of testing. + Clock clock.Clock + + // WriteFlapCounts records the flap count for a set of peers on disk. + WriteFlapCount func(map[route.Vertex]*channeldb.FlapCount) error + + // ReadFlapCount gets the flap count for a peer on disk. + ReadFlapCount func(route.Vertex) (*channeldb.FlapCount, error) + + // FlapCountTicker is a ticker which controls how often we flush our + // peer's flap count to disk. + FlapCountTicker ticker.Ticker } -// lifespanRequest contains the channel ID required to query the store for a -// channel's lifespan and a blocking response channel on which the result is -// sent. -type lifespanRequest struct { +// peerFlapCountMap is the map used to map peers to flap counts, declared here +// to allow shorter function signatures. +type peerFlapCountMap map[route.Vertex]*channeldb.FlapCount + +type channelInfoRequest struct { + peer route.Vertex channelPoint wire.OutPoint - responseChan chan lifespanResponse + responseChan chan channelInfoResponse } -// lifespanResponse contains the response to a lifespanRequest and an error if -// one occurred. -type lifespanResponse struct { - start time.Time - end time.Time - err error +type channelInfoResponse struct { + info *ChannelInfo + err error } -// uptimeRequest contains the parameters required to query the store for a -// channel's uptime and a blocking response channel on which the result is sent. 
-type uptimeRequest struct { - channelPoint wire.OutPoint - startTime time.Time - endTime time.Time - responseChan chan uptimeResponse +type peerRequest struct { + peer route.Vertex + responseChan chan peerResponse } -// uptimeResponse contains the response to an uptimeRequest and an error if one -// occurred. -type uptimeResponse struct { - uptime time.Duration - err error +type peerResponse struct { + flapCount int + ts *time.Time + err error } // NewChannelEventStore initializes an event store with the config provided. @@ -111,10 +128,9 @@ type uptimeResponse struct { func NewChannelEventStore(config *Config) *ChannelEventStore { store := &ChannelEventStore{ cfg: config, - channels: make(map[wire.OutPoint]*chanEventLog), - peers: make(map[route.Vertex]bool), - lifespanRequests: make(chan lifespanRequest), - uptimeRequests: make(chan uptimeRequest), + peers: make(map[route.Vertex]peerMonitor), + chanInfoRequests: make(chan channelInfoRequest), + peerRequests: make(chan peerRequest), quit: make(chan struct{}), } @@ -140,7 +156,8 @@ func (c *ChannelEventStore) Start() error { return err } - // cancel should be called to cancel all subscriptions if an error occurs. + // cancel should be called to cancel all subscriptions if an error + // occurs. cancel := func() { channelClient.Cancel() peerClient.Cancel() @@ -166,8 +183,8 @@ func (c *ChannelEventStore) Start() error { return err } - // Add existing channels to the channel store with an initial peer - // online or offline event. + // Add existing channels to the channel store with an initial + // peer online or offline event. c.addChannel(ch.FundingOutpoint, peerKey) } @@ -186,63 +203,98 @@ func (c *ChannelEventStore) Start() error { func (c *ChannelEventStore) Stop() { log.Info("Stopping event store") + c.cfg.FlapCountTicker.Stop() + // Stop the consume goroutine. 
close(c.quit) c.wg.Wait() } -// addChannel adds a new channel to the ChannelEventStore's map of channels with -// an initial peer online state (if the peer is online). If the channel is -// already present in the map, the function returns early. This function should -// be called to add existing channels on startup and when open channel events -// are observed. +// addChannel checks whether we are already tracking a channel's peer, creates a +// new peer log to track it if we are not yet monitoring it, and adds the +// channel. func (c *ChannelEventStore) addChannel(channelPoint wire.OutPoint, peer route.Vertex) { - // Check for the unexpected case where the channel is already in the store. - _, ok := c.channels[channelPoint] - if ok { - log.Errorf("Channel %v duplicated in channel store", channelPoint) + peerMonitor, err := c.getPeerMonitor(peer) + if err != nil { + log.Error("could not create monitor: %v", err) return } - // Create an event log for the channel. - eventLog := newEventLog(channelPoint, peer, time.Now) + if err := peerMonitor.addChannel(channelPoint); err != nil { + log.Errorf("could not add channel: %v", err) + } +} - // If the peer is already online, add a peer online event to record - // the starting state of the peer. - if c.peers[peer] { - eventLog.add(peerOnlineEvent) +// getPeerMonitor tries to get an existing peer monitor from our in memory list, +// and falls back to creating a new monitor if it is not currently known. +func (c *ChannelEventStore) getPeerMonitor(peer route.Vertex) (peerMonitor, + error) { + + peerMonitor, ok := c.peers[peer] + if ok { + return peerMonitor, nil } - c.channels[channelPoint] = eventLog + var ( + flapCount int + lastFlap *time.Time + ) + + historicalFlap, err := c.cfg.ReadFlapCount(peer) + switch err { + // If we do not have any records for this peer we set a 0 flap count + // and timestamp. 
+ case channeldb.ErrNoPeerBucket: + + case nil: + flapCount = int(historicalFlap.Count) + lastFlap = &historicalFlap.LastFlap + + // Return if we get an unexpected error. + default: + return nil, err + } + + peerMonitor = newPeerLog(c.cfg.Clock, flapCount, lastFlap) + c.peers[peer] = peerMonitor + + return peerMonitor, nil } // closeChannel records a closed time for a channel, and returns early is the -// channel is not known to the event store. -func (c *ChannelEventStore) closeChannel(channelPoint wire.OutPoint) { - // Check for the unexpected case where the channel is unknown to the store. - eventLog, ok := c.channels[channelPoint] +// channel is not known to the event store. We log warnings (rather than errors) +// when we cannot find a peer/channel because channels that we restore from a +// static channel backup do not have their open notified, so the event store +// never learns about them, but they are closed using the regular flow so we +// will try to remove them on close. At present, we cannot easily distinguish +// between these closes and others. +func (c *ChannelEventStore) closeChannel(channelPoint wire.OutPoint, + peer route.Vertex) { + + peerMonitor, ok := c.peers[peer] if !ok { - log.Errorf("Close channel %v unknown to store", channelPoint) + log.Warnf("peer not known to store: %v", peer) return } - eventLog.close() + if err := peerMonitor.removeChannel(channelPoint); err != nil { + log.Warnf("could not remove channel: %v", err) + } } -// peerEvent adds a peer online or offline event to all channels we currently -// have open with a peer. -func (c *ChannelEventStore) peerEvent(peer route.Vertex, event eventType) { - // Track current online status of peers in the channelEventStore. - c.peers[peer] = event == peerOnlineEvent - - for _, eventLog := range c.channels { - if eventLog.peer == peer { - eventLog.add(event) - } +// peerEvent creates a peer monitor for a peer if we do not currently have +// one, and adds an online event to it. 
+func (c *ChannelEventStore) peerEvent(peer route.Vertex, online bool) { + peerMonitor, err := c.getPeerMonitor(peer) + if err != nil { + log.Error("could not create monitor: %v", err) + return } + + peerMonitor.onlineEvent(online) } // subscriptions abstracts away from subscription clients to allow for mocking. @@ -256,8 +308,22 @@ type subscriptions struct { // the event store with channel and peer events, and serves requests for channel // uptime and lifespan. func (c *ChannelEventStore) consume(subscriptions *subscriptions) { - defer c.wg.Done() - defer subscriptions.cancel() + // Start our flap count ticker. + c.cfg.FlapCountTicker.Resume() + + // On exit, we will cancel our subscriptions and write our most recent + // flap counts to disk. This ensures that we have consistent data in + // the case of a graceful shutdown. If we do not shutdown gracefully, + // our worst case is data from our last flap count tick (1H). + defer func() { + subscriptions.cancel() + + if err := c.recordFlapCount(); err != nil { + log.Errorf("error recording flap on shutdown: %v", err) + } + + c.wg.Done() + }() // Consume events until the channel is closed. for { @@ -265,68 +331,78 @@ func (c *ChannelEventStore) consume(subscriptions *subscriptions) { // Process channel opened and closed events. case e := <-subscriptions.channelUpdates: switch event := e.(type) { - // A new channel has been opened, we must add the channel to the - // store and record a channel open event. + // A new channel has been opened, we must add the + // channel to the store and record a channel open event. 
case channelnotifier.OpenChannelEvent: + compressed := event.Channel.IdentityPub.SerializeCompressed() peerKey, err := route.NewVertexFromBytes( - event.Channel.IdentityPub.SerializeCompressed(), + compressed, ) if err != nil { - log.Errorf("Could not get vertex from: %v", - event.Channel.IdentityPub.SerializeCompressed()) + log.Errorf("Could not get vertex "+ + "from: %v", compressed) } - c.addChannel(event.Channel.FundingOutpoint, peerKey) + c.addChannel( + event.Channel.FundingOutpoint, peerKey, + ) - // A channel has been closed, we must remove the channel from the - // store and record a channel closed event. + // A channel has been closed, we must remove the channel + // from the store and record a channel closed event. case channelnotifier.ClosedChannelEvent: - c.closeChannel(event.CloseSummary.ChanPoint) + compressed := event.CloseSummary.RemotePub.SerializeCompressed() + peerKey, err := route.NewVertexFromBytes( + compressed, + ) + if err != nil { + log.Errorf("Could not get vertex "+ + "from: %v", compressed) + continue + } + + c.closeChannel( + event.CloseSummary.ChanPoint, peerKey, + ) } // Process peer online and offline events. case e := <-subscriptions.peerUpdates: switch event := e.(type) { - // We have reestablished a connection with our peer, and should - // record an online event for any channels with that peer. + // We have reestablished a connection with our peer, + // and should record an online event for any channels + // with that peer. case peernotifier.PeerOnlineEvent: - c.peerEvent(event.PubKey, peerOnlineEvent) + c.peerEvent(event.PubKey, true) - // We have lost a connection with our peer, and should record an - // offline event for any channels with that peer. + // We have lost a connection with our peer, and should + // record an offline event for any channels with that + // peer. 
case peernotifier.PeerOfflineEvent: - c.peerEvent(event.PubKey, peerOfflineEvent) + c.peerEvent(event.PubKey, false) } // Serve all requests for channel lifetime. - case req := <-c.lifespanRequests: - var resp lifespanResponse - - channel, ok := c.channels[req.channelPoint] - if !ok { - resp.err = ErrChannelNotFound - } else { - resp.start = channel.openedAt - resp.end = channel.closedAt - } + case req := <-c.chanInfoRequests: + var resp channelInfoResponse + resp.info, resp.err = c.getChanInfo(req) req.responseChan <- resp - // Serve requests for channel uptime. - case req := <-c.uptimeRequests: - var resp uptimeResponse - - channel, ok := c.channels[req.channelPoint] - if !ok { - resp.err = ErrChannelNotFound - } else { - uptime, err := channel.uptime(req.startTime, req.endTime) - resp.uptime = uptime - resp.err = err - } + // Serve all requests for information about our peer. + case req := <-c.peerRequests: + var resp peerResponse + resp.flapCount, resp.ts, resp.err = c.flapCount( + req.peer, + ) req.responseChan <- resp + case <-c.cfg.FlapCountTicker.Ticks(): + if err := c.recordFlapCount(); err != nil { + log.Errorf("could not record flap "+ + "count: %v", err) + } + // Exit if the store receives the signal to shutdown. case <-c.quit: return @@ -334,65 +410,151 @@ func (c *ChannelEventStore) consume(subscriptions *subscriptions) { } } -// GetLifespan returns the opening and closing time observed for a channel and -// a boolean to indicate whether the channel is known the the event store. If -// the channel is still open, a zero close time is returned. -func (c *ChannelEventStore) GetLifespan( - channelPoint wire.OutPoint) (time.Time, time.Time, error) { +// ChannelInfo provides the set of information that the event store has recorded +// for a channel. +type ChannelInfo struct { + // Lifetime is the total amount of time we have monitored the channel + // for. 
+ Lifetime time.Duration - request := lifespanRequest{ + // Uptime is the total amount of time that the channel peer has been + // observed as online during the monitored lifespan. + Uptime time.Duration +} + +// GetChanInfo gets all the information we have on a channel in the event store. +func (c *ChannelEventStore) GetChanInfo(channelPoint wire.OutPoint, + peer route.Vertex) (*ChannelInfo, error) { + + request := channelInfoRequest{ + peer: peer, channelPoint: channelPoint, - responseChan: make(chan lifespanResponse), + responseChan: make(chan channelInfoResponse), } - // Send a request for the channel's lifespan to the main event loop, or - // return early with an error if the store has already received a shutdown - // signal. + // Send a request for the channel's information to the main event loop, + // or return early with an error if the store has already received a + // shutdown signal. select { - case c.lifespanRequests <- request: + case c.chanInfoRequests <- request: case <-c.quit: - return time.Time{}, time.Time{}, errShuttingDown + return nil, errShuttingDown } - // Return the response we receive on the response channel or exit early if - // the store is instructed to exit. + // Return the response we receive on the response channel or exit early + // if the store is instructed to exit. select { case resp := <-request.responseChan: - return resp.start, resp.end, resp.err + return resp.info, resp.err case <-c.quit: - return time.Time{}, time.Time{}, errShuttingDown + return nil, errShuttingDown } } -// GetUptime returns the uptime of a channel over a period and an error if the -// channel cannot be found or the uptime calculation fails. -func (c *ChannelEventStore) GetUptime(channelPoint wire.OutPoint, startTime, - endTime time.Time) (time.Duration, error) { +// getChanInfo collects channel information for a channel. It gets uptime over +// the full lifetime of the channel. 
+func (c *ChannelEventStore) getChanInfo(req channelInfoRequest) (*ChannelInfo, + error) { - request := uptimeRequest{ - channelPoint: channelPoint, - startTime: startTime, - endTime: endTime, - responseChan: make(chan uptimeResponse), + peerMonitor, ok := c.peers[req.peer] + if !ok { + return nil, ErrPeerNotFound } - // Send a request for the channel's uptime to the main event loop, or - // return early with an error if the store has already received a shutdown - // signal. + lifetime, uptime, err := peerMonitor.channelUptime(req.channelPoint) + if err != nil { + return nil, err + } + + return &ChannelInfo{ + Lifetime: lifetime, + Uptime: uptime, + }, nil +} + +// FlapCount returns the flap count we have for a peer and the timestamp of its +// last flap. If we do not have any flaps recorded for the peer, the last flap +// timestamp will be nil. +func (c *ChannelEventStore) FlapCount(peer route.Vertex) (int, *time.Time, + error) { + + request := peerRequest{ + peer: peer, + responseChan: make(chan peerResponse), + } + + // Send a request for the peer's information to the main event loop, + // or return early with an error if the store has already received a + // shutdown signal. select { - case c.uptimeRequests <- request: + case c.peerRequests <- request: case <-c.quit: - return 0, errShuttingDown + return 0, nil, errShuttingDown } - // Return the response we receive on the response channel or exit early if - // the store is instructed to exit. + // Return the response we receive on the response channel or exit early + // if the store is instructed to exit. select { case resp := <-request.responseChan: - return resp.uptime, resp.err + return resp.flapCount, resp.ts, resp.err case <-c.quit: - return 0, errShuttingDown + return 0, nil, errShuttingDown } } + +// flapCount gets our peer flap count and last flap timestamp from our in memory +// record of a peer, falling back to on disk if we are not currently tracking +// the peer. 
If we have no flap count recorded for the peer, a nil last flap +// time will be returned. +func (c *ChannelEventStore) flapCount(peer route.Vertex) (int, *time.Time, + error) { + + // First check whether we are tracking this peer in memory, because this + // record will have the most accurate flap count. We do not fail if we + // can't find the peer in memory, because we may have previously + // recorded its flap count on disk. + peerMonitor, ok := c.peers[peer] + if ok { + count, ts := peerMonitor.getFlapCount() + return count, ts, nil + } + + // Try to get our flap count from the database. If this value is not + // recorded, we return a nil last flap time to indicate that we have no + // record of the peer's flap count. + flapCount, err := c.cfg.ReadFlapCount(peer) + switch err { + case channeldb.ErrNoPeerBucket: + return 0, nil, nil + + case nil: + return int(flapCount.Count), &flapCount.LastFlap, nil + + default: + return 0, nil, err + } +} + +// recordFlapCount will record our flap count for each peer that we are +// currently tracking, skipping peers that have a 0 flap count. 
+func (c *ChannelEventStore) recordFlapCount() error { + updates := make(peerFlapCountMap) + + for peer, monitor := range c.peers { + flapCount, lastFlap := monitor.getFlapCount() + if lastFlap == nil { + continue + } + + updates[peer] = &channeldb.FlapCount{ + Count: uint32(flapCount), + LastFlap: *lastFlap, + } + } + + log.Debugf("recording flap count for: %v peers", len(updates)) + + return c.cfg.WriteFlapCount(updates) +} diff --git a/chanfitness/chaneventstore_test.go b/chanfitness/chaneventstore_test.go index 5a7858846..0c9921119 100644 --- a/chanfitness/chaneventstore_test.go +++ b/chanfitness/chaneventstore_test.go @@ -2,38 +2,40 @@ package chanfitness import ( "errors" + "math/big" "testing" "time" "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/channelnotifier" - "github.com/lightningnetwork/lnd/peernotifier" + "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/subscribe" + "github.com/stretchr/testify/require" ) +// testNow is the current time tests will use. +var testNow = time.Unix(1592465134, 0) + // TestStartStoreError tests the starting of the store in cases where the setup // functions fail. It does not test the mechanics of consuming events because // these are covered in a separate set of tests. func TestStartStoreError(t *testing.T) { - // Ok and erroring subscribe functions are defined here to de-clutter tests. - okSubscribeFunc := func() (*subscribe.Client, error) { - return &subscribe.Client{ - Cancel: func() {}, - }, nil + // Ok and erroring subscribe functions are defined here to de-clutter + // tests. 
+ okSubscribeFunc := func() (subscribe.Subscription, error) { + return newMockSubscription(t), nil } - errSubscribeFunc := func() (client *subscribe.Client, e error) { + errSubscribeFunc := func() (subscribe.Subscription, error) { return nil, errors.New("intentional test err") } tests := []struct { name string - ChannelEvents func() (*subscribe.Client, error) - PeerEvents func() (*subscribe.Client, error) + ChannelEvents func() (subscribe.Subscription, error) + PeerEvents func() (subscribe.Subscription, error) GetChannels func() ([]*channeldb.OpenChannel, error) }{ { @@ -49,7 +51,7 @@ func TestStartStoreError(t *testing.T) { name: "Get open channels fails", ChannelEvents: okSubscribeFunc, PeerEvents: okSubscribeFunc, - GetChannels: func() (channels []*channeldb.OpenChannel, e error) { + GetChannels: func() ([]*channeldb.OpenChannel, error) { return nil, errors.New("intentional test err") }, }, @@ -59,15 +61,18 @@ func TestStartStoreError(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { + clock := clock.NewTestClock(testNow) + store := NewChannelEventStore(&Config{ SubscribeChannelEvents: test.ChannelEvents, SubscribePeerEvents: test.PeerEvents, GetOpenChannels: test.GetChannels, + Clock: clock, }) err := store.Start() - // Check that we receive an error, because the test only checks for - // error cases. + // Check that we receive an error, because the test only + // checks for error cases. if err == nil { t.Fatalf("Expected error on startup, got: nil") } @@ -75,29 +80,6 @@ func TestStartStoreError(t *testing.T) { } } -// getTestChannel returns a non-zero peer pubKey, serialized pubKey and channel -// outpoint for testing. 
-func getTestChannel(t *testing.T) (*btcec.PublicKey, route.Vertex, - wire.OutPoint) { - - privKey, err := btcec.NewPrivateKey(btcec.S256()) - if err != nil { - t.Fatalf("Error getting pubkey: %v", err) - } - - pubKey, err := route.NewVertexFromBytes( - privKey.PubKey().SerializeCompressed(), - ) - if err != nil { - t.Fatalf("Could not create vertex: %v", err) - } - - return privKey.PubKey(), pubKey, wire.OutPoint{ - Hash: [chainhash.HashSize]byte{1, 2, 3}, - Index: 0, - } -} - // TestMonitorChannelEvents tests the store's handling of channel and peer // events. It tests for the unexpected cases where we receive a channel open for // an already known channel and but does not test for closing an unknown channel @@ -105,425 +87,257 @@ func getTestChannel(t *testing.T) (*btcec.PublicKey, route.Vertex, // through an eventLog which does not exist. This test does not test handling // of uptime and lifespan requests, as they are tested in their own tests. func TestMonitorChannelEvents(t *testing.T) { - pubKey, vertex, chanPoint := getTestChannel(t) + var ( + pubKey = &btcec.PublicKey{ + X: big.NewInt(0), + Y: big.NewInt(1), + Curve: btcec.S256(), + } - tests := []struct { - name string + chan1 = wire.OutPoint{Index: 1} + chan2 = wire.OutPoint{Index: 2} + ) - // generateEvents takes channels which represent the updates channels - // for subscription clients and passes events in the desired order. - // This function is intended to be blocking so that the test does not - // have a data race with event consumption, so the channels should not - // be buffered. - generateEvents func(channelEvents, peerEvents chan<- interface{}) + peer1, err := route.NewVertexFromBytes(pubKey.SerializeCompressed()) + require.NoError(t, err) - // expectedEvents is the expected set of event types in the store. 
- expectedEvents []eventType - }{ - { - name: "Channel opened, peer comes online", - generateEvents: func(channelEvents, peerEvents chan<- interface{}) { - // Add an open channel event - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } + t.Run("peer comes online after channel open", func(t *testing.T) { + gen := func(ctx *chanEventStoreTestCtx) { + ctx.sendChannelOpenedUpdate(pubKey, chan1) + ctx.peerEvent(peer1, true) + } - // Add a peer online event. - peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} - }, - expectedEvents: []eventType{peerOnlineEvent}, - }, - { - name: "Duplicate channel open events", - generateEvents: func(channelEvents, peerEvents chan<- interface{}) { - // Add an open channel event - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } + testEventStore(t, gen, peer1, 1) + }) - // Add a peer online event. - peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} + t.Run("duplicate channel open events", func(t *testing.T) { + gen := func(ctx *chanEventStoreTestCtx) { + ctx.sendChannelOpenedUpdate(pubKey, chan1) + ctx.sendChannelOpenedUpdate(pubKey, chan1) + ctx.peerEvent(peer1, true) + } - // Add a duplicate channel open event. - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } - }, - expectedEvents: []eventType{peerOnlineEvent}, - }, - { - name: "Channel opened, peer already online", - generateEvents: func(channelEvents, peerEvents chan<- interface{}) { - // Add a peer online event. 
- peerEvents <- peernotifier.PeerOnlineEvent{PubKey: vertex} + testEventStore(t, gen, peer1, 1) + }) - // Add an open channel event - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } - }, - expectedEvents: []eventType{peerOnlineEvent}, - }, + t.Run("peer online before channel created", func(t *testing.T) { + gen := func(ctx *chanEventStoreTestCtx) { + ctx.peerEvent(peer1, true) + ctx.sendChannelOpenedUpdate(pubKey, chan1) + } - { - name: "Channel opened, peer offline, closed", - generateEvents: func(channelEvents, peerEvents chan<- interface{}) { - // Add an open channel event - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } + testEventStore(t, gen, peer1, 1) + }) - // Add a peer online event. - peerEvents <- peernotifier.PeerOfflineEvent{PubKey: vertex} + t.Run("multiple channels for peer", func(t *testing.T) { + gen := func(ctx *chanEventStoreTestCtx) { + ctx.peerEvent(peer1, true) + ctx.sendChannelOpenedUpdate(pubKey, chan1) - // Add a close channel event. - channelEvents <- channelnotifier.ClosedChannelEvent{ - CloseSummary: &channeldb.ChannelCloseSummary{ - ChanPoint: chanPoint, - }, - } - }, - expectedEvents: []eventType{peerOfflineEvent}, - }, - { - name: "Event after channel close not recorded", - generateEvents: func(channelEvents, peerEvents chan<- interface{}) { - // Add an open channel event - channelEvents <- channelnotifier.OpenChannelEvent{ - Channel: &channeldb.OpenChannel{ - FundingOutpoint: chanPoint, - IdentityPub: pubKey, - }, - } + ctx.peerEvent(peer1, false) + ctx.sendChannelOpenedUpdate(pubKey, chan2) + } - // Add a close channel event. - channelEvents <- channelnotifier.ClosedChannelEvent{ - CloseSummary: &channeldb.ChannelCloseSummary{ - ChanPoint: chanPoint, - }, - } + testEventStore(t, gen, peer1, 2) + }) - // Add a peer online event. 
- peerEvents <- peernotifier.PeerOfflineEvent{PubKey: vertex} - }, - }, - } + t.Run("multiple channels for peer, one closed", func(t *testing.T) { + gen := func(ctx *chanEventStoreTestCtx) { + ctx.peerEvent(peer1, true) + ctx.sendChannelOpenedUpdate(pubKey, chan1) - for _, test := range tests { - test := test + ctx.peerEvent(peer1, false) + ctx.sendChannelOpenedUpdate(pubKey, chan2) - t.Run(test.name, func(t *testing.T) { - // Create a store with the channels and online peers specified - // by the test. - store := NewChannelEventStore(&Config{}) + ctx.closeChannel(chan1, pubKey) + ctx.peerEvent(peer1, true) + } - // Create channels which represent the subscriptions we have to peer - // and client events. - channelEvents := make(chan interface{}) - peerEvents := make(chan interface{}) + testEventStore(t, gen, peer1, 1) + }) - store.wg.Add(1) - go store.consume(&subscriptions{ - channelUpdates: channelEvents, - peerUpdates: peerEvents, - cancel: func() {}, - }) - - // Add events to the store then kill the goroutine using store.Stop. - test.generateEvents(channelEvents, peerEvents) - store.Stop() - - // Retrieve the eventLog for the channel and check that its - // contents are as expected. - eventLog, ok := store.channels[chanPoint] - if !ok { - t.Fatalf("Expected to find event store") - } - - for i, e := range eventLog.events { - if test.expectedEvents[i] != e.eventType { - t.Fatalf("Expected type: %v, got: %v", - test.expectedEvents[i], e.eventType) - } - } - }) - } } -// TestGetLifetime tests the GetLifetime function for the cases where a channel +// testEventStore creates a new test contexts, generates a set of events for it +// and tests that it has the number of channels we expect. 
+func testEventStore(t *testing.T, generateEvents func(*chanEventStoreTestCtx), + peer route.Vertex, expectedChannels int) { + + testCtx := newChanEventStoreTestCtx(t) + testCtx.start() + + generateEvents(testCtx) + + // Shutdown the store so that we can safely access the maps in our event + // store. + testCtx.stop() + + // Get our peer and check that it has the channels we expect. + monitor, ok := testCtx.store.peers[peer] + require.True(t, ok) + + require.Equal(t, expectedChannels, monitor.channelCount()) +} + +// TestStoreFlapCount tests flushing of flap counts to disk on timer ticks and +// on store shutdown. +func TestStoreFlapCount(t *testing.T) { + testCtx := newChanEventStoreTestCtx(t) + testCtx.start() + + pubkey, _, _ := testCtx.createChannel() + testCtx.peerEvent(pubkey, false) + + // Now, we tick our flap count ticker. We expect our main goroutine to + // flush our tick count to disk. + testCtx.tickFlapCount() + + // Since we just tracked a offline event, we expect a single flap for + // our peer. + expectedUpdate := peerFlapCountMap{ + pubkey: { + Count: 1, + LastFlap: testCtx.clock.Now(), + }, + } + + testCtx.assertFlapCountUpdated() + testCtx.assertFlapCountUpdates(expectedUpdate) + + // Create three events for out peer, online/offline/online. + testCtx.peerEvent(pubkey, true) + testCtx.peerEvent(pubkey, false) + testCtx.peerEvent(pubkey, true) + + // Trigger another write. + testCtx.tickFlapCount() + + // Since we have processed 3 more events for our peer, we update our + // expected online map to have a flap count of 4 for this peer. + expectedUpdate[pubkey] = &channeldb.FlapCount{ + Count: 4, + LastFlap: testCtx.clock.Now(), + } + testCtx.assertFlapCountUpdated() + testCtx.assertFlapCountUpdates(expectedUpdate) + + testCtx.stop() +} + +// TestGetChanInfo tests the GetChanInfo function for the cases where a channel // is known and unknown to the store. 
-func TestGetLifetime(t *testing.T) { - now := time.Now() +func TestGetChanInfo(t *testing.T) { + ctx := newChanEventStoreTestCtx(t) + ctx.start() - tests := []struct { - name string - channelFound bool - channelPoint wire.OutPoint - opened time.Time - closed time.Time - expectedError error - }{ - { - name: "Channel found", - channelFound: true, - opened: now, - closed: now.Add(time.Hour * -1), - expectedError: nil, - }, - { - name: "Channel not found", - expectedError: ErrChannelNotFound, - }, - } + // Make a note of the time that our mocked clock starts on. + now := ctx.clock.Now() - for _, test := range tests { - test := test + // Create mock vars for a channel but do not add them to our store yet. + peer, pk, channel := ctx.newChannel() - t.Run(test.name, func(t *testing.T) { - // Create and empty events store for testing. - store := NewChannelEventStore(&Config{}) + // Send an online event for our peer, although we do not yet have an + // open channel. + ctx.peerEvent(peer, true) - // Start goroutine which consumes GetLifespan requests. - store.wg.Add(1) - go store.consume(&subscriptions{ - channelUpdates: make(chan interface{}), - peerUpdates: make(chan interface{}), - cancel: func() {}, - }) + // Try to get info for a channel that has not been opened yet, we + // expect to get an error. + _, err := ctx.store.GetChanInfo(channel, peer) + require.Equal(t, ErrChannelNotFound, err) - // Stop the store's go routine. - defer store.Stop() + // Now we send our store a notification that a channel has been opened. + ctx.sendChannelOpenedUpdate(pk, channel) - // Add channel to eventStore if the test indicates that it should - // be present. - if test.channelFound { - store.channels[test.channelPoint] = &chanEventLog{ - openedAt: test.opened, - closedAt: test.closed, - } - } + // Wait for our channel to be recognized by our store. We need to wait + // for the channel to be created so that we do not update our time + // before the channel open is processed. 
+ require.Eventually(t, func() bool { + _, err = ctx.store.GetChanInfo(channel, peer) + return err == nil + }, timeout, time.Millisecond*20) - open, close, err := store.GetLifespan(test.channelPoint) - if test.expectedError != err { - t.Fatalf("Expected: %v, got: %v", test.expectedError, err) - } + // Increment our test clock by an hour. + now = now.Add(time.Hour) + ctx.clock.SetTime(now) - if open != test.opened { - t.Errorf("Expected: %v, got %v", test.opened, open) - } + // At this stage our channel has been open and online for an hour. + info, err := ctx.store.GetChanInfo(channel, peer) + require.NoError(t, err) + require.Equal(t, time.Hour, info.Lifetime) + require.Equal(t, time.Hour, info.Uptime) - if close != test.closed { - t.Errorf("Expected: %v, got %v", test.closed, close) - } - }) - } + // Now we send a peer offline event for our channel. + ctx.peerEvent(peer, false) + + // Since we have not bumped our mocked time, our uptime calculations + // should be the same, even though we've just processed an offline + // event. + info, err = ctx.store.GetChanInfo(channel, peer) + require.NoError(t, err) + require.Equal(t, time.Hour, info.Lifetime) + require.Equal(t, time.Hour, info.Uptime) + + // Progress our time again. This time, our peer is currently tracked as + // being offline, so we expect our channel info to reflect that the peer + // has been offline for this period. + now = now.Add(time.Hour) + ctx.clock.SetTime(now) + + info, err = ctx.store.GetChanInfo(channel, peer) + require.NoError(t, err) + require.Equal(t, time.Hour*2, info.Lifetime) + require.Equal(t, time.Hour, info.Uptime) + + ctx.stop() } -// TestGetUptime tests the getUptime call for channels known to the event store. -// It does not test the trivial case where a channel is unknown to the store, -// because this is simply a zero return if an item is not found in a map. 
It -// tests the unexpected edge cases where a tracked channel does not have any -// events recorded, and when a zero time is specified for the uptime range. -func TestGetUptime(t *testing.T) { - // Set time for deterministic unit tests. - now := time.Now() +// TestFlapCount tests querying the store for peer flap counts, covering the +// case where the peer is tracked in memory, and the case where we need to +// lookup the peer on disk. +func TestFlapCount(t *testing.T) { + clock := clock.NewTestClock(testNow) - twoHoursAgo := now.Add(time.Hour * -2) - fourHoursAgo := now.Add(time.Hour * -4) + var ( + peer = route.Vertex{9, 9, 9} + peerFlapCount = 3 + lastFlap = clock.Now() + ) - tests := []struct { - name string - - channelPoint wire.OutPoint - - // events is the set of events we expect to find in the channel store. - events []*channelEvent - - // openedAt is the time the channel is recorded as open by the store. - openedAt time.Time - - // closedAt is the time the channel is recorded as closed by the store. - // If the channel is still open, this value is zero. - closedAt time.Time - - // channelFound is true if we expect to find the channel in the store. - channelFound bool - - // startTime specifies the beginning of the uptime range we want to - // calculate. - startTime time.Time - - // endTime specified the end of the uptime range we want to calculate. 
- endTime time.Time - - expectedUptime time.Duration - - expectedError error - }{ - { - name: "No events", - startTime: twoHoursAgo, - endTime: now, - channelFound: true, - expectedError: nil, - }, - { - name: "50% Uptime", - events: []*channelEvent{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - { - timestamp: twoHoursAgo, - eventType: peerOfflineEvent, - }, - }, - openedAt: fourHoursAgo, - expectedUptime: time.Hour * 2, - startTime: fourHoursAgo, - endTime: now, - channelFound: true, - expectedError: nil, - }, - { - name: "Zero start time", - events: []*channelEvent{ - { - timestamp: fourHoursAgo, - eventType: peerOnlineEvent, - }, - }, - openedAt: fourHoursAgo, - expectedUptime: time.Hour * 4, - endTime: now, - channelFound: true, - expectedError: nil, - }, - { - name: "Channel not found", - startTime: twoHoursAgo, - endTime: now, - channelFound: false, - expectedError: ErrChannelNotFound, - }, + // Create a test context with one peer's flap count already recorded, + // which mocks it already having its flap count stored on disk. + ctx := newChanEventStoreTestCtx(t) + ctx.flapUpdates[peer] = &channeldb.FlapCount{ + Count: uint32(peerFlapCount), + LastFlap: lastFlap, } - for _, test := range tests { - test := test + ctx.start() - t.Run(test.name, func(t *testing.T) { - // Set up event store with the events specified for the test and - // mocked time. - store := NewChannelEventStore(&Config{}) + // Create test variables for a peer and channel, but do not add it to + // our store yet. + peer1 := route.Vertex{1, 2, 3} - // Start goroutine which consumes GetUptime requests. - store.wg.Add(1) - go store.consume(&subscriptions{ - channelUpdates: make(chan interface{}), - peerUpdates: make(chan interface{}), - cancel: func() {}, - }) + // First, query for a peer that we have no record of in memory or on + // disk and confirm that we indicate that the peer was not found. 
+ _, ts, err := ctx.store.FlapCount(peer1) + require.NoError(t, err) + require.Nil(t, ts) - // Stop the store's goroutine. - defer store.Stop() + // Send an online event for our peer. + ctx.peerEvent(peer1, true) - // Add the channel to the store if it is intended to be found. - if test.channelFound { - store.channels[test.channelPoint] = &chanEventLog{ - events: test.events, - now: func() time.Time { return now }, - openedAt: test.openedAt, - closedAt: test.closedAt, - } - } + // Assert that we now find a record of the peer with flap count = 1. + count, ts, err := ctx.store.FlapCount(peer1) + require.NoError(t, err) + require.Equal(t, lastFlap, *ts) + require.Equal(t, 1, count) - uptime, err := store.GetUptime(test.channelPoint, test.startTime, test.endTime) - if test.expectedError != err { - t.Fatalf("Expected: %v, got: %v", test.expectedError, err) - } + // Make a request for our peer that not tracked in memory, but does + // have its flap count stored on disk. + count, ts, err = ctx.store.FlapCount(peer) + require.NoError(t, err) + require.Equal(t, lastFlap, *ts) + require.Equal(t, peerFlapCount, count) - if uptime != test.expectedUptime { - t.Fatalf("Expected uptime percentage: %v, got %v", - test.expectedUptime, uptime) - } - - }) - } -} - -// TestAddChannel tests that channels are added to the event store with -// appropriate timestamps. This test addresses a bug where offline channels -// did not have an opened time set, and checks that an online event is set for -// peers that are online at the time that a channel is opened. -func TestAddChannel(t *testing.T) { - _, vertex, chanPoint := getTestChannel(t) - - tests := []struct { - name string - - // peers maps peers to an online state. 
- peers map[route.Vertex]bool - - expectedEvents []eventType - }{ - { - name: "peer offline", - peers: make(map[route.Vertex]bool), - expectedEvents: []eventType{}, - }, - { - name: "peer online", - peers: map[route.Vertex]bool{ - vertex: true, - }, - expectedEvents: []eventType{peerOnlineEvent}, - }, - } - - for _, test := range tests { - test := test - t.Run(test.name, func(t *testing.T) { - store := NewChannelEventStore(&Config{}) - store.peers = test.peers - - // Add channel to the store. - store.addChannel(chanPoint, vertex) - - // Check that the eventLog is successfully added. - eventLog, ok := store.channels[chanPoint] - if !ok { - t.Fatalf("channel should be in store") - } - - // Check that the eventLog contains the events we - // expect. - for i, e := range test.expectedEvents { - if e != eventLog.events[i].eventType { - t.Fatalf("expected: %v, got: %v", - e, eventLog.events[i].eventType) - } - } - - // Ensure that open time is always set. - if eventLog.openedAt.IsZero() { - t.Fatalf("channel should have opened at set") - } - }) - } + ctx.stop() } diff --git a/chanfitness/chaneventstore_testctx_test.go b/chanfitness/chaneventstore_testctx_test.go new file mode 100644 index 000000000..8b4983109 --- /dev/null +++ b/chanfitness/chaneventstore_testctx_test.go @@ -0,0 +1,306 @@ +package chanfitness + +import ( + "math/big" + "testing" + "time" + + "github.com/btcsuite/btcd/btcec" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/channeldb" + "github.com/lightningnetwork/lnd/channelnotifier" + "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/peernotifier" + "github.com/lightningnetwork/lnd/routing/route" + "github.com/lightningnetwork/lnd/subscribe" + "github.com/lightningnetwork/lnd/ticker" + "github.com/stretchr/testify/require" +) + +// timeout is the amount of time we allow our blocking test calls. 
+var timeout = time.Second + +// chanEventStoreTestCtx is a helper struct which can be used to test the +// channel event store. +type chanEventStoreTestCtx struct { + t *testing.T + + store *ChannelEventStore + + channelSubscription *mockSubscription + peerSubscription *mockSubscription + + // testVarIdx is an index which will be used to deterministically add + // channels and public keys to our test context. We use a single value + // for a single pubkey + channel combination because its actual value + // does not matter. + testVarIdx int + + // clock is the clock that our test store will use. + clock *clock.TestClock + + // flapUpdates stores our most recent set of updates flap counts. + flapUpdates peerFlapCountMap + + // flapCountUpdates is a channel which receives new flap counts. + flapCountUpdates chan peerFlapCountMap + + // stopped is closed when our test context is fully shutdown. It is + // used to prevent calling of functions which can only be called after + // shutdown. + stopped chan struct{} +} + +// newChanEventStoreTestCtx creates a test context which can be used to test +// the event store. 
+func newChanEventStoreTestCtx(t *testing.T) *chanEventStoreTestCtx { + testCtx := &chanEventStoreTestCtx{ + t: t, + channelSubscription: newMockSubscription(t), + peerSubscription: newMockSubscription(t), + clock: clock.NewTestClock(testNow), + flapUpdates: make(peerFlapCountMap), + flapCountUpdates: make(chan peerFlapCountMap), + stopped: make(chan struct{}), + } + + cfg := &Config{ + Clock: testCtx.clock, + SubscribeChannelEvents: func() (subscribe.Subscription, error) { + return testCtx.channelSubscription, nil + }, + SubscribePeerEvents: func() (subscribe.Subscription, error) { + return testCtx.peerSubscription, nil + }, + GetOpenChannels: func() ([]*channeldb.OpenChannel, error) { + return nil, nil + }, + WriteFlapCount: func(updates map[route.Vertex]*channeldb.FlapCount) error { + // Send our whole update map into the test context's + // updates channel. The test will need to assert flap + // count updated or this send will timeout. + select { + case testCtx.flapCountUpdates <- updates: + + case <-time.After(timeout): + t.Fatalf("WriteFlapCount timeout") + } + + return nil + }, + ReadFlapCount: func(peer route.Vertex) (*channeldb.FlapCount, error) { + count, ok := testCtx.flapUpdates[peer] + if !ok { + return nil, channeldb.ErrNoPeerBucket + } + + return count, nil + }, + FlapCountTicker: ticker.NewForce(FlapCountFlushRate), + } + + testCtx.store = NewChannelEventStore(cfg) + + return testCtx +} + +// start starts the test context's event store. +func (c *chanEventStoreTestCtx) start() { + require.NoError(c.t, c.store.Start()) +} + +// stop stops the channel event store's subscribe servers and the store itself. +func (c *chanEventStoreTestCtx) stop() { + // On shutdown of our event store, we write flap counts to disk. In our + // test context, this write function is blocked on asserting that the + // update has occurred. We stop our store in a goroutine so that we + // can shut it down and assert that it performs these on-shutdown + // updates. 
The stopped channel is used to ensure that we do not finish + // our test before this shutdown has completed. + go func() { + c.store.Stop() + close(c.stopped) + }() + + // We write our flap count to disk on shutdown, assert that the most + // recent record that the server has is written on shutdown. Calling + // this assert unblocks the stop function above. We don't check values + // here, so that our tests don't all require providing an expected swap + // count, but at least assert that the write occurred. + c.assertFlapCountUpdated() + + <-c.stopped + + // Make sure that the cancel function was called for both of our + // subscription mocks. + c.channelSubscription.assertCancelled() + c.peerSubscription.assertCancelled() +} + +// newChannel creates a new, unique test channel. Note that this function +// does not add it to the test event store, it just creates mocked values. +func (c *chanEventStoreTestCtx) newChannel() (route.Vertex, *btcec.PublicKey, + wire.OutPoint) { + + // Create a pubkey for our channel peer. + pubKey := &btcec.PublicKey{ + X: big.NewInt(int64(c.testVarIdx)), + Y: big.NewInt(int64(c.testVarIdx)), + Curve: btcec.S256(), + } + + // Create vertex from our pubkey. + vertex, err := route.NewVertexFromBytes(pubKey.SerializeCompressed()) + require.NoError(c.t, err) + + // Create a channel point using our channel index, then increment it. + chanPoint := wire.OutPoint{ + Hash: [chainhash.HashSize]byte{1, 2, 3}, + Index: uint32(c.testVarIdx), + } + + // Increment the index we use so that the next channel and pubkey we + // create will be unique. + c.testVarIdx++ + + return vertex, pubKey, chanPoint +} + +// createChannel creates a new channel, notifies the event store that it has +// been created and returns the peer vertex, pubkey and channel point. 
+func (c *chanEventStoreTestCtx) createChannel() (route.Vertex, *btcec.PublicKey, + wire.OutPoint) { + + vertex, pubKey, chanPoint := c.newChannel() + c.sendChannelOpenedUpdate(pubKey, chanPoint) + + return vertex, pubKey, chanPoint +} + +// closeChannel sends a close channel event to our subscribe server. +func (c *chanEventStoreTestCtx) closeChannel(channel wire.OutPoint, + peer *btcec.PublicKey) { + + update := channelnotifier.ClosedChannelEvent{ + CloseSummary: &channeldb.ChannelCloseSummary{ + ChanPoint: channel, + RemotePub: peer, + }, + } + + c.channelSubscription.sendUpdate(update) +} + +// tickFlapCount forces a tick for our flap count ticker with the current time. +func (c *chanEventStoreTestCtx) tickFlapCount() { + testTicker := c.store.cfg.FlapCountTicker.(*ticker.Force) + + select { + case testTicker.Force <- c.store.cfg.Clock.Now(): + + case <-time.After(timeout): + c.t.Fatalf("could not tick flap count ticker") + } +} + +// peerEvent sends a peer online or offline event to the store for the peer +// provided. +func (c *chanEventStoreTestCtx) peerEvent(peer route.Vertex, online bool) { + var update interface{} + if online { + update = peernotifier.PeerOnlineEvent{PubKey: peer} + } else { + update = peernotifier.PeerOfflineEvent{PubKey: peer} + } + + c.peerSubscription.sendUpdate(update) +} + +// sendChannelOpenedUpdate notifies the test event store that a channel has +// been opened. +func (c *chanEventStoreTestCtx) sendChannelOpenedUpdate(pubkey *btcec.PublicKey, + channel wire.OutPoint) { + + update := channelnotifier.OpenChannelEvent{ + Channel: &channeldb.OpenChannel{ + FundingOutpoint: channel, + IdentityPub: pubkey, + }, + } + + c.channelSubscription.sendUpdate(update) +} + +// assertFlapCountUpdated asserts that our store has made an attempt to write +// our current set of flap counts to disk and sets this value in our test ctx. +// Note that it does not check the values of the update. 
+func (c *chanEventStoreTestCtx) assertFlapCountUpdated() { + select { + case c.flapUpdates = <-c.flapCountUpdates: + + case <-time.After(timeout): + c.t.Fatalf("assertFlapCountUpdated timeout") + } +} + +// assertFlapCountUpdates asserts that out current record of flap counts is +// as expected. +func (c *chanEventStoreTestCtx) assertFlapCountUpdates(expected peerFlapCountMap) { + require.Equal(c.t, expected, c.flapUpdates) +} + +// mockSubscription is a mock subscription client that blocks on sends into the +// updates channel. We use this mock rather than an actual subscribe client +// because they do not block, which makes tests race (because we have no way +// to guarantee that the test client consumes the update before shutdown). +type mockSubscription struct { + t *testing.T + updates chan interface{} + + // Embed the subscription interface in this mock so that we satisfy it. + subscribe.Subscription +} + +// newMockSubscription creates a mock subscription. +func newMockSubscription(t *testing.T) *mockSubscription { + return &mockSubscription{ + t: t, + updates: make(chan interface{}), + } +} + +// sendUpdate sends an update into our updates channel, mocking the dispatch of +// an update from a subscription server. This call will fail the test if the +// update is not consumed within our timeout. +func (m *mockSubscription) sendUpdate(update interface{}) { + select { + case m.updates <- update: + + case <-time.After(timeout): + m.t.Fatalf("update: %v timeout", update) + } +} + +// Updates returns the updates channel for the mock. +func (m *mockSubscription) Updates() <-chan interface{} { + return m.updates +} + +// Cancel should be called in case the client no longer wants to subscribe for +// updates from the server. +func (m *mockSubscription) Cancel() { + close(m.updates) +} + +// assertCancelled asserts that the cancel function has been called for this +// mock. 
+func (m *mockSubscription) assertCancelled() { + select { + case _, open := <-m.updates: + require.False(m.t, open, "subscription not cancelled") + + case <-time.After(timeout): + m.t.Fatalf("assert cancelled timeout") + } +} diff --git a/chanfitness/interface.go b/chanfitness/interface.go new file mode 100644 index 000000000..22678d650 --- /dev/null +++ b/chanfitness/interface.go @@ -0,0 +1,34 @@ +package chanfitness + +import ( + "time" + + "github.com/btcsuite/btcd/wire" +) + +// peerMonitor is an interface implemented by entities that monitor our peers +// online events and the channels we currently have open with them. +type peerMonitor interface { + // event adds an online or offline event. + onlineEvent(online bool) + + // addChannel adds a new channel. + addChannel(channelPoint wire.OutPoint) error + + // removeChannel removes a channel. + removeChannel(channelPoint wire.OutPoint) error + + // channelCount returns the number of channels that we currently have + // with the peer. + channelCount() int + + // channelUptime looks up a channel and returns the amount of time that + // the channel has been monitored for and its uptime over this period. + channelUptime(channelPoint wire.OutPoint) (time.Duration, + time.Duration, error) + + // getFlapCount returns the peer's flap count and the timestamp that we + // last recorded a flap, which may be nil if we have never recorded a + // flap for this peer. + getFlapCount() (int, *time.Time) +} diff --git a/chanfitness/rate_limit.go b/chanfitness/rate_limit.go new file mode 100644 index 000000000..b070a4452 --- /dev/null +++ b/chanfitness/rate_limit.go @@ -0,0 +1,82 @@ +package chanfitness + +import ( + "math" + "time" +) + +const ( + // rateLimitScale is the number of events we allow per rate limited + // tier. Increasing this value makes our rate limiting more lenient, + // decreasing it makes us less lenient. 
+ rateLimitScale = 200 + + // flapCountCooldownFactor is the factor by which we decrease a peer's + // flap count if they have not flapped for the cooldown period. + flapCountCooldownFactor = 0.95 + + // flapCountCooldownPeriod is the amount of time that we require a peer + // has not flapped for before we reduce their all time flap count using + // our cooldown factor. + flapCountCooldownPeriod = time.Hour * 8 +) + +// rateLimits is the set of rate limit tiers we apply to our peers based on +// their flap count. A peer can be placed in their tier by dividing their flap +// count by the rateLimitScale and returning the value at that index. +var rateLimits = []time.Duration{ + time.Second, + time.Second * 5, + time.Second * 30, + time.Minute, + time.Minute * 30, + time.Hour, +} + +// getRateLimit returns the value of the rate limited tier that we are on based +// on current flap count. If a peer's flap count exceeds the top tier, we just +// return our highest tier. +func getRateLimit(flapCount int) time.Duration { + // Figure out the tier we fall into based on our current flap count. + tier := flapCount / rateLimitScale + + // If we have more events than our number of tiers, we just use the + // last tier + tierLen := len(rateLimits) + if tier >= tierLen { + tier = tierLen - 1 + } + + return rateLimits[tier] +} + +// cooldownFlapCount takes a timestamped flap count, and returns its value +// scaled down by our cooldown factor if at least our cooldown period has +// elapsed since the peer last flapped. We do this because we store all-time +// flap count for peers, and want to allow downgrading of peers that have not +// flapped for a long time. +func cooldownFlapCount(now time.Time, flapCount int, + lastFlap time.Time) int { + + // Calculate time since our last flap, and the number of times we need + // to apply our cooldown factor. + timeSinceFlap := now.Sub(lastFlap) + + // If our cooldown period has not elapsed yet, we just return our flap + // count. 
We allow fractional cooldown periods once this period has + // elapsed, so we do not want to apply a fractional cooldown before the + // full cooldown period has elapsed. + if timeSinceFlap < flapCountCooldownPeriod { + return flapCount + } + + // Get the factor by which we need to cooldown our flap count. If + // insufficient time has passed to cooldown our flap count. We use a + // float so that we allow fractional cooldown periods. + cooldownPeriods := float64(timeSinceFlap) / + float64(flapCountCooldownPeriod) + + effectiveFactor := math.Pow(flapCountCooldownFactor, cooldownPeriods) + + return int(float64(flapCount) * effectiveFactor) +} diff --git a/chanfitness/rate_limit_test.go b/chanfitness/rate_limit_test.go new file mode 100644 index 000000000..b9bca8086 --- /dev/null +++ b/chanfitness/rate_limit_test.go @@ -0,0 +1,105 @@ +package chanfitness + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestGetRateLimit tests getting of our rate limit using the current constants. +// It creates test cases that are relative to our constants so that they +// can be adjusted without breaking the unit test. 
+func TestGetRateLimit(t *testing.T) { + tests := []struct { + name string + flapCount int + rateLimit time.Duration + }{ + { + name: "zero flaps", + flapCount: 0, + rateLimit: rateLimits[0], + }, + { + name: "middle tier", + flapCount: rateLimitScale * (len(rateLimits) / 2), + rateLimit: rateLimits[len(rateLimits)/2], + }, + { + name: "last tier", + flapCount: rateLimitScale * (len(rateLimits) - 1), + rateLimit: rateLimits[len(rateLimits)-1], + }, + { + name: "beyond last tier", + flapCount: rateLimitScale * (len(rateLimits) * 2), + rateLimit: rateLimits[len(rateLimits)-1], + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + limit := getRateLimit(test.flapCount) + require.Equal(t, test.rateLimit, limit) + }) + } +} + +// TestCooldownFlapCount tests cooldown of all time flap counts. +func TestCooldownFlapCount(t *testing.T) { + tests := []struct { + name string + flapCount int + lastFlap time.Time + expected int + }{ + { + name: "just flapped, do not cooldown", + flapCount: 1, + lastFlap: testNow, + expected: 1, + }, + { + name: "period not elapsed, do not cooldown", + flapCount: 1, + lastFlap: testNow.Add(flapCountCooldownPeriod / 2 * -1), + expected: 1, + }, + { + name: "rounded to 0", + flapCount: 1, + lastFlap: testNow.Add(flapCountCooldownPeriod * -1), + expected: 0, + }, + { + name: "decreased to integer value", + flapCount: 10, + lastFlap: testNow.Add(flapCountCooldownPeriod * -1), + expected: 9, + }, + { + name: "multiple cooldown periods", + flapCount: 10, + lastFlap: testNow.Add(flapCountCooldownPeriod * -3), + expected: 8, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + flapCount := cooldownFlapCount( + testNow, test.flapCount, test.lastFlap, + ) + require.Equal(t, test.expected, flapCount) + }) + } +} diff --git a/channeldb/channel.go b/channeldb/channel.go index 5bec7a475..35a0700d4 100644 --- a/channeldb/channel.go 
+++ b/channeldb/channel.go @@ -79,6 +79,11 @@ var ( // for in one of our remote commits. unsignedAckedUpdatesKey = []byte("unsigned-acked-updates-key") + // remoteUnsignedLocalUpdatesKey is an entry in the channel bucket that + // contains the local updates that the remote party has acked, but + // has not yet signed for in one of their local commits. + remoteUnsignedLocalUpdatesKey = []byte("remote-unsigned-local-updates-key") + // revocationStateKey stores their current revocation hash, our // preimage producer and their preimage store. revocationStateKey = []byte("revocation-state-key") @@ -201,8 +206,7 @@ const ( // AnchorOutputsBit indicates that the channel makes use of anchor // outputs to bump the commitment transaction's effective feerate. This - // channel type also uses a delayed to_remote output script. If bit is - // set, we'll find the size of the anchor outputs in the database. + // channel type also uses a delayed to_remote output script. AnchorOutputsBit ChannelType = 1 << 3 // FrozenBit indicates that the channel is a frozen channel, meaning @@ -1448,6 +1452,39 @@ func (c *OpenChannel) UpdateCommitment(newCommitment *ChannelCommitment, "updates: %v", err) } + // Persist the remote unsigned local updates that are not included + // in our new commitment. + updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey) + if updateBytes == nil { + return nil + } + + r := bytes.NewReader(updateBytes) + updates, err := deserializeLogUpdates(r) + if err != nil { + return err + } + + var validUpdates []LogUpdate + for _, upd := range updates { + // Filter for updates that are not on our local + // commitment. 
+ if upd.LogIndex >= newCommitment.LocalLogIndex { + validUpdates = append(validUpdates, upd) + } + } + + var b2 bytes.Buffer + err = serializeLogUpdates(&b2, validUpdates) + if err != nil { + return fmt.Errorf("unable to serialize log updates: %v", err) + } + + err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes()) + if err != nil { + return fmt.Errorf("unable to restore chanbucket: %v", err) + } + return nil }) if err != nil { @@ -2065,6 +2102,39 @@ func (c *OpenChannel) UnsignedAckedUpdates() ([]LogUpdate, error) { return updates, nil } +// RemoteUnsignedLocalUpdates retrieves the persisted, unsigned local log +// updates that the remote still needs to sign for. +func (c *OpenChannel) RemoteUnsignedLocalUpdates() ([]LogUpdate, error) { + var updates []LogUpdate + err := kvdb.View(c.Db, func(tx kvdb.RTx) error { + chanBucket, err := fetchChanBucket( + tx, c.IdentityPub, &c.FundingOutpoint, c.ChainHash, + ) + switch err { + case nil: + break + case ErrNoChanDBExists, ErrNoActiveChannels, ErrChannelNotFound: + return nil + default: + return err + } + + updateBytes := chanBucket.Get(remoteUnsignedLocalUpdatesKey) + if updateBytes == nil { + return nil + } + + r := bytes.NewReader(updateBytes) + updates, err = deserializeLogUpdates(r) + return err + }) + if err != nil { + return nil, err + } + + return updates, nil +} + // InsertNextRevocation inserts the _next_ commitment point (revocation) into // the database, and also modifies the internal RemoteNextRevocation attribute // to point to the passed key. This method is to be using during final channel @@ -2101,8 +2171,12 @@ func (c *OpenChannel) InsertNextRevocation(revKey *btcec.PublicKey) error { // this log can be consulted in order to reconstruct the state needed to // rectify the situation. This method will add the current commitment for the // remote party to the revocation log, and promote the current pending -// commitment to the current remote commitment. 
-func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error { +// commitment to the current remote commitment. The updates parameter is the +// set of local updates that the peer still needs to send us a signature for. +// We store this set of updates in case we go down. +func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg, + updates []LogUpdate) error { + c.Lock() defer c.Unlock() @@ -2226,6 +2300,20 @@ func (c *OpenChannel) AdvanceCommitChainTail(fwdPkg *FwdPkg) error { return fmt.Errorf("unable to store under unsignedAckedUpdatesKey: %v", err) } + // Persist the local updates the peer hasn't yet signed so they + // can be restored after restart. + var b2 bytes.Buffer + err = serializeLogUpdates(&b2, updates) + if err != nil { + return err + } + + err = chanBucket.Put(remoteUnsignedLocalUpdatesKey, b2.Bytes()) + if err != nil { + return fmt.Errorf("unable to restore remote unsigned "+ + "local updates: %v", err) + } + newRemoteCommit = &newCommit.Commitment return nil @@ -2320,16 +2408,24 @@ func (c *OpenChannel) SetFwdFilter(height uint64, fwdFilter *PkgFilter) error { }) } -// RemoveFwdPkg atomically removes a forwarding package specified by the remote -// commitment height. +// RemoveFwdPkgs atomically removes forwarding packages specified by the remote +// commitment heights. If one of the intermediate RemovePkg calls fails, then the +// later packages won't be removed. // // NOTE: This method should only be called on packages marked FwdStateCompleted. 
-func (c *OpenChannel) RemoveFwdPkg(height uint64) error { +func (c *OpenChannel) RemoveFwdPkgs(heights ...uint64) error { c.Lock() defer c.Unlock() return kvdb.Update(c.Db, func(tx kvdb.RwTx) error { - return c.Packager.RemovePkg(tx, height) + for _, height := range heights { + err := c.Packager.RemovePkg(tx, height) + if err != nil { + return err + } + } + + return nil }) } diff --git a/channeldb/channel_test.go b/channeldb/channel_test.go index e0fb3e897..656a885bf 100644 --- a/channeldb/channel_test.go +++ b/channeldb/channel_test.go @@ -2,10 +2,8 @@ package channeldb import ( "bytes" - "io/ioutil" "math/rand" "net" - "os" "reflect" "runtime" "testing" @@ -86,40 +84,6 @@ var ( } ) -// makeTestDB creates a new instance of the ChannelDB for testing purposes. A -// callback which cleans up the created temporary directories is also returned -// and intended to be executed after the test completes. -func makeTestDB() (*DB, func(), error) { - // First, create a temporary directory to be used for the duration of - // this test. - tempDirName, err := ioutil.TempDir("", "channeldb") - if err != nil { - return nil, nil, err - } - - // Next, create channeldb for the first time. - backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") - if err != nil { - backendCleanup() - return nil, nil, err - } - - cdb, err := CreateWithBackend(backend, OptionClock(testClock)) - if err != nil { - backendCleanup() - os.RemoveAll(tempDirName) - return nil, nil, err - } - - cleanUp := func() { - cdb.Close() - backendCleanup() - os.RemoveAll(tempDirName) - } - - return cdb, cleanUp, nil -} - // testChannelParams is a struct which details the specifics of how a channel // should be created. 
type testChannelParams struct { @@ -403,7 +367,7 @@ func createTestChannelState(t *testing.T, cdb *DB) *OpenChannel { func TestOpenChannelPutGetDelete(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -552,7 +516,7 @@ func TestOptionalShutdown(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -609,7 +573,7 @@ func assertCommitmentEqual(t *testing.T, a, b *ChannelCommitment) { func TestChannelStateTransition(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -797,7 +761,7 @@ func TestChannelStateTransition(t *testing.T) { fwdPkg := NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, diskCommitDiff.LogUpdates, nil) - err = channel.AdvanceCommitChainTail(fwdPkg) + err = channel.AdvanceCommitChainTail(fwdPkg, nil) if err != nil { t.Fatalf("unable to append to revocation log: %v", err) } @@ -845,7 +809,7 @@ func TestChannelStateTransition(t *testing.T) { fwdPkg = NewFwdPkg(channel.ShortChanID(), oldRemoteCommit.CommitHeight, nil, nil) - err = channel.AdvanceCommitChainTail(fwdPkg) + err = channel.AdvanceCommitChainTail(fwdPkg, nil) if err != nil { t.Fatalf("unable to append to revocation log: %v", err) } @@ -914,7 +878,7 @@ func TestChannelStateTransition(t *testing.T) { func TestFetchPendingChannels(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -993,7 +957,7 @@ func TestFetchPendingChannels(t *testing.T) { func TestFetchClosedChannels(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if 
err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -1084,7 +1048,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) { // We'll start by creating two channels within our test database. One of // them will have their funding transaction confirmed on-chain, while // the other one will remain unconfirmed. - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -1199,7 +1163,7 @@ func TestFetchWaitingCloseChannels(t *testing.T) { func TestRefreshShortChanID(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -1347,7 +1311,7 @@ func TestCloseInitiator(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1392,7 +1356,7 @@ func TestCloseInitiator(t *testing.T) { // TestCloseChannelStatus tests setting of a channel status on the historical // channel on channel close. func TestCloseChannelStatus(t *testing.T) { - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1538,7 +1502,7 @@ func TestBalanceAtHeight(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) diff --git a/channeldb/db.go b/channeldb/db.go index 06d905606..983b4fbbd 100644 --- a/channeldb/db.go +++ b/channeldb/db.go @@ -4,6 +4,7 @@ import ( "bytes" "encoding/binary" "fmt" + "io/ioutil" "net" "os" @@ -163,6 +164,12 @@ var ( number: 17, migration: mig.CreateTLB(closeSummaryBucket), }, + { + // Create a top level bucket which holds information + // about our peers. 
+ number: 18, + migration: mig.CreateTLB(peersBucket), + }, } // Big endian is the preferred byte order, due to cursor scans over @@ -277,6 +284,7 @@ var topLevelBuckets = [][]byte{ invoiceBucket, payAddrIndexBucket, paymentsIndexBucket, + peersBucket, nodeInfoBucket, nodeBucket, edgeBucket, @@ -1260,3 +1268,37 @@ func (db *DB) FetchHistoricalChannel(outPoint *wire.OutPoint) (*OpenChannel, err return channel, nil } + +// MakeTestDB creates a new instance of the ChannelDB for testing purposes. +// A callback which cleans up the created temporary directories is also +// returned and intended to be executed after the test completes. +func MakeTestDB(modifiers ...OptionModifier) (*DB, func(), error) { + // First, create a temporary directory to be used for the duration of + // this test. + tempDirName, err := ioutil.TempDir("", "channeldb") + if err != nil { + return nil, nil, err + } + + // Next, create channeldb for the first time. + backend, backendCleanup, err := kvdb.GetTestBackend(tempDirName, "cdb") + if err != nil { + backendCleanup() + return nil, nil, err + } + + cdb, err := CreateWithBackend(backend, modifiers...) 
+ if err != nil { + backendCleanup() + os.RemoveAll(tempDirName) + return nil, nil, err + } + + cleanUp := func() { + cdb.Close() + backendCleanup() + os.RemoveAll(tempDirName) + } + + return cdb, cleanUp, nil +} diff --git a/channeldb/db_test.go b/channeldb/db_test.go index e5c57c1de..a86eee695 100644 --- a/channeldb/db_test.go +++ b/channeldb/db_test.go @@ -115,7 +115,7 @@ func TestFetchClosedChannelForID(t *testing.T) { const numChans = 101 - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -186,7 +186,7 @@ func TestFetchClosedChannelForID(t *testing.T) { func TestAddrsForNode(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -247,7 +247,7 @@ func TestAddrsForNode(t *testing.T) { func TestFetchChannel(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -351,7 +351,7 @@ func genRandomChannelShell() (*ChannelShell, error) { func TestRestoreChannelShells(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -398,7 +398,7 @@ func TestRestoreChannelShells(t *testing.T) { if err != ErrNoRestoredChannelMutation { t.Fatalf("able to mutate restored channel") } - err = channel.AdvanceCommitChainTail(nil) + err = channel.AdvanceCommitChainTail(nil, nil) if err != ErrNoRestoredChannelMutation { t.Fatalf("able to mutate restored channel") } @@ -445,7 +445,7 @@ func TestRestoreChannelShells(t *testing.T) { func TestAbandonChannel(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -618,7 +618,7 @@ func 
TestFetchChannels(t *testing.T) { t.Run(test.name, func(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test "+ "database: %v", err) @@ -687,7 +687,7 @@ func TestFetchChannels(t *testing.T) { // TestFetchHistoricalChannel tests lookup of historical channels. func TestFetchHistoricalChannel(t *testing.T) { - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } diff --git a/channeldb/forwarding_log.go b/channeldb/forwarding_log.go index a52848dd4..d1216dc46 100644 --- a/channeldb/forwarding_log.go +++ b/channeldb/forwarding_log.go @@ -6,6 +6,7 @@ import ( "sort" "time" + "github.com/btcsuite/btcwallet/walletdb" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/lnwire" ) @@ -104,10 +105,9 @@ func decodeForwardingEvent(r io.Reader, f *ForwardingEvent) error { func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error { // Before we create the database transaction, we'll ensure that the set // of forwarding events are properly sorted according to their - // timestamp. - sort.Slice(events, func(i, j int) bool { - return events[i].Timestamp.Before(events[j].Timestamp) - }) + // timestamp and that no duplicate timestamps exist to avoid collisions + // in the key we are going to store the events under. + makeUniqueTimestamps(events) var timestamp [8]byte @@ -124,22 +124,7 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error { // With the bucket obtained, we can now begin to write out the // series of events. for _, event := range events { - var eventBytes [forwardingEventSize]byte - eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize]) - - // First, we'll serialize this timestamp into our - // timestamp buffer. 
- byteOrder.PutUint64( - timestamp[:], uint64(event.Timestamp.UnixNano()), - ) - - // With the key encoded, we'll then encode the event - // into our buffer, then write it out to disk. - err := encodeForwardingEvent(eventBuf, &event) - if err != nil { - return err - } - err = logBucket.Put(timestamp[:], eventBuf.Bytes()) + err := storeEvent(logBucket, event, timestamp[:]) if err != nil { return err } @@ -149,6 +134,55 @@ func (f *ForwardingLog) AddForwardingEvents(events []ForwardingEvent) error { }) } +// storeEvent tries to store a forwarding event into the given bucket by trying +// to avoid collisions. If a key for the event timestamp already exists in the +// database, the timestamp is incremented in nanosecond intervals until a "free" +// slot is found. +func storeEvent(bucket walletdb.ReadWriteBucket, event ForwardingEvent, + timestampScratchSpace []byte) error { + + // First, we'll serialize this timestamp into our + // timestamp buffer. + byteOrder.PutUint64( + timestampScratchSpace, uint64(event.Timestamp.UnixNano()), + ) + + // Next we'll loop until we find a "free" slot in the bucket to store + // the event under. This should almost never happen unless we're running + // on a system that has a very bad system clock that doesn't properly + // resolve to nanosecond scale. We try up to 100 times (which would come + // to a maximum shift of 0.1 microsecond which is acceptable for most + // use cases). If we don't find a free slot, we just give up and let + // the collision happen. Something must be wrong with the data in that + // case, even on a very fast machine forwarding payments _will_ take a + // few microseconds at least so we should find a nanosecond slot + // somewhere. + const maxTries = 100 + tries := 0 + for tries < maxTries { + val := bucket.Get(timestampScratchSpace) + if val == nil { + break + } + + // Collision, try the next nanosecond timestamp. 
+ nextNano := event.Timestamp.UnixNano() + 1 + event.Timestamp = time.Unix(0, nextNano) + byteOrder.PutUint64(timestampScratchSpace, uint64(nextNano)) + tries++ + } + + // With the key encoded, we'll then encode the event + // into our buffer, then write it out to disk. + var eventBytes [forwardingEventSize]byte + eventBuf := bytes.NewBuffer(eventBytes[0:0:forwardingEventSize]) + err := encodeForwardingEvent(eventBuf, &event) + if err != nil { + return err + } + return bucket.Put(timestampScratchSpace, eventBuf.Bytes()) +} + // ForwardingEventQuery represents a query to the forwarding log payment // circuit time series database. The query allows a caller to retrieve all // records for a particular time slice, offset in that time slice, limiting the @@ -272,3 +306,34 @@ func (f *ForwardingLog) Query(q ForwardingEventQuery) (ForwardingLogTimeSlice, e return resp, nil } + +// makeUniqueTimestamps takes a slice of forwarding events, sorts it by the +// event timestamps and then makes sure there are no duplicates in the +// timestamps. If duplicates are found, some of the timestamps are increased on +// the nanosecond scale until only unique values remain. This is a fix to +// address the problem that in some environments (looking at you, Windows) the +// system clock has such a bad resolution that two serial invocations of +// time.Now() might return the same timestamp, even if some time has elapsed +// between the calls. +func makeUniqueTimestamps(events []ForwardingEvent) { + sort.Slice(events, func(i, j int) bool { + return events[i].Timestamp.Before(events[j].Timestamp) + }) + + // Now that we know the events are sorted by timestamp, we can go + // through the list and fix all duplicates until only unique values + // remain. + for outer := 0; outer < len(events)-1; outer++ { + current := events[outer].Timestamp.UnixNano() + next := events[outer+1].Timestamp.UnixNano() + + // We initially sorted the slice. 
So if the current is now + // greater or equal to the next one, it's either because it's a + // duplicate or because we increased the current in the last + // iteration. + if current >= next { + next = current + 1 + events[outer+1].Timestamp = time.Unix(0, next) + } + } +} diff --git a/channeldb/forwarding_log_test.go b/channeldb/forwarding_log_test.go index cc06e8867..cd21f12e2 100644 --- a/channeldb/forwarding_log_test.go +++ b/channeldb/forwarding_log_test.go @@ -4,11 +4,11 @@ import ( "math/rand" "reflect" "testing" + "time" "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/lnwire" - - "time" + "github.com/stretchr/testify/assert" ) // TestForwardingLogBasicStorageAndQuery tests that we're able to store and @@ -19,11 +19,12 @@ func TestForwardingLogBasicStorageAndQuery(t *testing.T) { // First, we'll set up a test database, and use that to instantiate the // forwarding event log that we'll be using for the duration of the // test. - db, cleanUp, err := makeTestDB() - defer cleanUp() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test db: %v", err) } + defer cleanUp() + log := ForwardingLog{ db: db, } @@ -91,11 +92,12 @@ func TestForwardingLogQueryOptions(t *testing.T) { // First, we'll set up a test database, and use that to instantiate the // forwarding event log that we'll be using for the duration of the // test. - db, cleanUp, err := makeTestDB() - defer cleanUp() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test db: %v", err) } + defer cleanUp() + log := ForwardingLog{ db: db, } @@ -196,11 +198,12 @@ func TestForwardingLogQueryLimit(t *testing.T) { // First, we'll set up a test database, and use that to instantiate the // forwarding event log that we'll be using for the duration of the // test. 
- db, cleanUp, err := makeTestDB() - defer cleanUp() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test db: %v", err) } + defer cleanUp() + log := ForwardingLog{ db: db, } @@ -263,3 +266,118 @@ func TestForwardingLogQueryLimit(t *testing.T) { timeSlice.LastIndexOffset) } } + +// TestForwardingLogMakeUniqueTimestamps makes sure the function that creates +// unique timestamps does its job correctly. +func TestForwardingLogMakeUniqueTimestamps(t *testing.T) { + t.Parallel() + + // Create a list of events where some of the timestamps collide. We + // expect no existing timestamp to be overwritten, instead the "gaps" + // between them should be filled. + inputSlice := []ForwardingEvent{ + {Timestamp: time.Unix(0, 1001)}, + {Timestamp: time.Unix(0, 2001)}, + {Timestamp: time.Unix(0, 1001)}, + {Timestamp: time.Unix(0, 1002)}, + {Timestamp: time.Unix(0, 1004)}, + {Timestamp: time.Unix(0, 1004)}, + {Timestamp: time.Unix(0, 1007)}, + {Timestamp: time.Unix(0, 1001)}, + } + expectedSlice := []ForwardingEvent{ + {Timestamp: time.Unix(0, 1001)}, + {Timestamp: time.Unix(0, 1002)}, + {Timestamp: time.Unix(0, 1003)}, + {Timestamp: time.Unix(0, 1004)}, + {Timestamp: time.Unix(0, 1005)}, + {Timestamp: time.Unix(0, 1006)}, + {Timestamp: time.Unix(0, 1007)}, + {Timestamp: time.Unix(0, 2001)}, + } + + makeUniqueTimestamps(inputSlice) + + for idx, in := range inputSlice { + expect := expectedSlice[idx] + assert.Equal( + t, expect.Timestamp.UnixNano(), in.Timestamp.UnixNano(), + ) + } +} + +// TestForwardingLogStoreEvent makes sure forwarding events are stored without +// colliding on duplicate timestamps. +func TestForwardingLogStoreEvent(t *testing.T) { + t.Parallel() + + // First, we'll set up a test database, and use that to instantiate the + // forwarding event log that we'll be using for the duration of the + // test. 
+ db, cleanUp, err := MakeTestDB() + if err != nil { + t.Fatalf("unable to make test db: %v", err) + } + defer cleanUp() + + log := ForwardingLog{ + db: db, + } + + // We'll create 20 random events, with each event having a timestamp + // with just one nanosecond apart. + numEvents := 20 + events := make([]ForwardingEvent, numEvents) + ts := time.Now().UnixNano() + for i := 0; i < numEvents; i++ { + events[i] = ForwardingEvent{ + Timestamp: time.Unix(0, ts+int64(i)), + IncomingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), + OutgoingChanID: lnwire.NewShortChanIDFromInt(uint64(rand.Int63())), + AmtIn: lnwire.MilliSatoshi(rand.Int63()), + AmtOut: lnwire.MilliSatoshi(rand.Int63()), + } + } + + // Now that all of our events are constructed, we'll add them to the + // database in a batched manner. + if err := log.AddForwardingEvents(events); err != nil { + t.Fatalf("unable to add events: %v", err) + } + + // Because timestamps are de-duplicated when adding them in a single + // batch before they even hit the DB, we add the same events again but + // in a new batch. They now have to be de-duplicated on the DB level. + if err := log.AddForwardingEvents(events); err != nil { + t.Fatalf("unable to add second batch of events: %v", err) + } + + // With all of our events added, we should be able to query for all + // events with a range of just 40 nanoseconds (2 times 20 events, all + // spaced one nanosecond apart). + eventQuery := ForwardingEventQuery{ + StartTime: time.Unix(0, ts), + EndTime: time.Unix(0, ts+int64(numEvents*2)), + IndexOffset: 0, + NumMaxEvents: uint32(numEvents * 3), + } + timeSlice, err := log.Query(eventQuery) + if err != nil { + t.Fatalf("unable to query for events: %v", err) + } + + // We should get exactly 40 events back. 
+ if len(timeSlice.ForwardingEvents) != numEvents*2 { + t.Fatalf("wrong number of events: expected %v, got %v", + numEvents*2, len(timeSlice.ForwardingEvents)) + } + + // The timestamps should be spaced out evenly and in order. + for i := 0; i < numEvents*2; i++ { + eventTs := timeSlice.ForwardingEvents[i].Timestamp.UnixNano() + if eventTs != ts+int64(i) { + t.Fatalf("unexpected timestamp of event %d: expected "+ + "%d, got %d", i, ts+int64(i), eventTs) + } + } +} diff --git a/channeldb/forwarding_package.go b/channeldb/forwarding_package.go index 073ff4055..dced6e952 100644 --- a/channeldb/forwarding_package.go +++ b/channeldb/forwarding_package.go @@ -326,7 +326,7 @@ type SettleFailAcker interface { type GlobalFwdPkgReader interface { // LoadChannelFwdPkgs loads all known forwarding packages for the given // channel. - LoadChannelFwdPkgs(tx kvdb.RwTx, + LoadChannelFwdPkgs(tx kvdb.RTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) } @@ -364,7 +364,7 @@ func (*SwitchPackager) AckSettleFails(tx kvdb.RwTx, } // LoadChannelFwdPkgs loads all forwarding packages for a particular channel. 
-func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RwTx, +func (*SwitchPackager) LoadChannelFwdPkgs(tx kvdb.RTx, source lnwire.ShortChannelID) ([]*FwdPkg, error) { return loadChannelFwdPkgs(tx, source) diff --git a/channeldb/graph_test.go b/channeldb/graph_test.go index a6c1fb0d8..71edc8f88 100644 --- a/channeldb/graph_test.go +++ b/channeldb/graph_test.go @@ -73,7 +73,7 @@ func createTestVertex(db *DB) (*LightningNode, error) { func TestNodeInsertionAndDeletion(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -139,7 +139,7 @@ func TestNodeInsertionAndDeletion(t *testing.T) { func TestPartialNode(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -201,7 +201,7 @@ func TestPartialNode(t *testing.T) { func TestAliasLookup(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -255,7 +255,7 @@ func TestAliasLookup(t *testing.T) { func TestSourceNode(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -296,7 +296,7 @@ func TestSourceNode(t *testing.T) { func TestEdgeInsertionDeletion(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -431,7 +431,7 @@ func createEdge(height, txIndex uint32, txPosition uint16, outPointIndex uint32, func TestDisconnectBlockAtHeight(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test 
database: %v", err) @@ -718,7 +718,7 @@ func createChannelEdge(db *DB, node1, node2 *LightningNode) (*ChannelEdgeInfo, func TestEdgeInfoUpdates(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -848,7 +848,7 @@ func newEdgePolicy(chanID uint64, op wire.OutPoint, db *DB, func TestGraphTraversal(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1109,7 +1109,7 @@ func assertChanViewEqualChanPoints(t *testing.T, a []EdgePoint, b []*wire.OutPoi func TestGraphPruning(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1317,7 +1317,7 @@ func TestGraphPruning(t *testing.T) { func TestHighestChanID(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1394,7 +1394,7 @@ func TestHighestChanID(t *testing.T) { func TestChanUpdatesInHorizon(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1570,7 +1570,7 @@ func TestChanUpdatesInHorizon(t *testing.T) { func TestNodeUpdatesInHorizon(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1693,7 +1693,7 @@ func TestNodeUpdatesInHorizon(t *testing.T) { func TestFilterKnownChanIDs(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: 
%v", err) @@ -1810,7 +1810,7 @@ func TestFilterKnownChanIDs(t *testing.T) { func TestFilterChannelRange(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -1929,7 +1929,7 @@ func TestFilterChannelRange(t *testing.T) { func TestFetchChanInfos(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2057,7 +2057,7 @@ func TestFetchChanInfos(t *testing.T) { func TestIncompleteChannelPolicies(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2172,7 +2172,7 @@ func TestIncompleteChannelPolicies(t *testing.T) { func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2327,7 +2327,7 @@ func TestChannelEdgePruningUpdateIndexDeletion(t *testing.T) { func TestPruneGraphNodes(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2411,7 +2411,7 @@ func TestPruneGraphNodes(t *testing.T) { func TestAddChannelEdgeShellNodes(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2465,7 +2465,7 @@ func TestAddChannelEdgeShellNodes(t *testing.T) { func TestNodePruningUpdateIndexDeletion(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ 
-2535,7 +2535,7 @@ func TestNodeIsPublic(t *testing.T) { // We'll need to create a separate database and channel graph for each // participant to replicate real-world scenarios (private edges being in // some graphs but not others, etc.). - aliceDB, cleanUp, err := makeTestDB() + aliceDB, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2549,7 +2549,7 @@ func TestNodeIsPublic(t *testing.T) { t.Fatalf("unable to set source node: %v", err) } - bobDB, cleanUp, err := makeTestDB() + bobDB, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2563,7 +2563,7 @@ func TestNodeIsPublic(t *testing.T) { t.Fatalf("unable to set source node: %v", err) } - carolDB, cleanUp, err := makeTestDB() + carolDB, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2684,7 +2684,7 @@ func TestNodeIsPublic(t *testing.T) { func TestDisabledChannelIDs(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -2782,7 +2782,7 @@ func TestDisabledChannelIDs(t *testing.T) { func TestEdgePolicyMissingMaxHtcl(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test database: %v", err) @@ -2962,7 +2962,7 @@ func TestGraphZombieIndex(t *testing.T) { t.Parallel() // We'll start by creating our test graph along with a test edge. - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to create test database: %v", err) @@ -3151,7 +3151,7 @@ func TestLightningNodeSigVerification(t *testing.T) { } // Create a LightningNode from the same private key. 
- db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } diff --git a/channeldb/invoice_test.go b/channeldb/invoice_test.go index 10148917b..bb118f715 100644 --- a/channeldb/invoice_test.go +++ b/channeldb/invoice_test.go @@ -136,7 +136,7 @@ func TestInvoiceWorkflow(t *testing.T) { } func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) { - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -290,7 +290,7 @@ func testInvoiceWorkflow(t *testing.T, test invWorkflowTest) { // TestAddDuplicatePayAddr asserts that the payment addresses of inserted // invoices are unique. func TestAddDuplicatePayAddr(t *testing.T) { - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() require.NoError(t, err) @@ -317,7 +317,7 @@ func TestAddDuplicatePayAddr(t *testing.T) { // addresses to be inserted if they are blank to support JIT legacy keysend // invoices. func TestAddDuplicateKeysendPayAddr(t *testing.T) { - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() require.NoError(t, err) @@ -358,7 +358,7 @@ func TestAddDuplicateKeysendPayAddr(t *testing.T) { // TestInvRefEquivocation asserts that retrieving or updating an invoice using // an equivocating InvoiceRef results in ErrInvRefEquivocation. 
func TestInvRefEquivocation(t *testing.T) { - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() require.NoError(t, err) @@ -398,7 +398,7 @@ func TestInvRefEquivocation(t *testing.T) { func TestInvoiceCancelSingleHtlc(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -472,7 +472,7 @@ func TestInvoiceCancelSingleHtlc(t *testing.T) { func TestInvoiceAddTimeSeries(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -622,108 +622,65 @@ func TestInvoiceAddTimeSeries(t *testing.T) { } } -// Tests that FetchAllInvoicesWithPaymentHash returns all invoices with their -// corresponding payment hashes. -func TestFetchAllInvoicesWithPaymentHash(t *testing.T) { +// TestScanInvoices tests that ScanInvoices scans trough all stored invoices +// correctly. +func TestScanInvoices(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { t.Fatalf("unable to make test db: %v", err) } - // With an empty DB we expect to return no error and an empty list. - empty, err := db.FetchAllInvoicesWithPaymentHash(false) - if err != nil { - t.Fatalf("failed to call FetchAllInvoicesWithPaymentHash on empty DB: %v", - err) + var invoices map[lntypes.Hash]*Invoice + callCount := 0 + resetCount := 0 + + // reset is used to reset/initialize results and is called once + // upon calling ScanInvoices and when the underlying transaction is + // retried. 
+ reset := func() { + invoices = make(map[lntypes.Hash]*Invoice) + callCount = 0 + resetCount++ + } - if len(empty) != 0 { - t.Fatalf("expected empty list as a result, got: %v", empty) + scanFunc := func(paymentHash lntypes.Hash, invoice *Invoice) error { + invoices[paymentHash] = invoice + callCount++ + + return nil } - states := []ContractState{ - ContractOpen, ContractSettled, ContractCanceled, ContractAccepted, - } + // With an empty DB we expect to not scan any invoices. + require.NoError(t, db.ScanInvoices(scanFunc, reset)) + require.Equal(t, 0, len(invoices)) + require.Equal(t, 0, callCount) + require.Equal(t, 1, resetCount) - numInvoices := len(states) * 2 - testPendingInvoices := make(map[lntypes.Hash]*Invoice) - testAllInvoices := make(map[lntypes.Hash]*Invoice) + numInvoices := 5 + testInvoices := make(map[lntypes.Hash]*Invoice) // Now populate the DB and check if we can get all invoices with their // payment hashes as expected. for i := 1; i <= numInvoices; i++ { invoice, err := randInvoice(lnwire.MilliSatoshi(i)) - if err != nil { - t.Fatalf("unable to create invoice: %v", err) - } + require.NoError(t, err) - // Set the contract state of the next invoice such that there's an equal - // number for all possbile states. 
- invoice.State = states[i%len(states)] paymentHash := invoice.Terms.PaymentPreimage.Hash() + testInvoices[paymentHash] = invoice - if invoice.IsPending() { - testPendingInvoices[paymentHash] = invoice - } - - testAllInvoices[paymentHash] = invoice - - if _, err := db.AddInvoice(invoice, paymentHash); err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - } - - pendingInvoices, err := db.FetchAllInvoicesWithPaymentHash(true) - if err != nil { - t.Fatalf("can't fetch invoices with payment hash: %v", err) - } - - if len(testPendingInvoices) != len(pendingInvoices) { - t.Fatalf("expected %v pending invoices, got: %v", - len(testPendingInvoices), len(pendingInvoices)) - } - - allInvoices, err := db.FetchAllInvoicesWithPaymentHash(false) - if err != nil { - t.Fatalf("can't fetch invoices with payment hash: %v", err) - } - - if len(testAllInvoices) != len(allInvoices) { - t.Fatalf("expected %v invoices, got: %v", - len(testAllInvoices), len(allInvoices)) - } - - for i := range pendingInvoices { - expected, ok := testPendingInvoices[pendingInvoices[i].PaymentHash] - if !ok { - t.Fatalf("coulnd't find invoice with hash: %v", - pendingInvoices[i].PaymentHash) - } - - // Zero out add index to not confuse require.Equal. - pendingInvoices[i].Invoice.AddIndex = 0 - expected.AddIndex = 0 - - require.Equal(t, *expected, pendingInvoices[i].Invoice) - } - - for i := range allInvoices { - expected, ok := testAllInvoices[allInvoices[i].PaymentHash] - if !ok { - t.Fatalf("coulnd't find invoice with hash: %v", - allInvoices[i].PaymentHash) - } - - // Zero out add index to not confuse require.Equal. 
- allInvoices[i].Invoice.AddIndex = 0 - expected.AddIndex = 0 - - require.Equal(t, *expected, allInvoices[i].Invoice) + _, err = db.AddInvoice(invoice, paymentHash) + require.NoError(t, err) } + resetCount = 0 + require.NoError(t, db.ScanInvoices(scanFunc, reset)) + require.Equal(t, numInvoices, callCount) + require.Equal(t, testInvoices, invoices) + require.Equal(t, 1, resetCount) } // TestDuplicateSettleInvoice tests that if we add a new invoice and settle it @@ -732,7 +689,7 @@ func TestFetchAllInvoicesWithPaymentHash(t *testing.T) { func TestDuplicateSettleInvoice(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -797,7 +754,7 @@ func TestDuplicateSettleInvoice(t *testing.T) { func TestQueryInvoices(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB(OptionClock(testClock)) defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -1112,7 +1069,7 @@ func getUpdateInvoice(amt lnwire.MilliSatoshi) InvoiceUpdateCallback { func TestCustomRecords(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatalf("unable to make test db: %v", err) @@ -1194,3 +1151,96 @@ func TestInvoiceRef(t *testing.T) { require.Equal(t, payHash, refByHashAndAddr.PayHash()) require.Equal(t, &payAddr, refByHashAndAddr.PayAddr()) } + +// TestDeleteInvoices tests that deleting a list of invoices will succeed +// if all delete references are valid, or will fail otherwise. +func TestDeleteInvoices(t *testing.T) { + t.Parallel() + + db, cleanup, err := MakeTestDB() + defer cleanup() + require.NoError(t, err, "unable to make test db") + + // Add some invoices to the test db. 
+ numInvoices := 3 + invoicesToDelete := make([]InvoiceDeleteRef, numInvoices) + + for i := 0; i < numInvoices; i++ { + invoice, err := randInvoice(lnwire.MilliSatoshi(i + 1)) + require.NoError(t, err) + + paymentHash := invoice.Terms.PaymentPreimage.Hash() + addIndex, err := db.AddInvoice(invoice, paymentHash) + require.NoError(t, err) + + // Settle the second invoice. + if i == 1 { + invoice, err = db.UpdateInvoice( + InvoiceRefByHash(paymentHash), + getUpdateInvoice(invoice.Terms.Value), + ) + require.NoError(t, err, "unable to settle invoice") + } + + // store the delete ref for later. + invoicesToDelete[i] = InvoiceDeleteRef{ + PayHash: paymentHash, + PayAddr: &invoice.Terms.PaymentAddr, + AddIndex: addIndex, + SettleIndex: invoice.SettleIndex, + } + } + + // assertInvoiceCount asserts that the number of invoices equals + // to the passed count. + assertInvoiceCount := func(count int) { + // Query to collect all invoices. + query := InvoiceQuery{ + IndexOffset: 0, + NumMaxInvoices: math.MaxUint64, + } + + // Check that we really have 3 invoices. + response, err := db.QueryInvoices(query) + require.NoError(t, err) + require.Equal(t, count, len(response.Invoices)) + } + + // XOR one byte of one of the references' hash and attempt to delete. + invoicesToDelete[0].PayHash[2] ^= 3 + require.Error(t, db.DeleteInvoice(invoicesToDelete)) + assertInvoiceCount(3) + + // Restore the hash. + invoicesToDelete[0].PayHash[2] ^= 3 + + // XOR one byte of one of the references' payment address and attempt + // to delete. + invoicesToDelete[1].PayAddr[5] ^= 7 + require.Error(t, db.DeleteInvoice(invoicesToDelete)) + assertInvoiceCount(3) + + // Restore the payment address. + invoicesToDelete[1].PayAddr[5] ^= 7 + + // XOR the second invoice's payment settle index as it is settled, and + // attempt to delete. + invoicesToDelete[1].SettleIndex ^= 11 + require.Error(t, db.DeleteInvoice(invoicesToDelete)) + assertInvoiceCount(3) + + // Restore the settle index. 
+ invoicesToDelete[1].SettleIndex ^= 11 + + // XOR the add index for one of the references and attempt to delete. + invoicesToDelete[2].AddIndex ^= 13 + require.Error(t, db.DeleteInvoice(invoicesToDelete)) + assertInvoiceCount(3) + + // Restore the add index. + invoicesToDelete[2].AddIndex ^= 13 + + // Delete should succeed with all the valid references. + require.NoError(t, db.DeleteInvoice(invoicesToDelete)) + assertInvoiceCount(0) +} diff --git a/channeldb/invoices.go b/channeldb/invoices.go index 436f194e1..5f7b64623 100644 --- a/channeldb/invoices.go +++ b/channeldb/invoices.go @@ -723,28 +723,21 @@ func fetchInvoiceNumByRef(invoiceIndex, payAddrIndex kvdb.RBucket, } } -// InvoiceWithPaymentHash is used to store an invoice and its corresponding -// payment hash. This struct is only used to store results of -// ChannelDB.FetchAllInvoicesWithPaymentHash() call. -type InvoiceWithPaymentHash struct { - // Invoice holds the invoice as selected from the invoices bucket. - Invoice Invoice +// ScanInvoices scans trough all invoices and calls the passed scanFunc for +// for each invoice with its respective payment hash. Additionally a reset() +// closure is passed which is used to reset/initialize partial results and also +// to signal if the kvdb.View transaction has been retried. +func (d *DB) ScanInvoices( + scanFunc func(lntypes.Hash, *Invoice) error, reset func()) error { - // PaymentHash is the payment hash for the Invoice. - PaymentHash lntypes.Hash -} + return kvdb.View(d, func(tx kvdb.RTx) error { + // Reset partial results. As transaction commit success is not + // guaranteed when using etcd, we need to be prepared to redo + // the whole view transaction. In order to be able to do that + // we need a way to reset existing results. This is also done + // upon first run for initialization. + reset() -// FetchAllInvoicesWithPaymentHash returns all invoices and their payment hashes -// currently stored within the database. 
If the pendingOnly param is true, then -// only open or accepted invoices and their payment hashes will be returned, -// skipping all invoices that are fully settled or canceled. Note that the -// returned array is not ordered by add index. -func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) ( - []InvoiceWithPaymentHash, error) { - - var result []InvoiceWithPaymentHash - - err := kvdb.View(d, func(tx kvdb.RTx) error { invoices := tx.ReadBucket(invoiceBucket) if invoices == nil { return ErrNoInvoicesCreated @@ -775,26 +768,12 @@ func (d *DB) FetchAllInvoicesWithPaymentHash(pendingOnly bool) ( return err } - if pendingOnly && !invoice.IsPending() { - return nil - } + var paymentHash lntypes.Hash + copy(paymentHash[:], k) - invoiceWithPaymentHash := InvoiceWithPaymentHash{ - Invoice: invoice, - } - - copy(invoiceWithPaymentHash.PaymentHash[:], k) - result = append(result, invoiceWithPaymentHash) - - return nil + return scanFunc(paymentHash, &invoice) }) }) - - if err != nil { - return nil, err - } - - return result, nil } // InvoiceQuery represents a query to the invoice database. The query allows a @@ -1761,3 +1740,134 @@ func setSettleMetaFields(settleIndex kvdb.RwBucket, invoiceNum []byte, return nil } + +// InvoiceDeleteRef holds a refererence to an invoice to be deleted. +type InvoiceDeleteRef struct { + // PayHash is the payment hash of the target invoice. All invoices are + // currently indexed by payment hash. + PayHash lntypes.Hash + + // PayAddr is the payment addr of the target invoice. Newer invoices + // (0.11 and up) are indexed by payment address in addition to payment + // hash, but pre 0.8 invoices do not have one at all. + PayAddr *[32]byte + + // AddIndex is the add index of the invoice. + AddIndex uint64 + + // SettleIndex is the settle index of the invoice. + SettleIndex uint64 +} + +// DeleteInvoice attempts to delete the passed invoices from the database in +// one transaction. 
The passed delete references hold all keys required to +// delete the invoices without also needing to deserialze them. +func (d *DB) DeleteInvoice(invoicesToDelete []InvoiceDeleteRef) error { + err := kvdb.Update(d, func(tx kvdb.RwTx) error { + invoices := tx.ReadWriteBucket(invoiceBucket) + if invoices == nil { + return ErrNoInvoicesCreated + } + + invoiceIndex := invoices.NestedReadWriteBucket( + invoiceIndexBucket, + ) + if invoiceIndex == nil { + return ErrNoInvoicesCreated + } + + invoiceAddIndex := invoices.NestedReadWriteBucket( + addIndexBucket, + ) + if invoiceAddIndex == nil { + return ErrNoInvoicesCreated + } + // settleIndex can be nil, as the bucket is created lazily + // when the first invoice is settled. + settleIndex := invoices.NestedReadWriteBucket(settleIndexBucket) + + payAddrIndex := tx.ReadWriteBucket(payAddrIndexBucket) + + for _, ref := range invoicesToDelete { + // Fetch the invoice key for using it to check for + // consistency and also to delete from the invoice index. + invoiceKey := invoiceIndex.Get(ref.PayHash[:]) + if invoiceKey == nil { + return ErrInvoiceNotFound + } + + err := invoiceIndex.Delete(ref.PayHash[:]) + if err != nil { + return err + } + + // Delete payment address index reference if there's a + // valid payment address passed. + if ref.PayAddr != nil { + // To ensure consistency check that the already + // fetched invoice key matches the one in the + // payment address index. + key := payAddrIndex.Get(ref.PayAddr[:]) + if !bytes.Equal(key, invoiceKey) { + return fmt.Errorf("unknown invoice") + } + + // Delete from the payment address index. + err := payAddrIndex.Delete(ref.PayAddr[:]) + if err != nil { + return err + } + } + + var addIndexKey [8]byte + byteOrder.PutUint64(addIndexKey[:], ref.AddIndex) + + // To ensure consistency check that the key stored in + // the add index also matches the previously fetched + // invoice key. 
+ key := invoiceAddIndex.Get(addIndexKey[:]) + if !bytes.Equal(key, invoiceKey) { + return fmt.Errorf("unknown invoice") + } + + // Remove from the add index. + err = invoiceAddIndex.Delete(addIndexKey[:]) + if err != nil { + return err + } + + // Remove from the settle index if available and + // if the invoice is settled. + if settleIndex != nil && ref.SettleIndex > 0 { + var settleIndexKey [8]byte + byteOrder.PutUint64( + settleIndexKey[:], ref.SettleIndex, + ) + + // To ensure consistency check that the already + // fetched invoice key matches the one in the + // settle index + key := settleIndex.Get(settleIndexKey[:]) + if !bytes.Equal(key, invoiceKey) { + return fmt.Errorf("unknown invoice") + } + + err = settleIndex.Delete(settleIndexKey[:]) + if err != nil { + return err + } + } + + // Finally remove the serialized invoice from the + // invoice bucket. + err = invoices.Delete(invoiceKey) + if err != nil { + return err + } + } + + return nil + }) + + return err +} diff --git a/channeldb/kvdb/config.go b/channeldb/kvdb/config.go index a4ed68bab..179fde78a 100644 --- a/channeldb/kvdb/config.go +++ b/channeldb/kvdb/config.go @@ -12,7 +12,7 @@ const EtcdBackendName = "etcd" // BoltConfig holds bolt configuration. type BoltConfig struct { - NoFreeListSync bool `long:"nofreelistsync" description:"If true, prevents the database from syncing its freelist to disk"` + SyncFreelist bool `long:"nofreelistsync" description:"Whether the databases used within lnd should sync their freelist to disk. This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` } // EtcdConfig holds etcd configuration. 
diff --git a/channeldb/kvdb/etcd/bucket.go b/channeldb/kvdb/etcd/bucket.go index 3bc087dbf..8a1ff071e 100644 --- a/channeldb/kvdb/etcd/bucket.go +++ b/channeldb/kvdb/etcd/bucket.go @@ -11,9 +11,9 @@ const ( ) var ( - bucketPrefix = []byte("b") - valuePrefix = []byte("v") - sequencePrefix = []byte("$") + valuePostfix = []byte{0x00} + bucketPostfix = []byte{0xFF} + sequencePrefix = []byte("$seq$") ) // makeBucketID returns a deterministic key for the passed byte slice. @@ -28,52 +28,65 @@ func isValidBucketID(s []byte) bool { return len(s) == bucketIDLength } -// makeKey concatenates prefix, parent and key into one byte slice. -// The prefix indicates the use of this key (whether bucket, value or sequence), -// while parentID refers to the parent bucket. -func makeKey(prefix, parent, key []byte) []byte { - keyBuf := make([]byte, len(prefix)+len(parent)+len(key)) - copy(keyBuf, prefix) - copy(keyBuf[len(prefix):], parent) - copy(keyBuf[len(prefix)+len(parent):], key) +// makeKey concatenates parent, key and postfix into one byte slice. +// The postfix indicates the use of this key (whether bucket or value), while +// parent refers to the parent bucket. +func makeKey(parent, key, postfix []byte) []byte { + keyBuf := make([]byte, len(parent)+len(key)+len(postfix)) + copy(keyBuf, parent) + copy(keyBuf[len(parent):], key) + copy(keyBuf[len(parent)+len(key):], postfix) return keyBuf } -// makePrefix concatenates prefix with parent into one byte slice. -func makePrefix(prefix []byte, parent []byte) []byte { - prefixBuf := make([]byte, len(prefix)+len(parent)) - copy(prefixBuf, prefix) - copy(prefixBuf[len(prefix):], parent) - - return prefixBuf -} - // makeBucketKey returns a bucket key from the passed parent bucket id and // the key. func makeBucketKey(parent []byte, key []byte) []byte { - return makeKey(bucketPrefix, parent, key) + return makeKey(parent, key, bucketPostfix) } // makeValueKey returns a value key from the passed parent bucket id and // the key. 
func makeValueKey(parent []byte, key []byte) []byte { - return makeKey(valuePrefix, parent, key) + return makeKey(parent, key, valuePostfix) } // makeSequenceKey returns a sequence key of the passed parent bucket id. func makeSequenceKey(parent []byte) []byte { - return makeKey(sequencePrefix, parent, nil) + keyBuf := make([]byte, len(sequencePrefix)+len(parent)) + copy(keyBuf, sequencePrefix) + copy(keyBuf[len(sequencePrefix):], parent) + return keyBuf } -// makeBucketPrefix returns the bucket prefix of the passed parent bucket id. -// This prefix is used for all sub buckets. -func makeBucketPrefix(parent []byte) []byte { - return makePrefix(bucketPrefix, parent) +// isBucketKey returns true if the passed key is a bucket key, meaning it +// keys a bucket name. +func isBucketKey(key string) bool { + if len(key) < bucketIDLength+1 { + return false + } + + return key[len(key)-1] == bucketPostfix[0] } -// makeValuePrefix returns the value prefix of the passed parent bucket id. -// This prefix is used for all key/values in the bucket. -func makeValuePrefix(parent []byte) []byte { - return makePrefix(valuePrefix, parent) +// getKey chops out the key from the raw key (by removing the bucket id +// prefixing the key and the postfix indicating whether it is a bucket or +// a value key) +func getKey(rawKey string) []byte { + return []byte(rawKey[bucketIDLength : len(rawKey)-1]) +} + +// getKeyVal chops out the key from the raw key (by removing the bucket id +// prefixing the key and the postfix indicating whether it is a bucket or +// a value key) and also returns the appropriate value for the key, which is +// nil in case of buckets (or the set value otherwise). 
+func getKeyVal(kv *KV) ([]byte, []byte) { + var val []byte + + if !isBucketKey(kv.key) { + val = []byte(kv.val) + } + + return getKey(kv.key), val } diff --git a/channeldb/kvdb/etcd/commit_queue.go b/channeldb/kvdb/etcd/commit_queue.go new file mode 100644 index 000000000..f03845650 --- /dev/null +++ b/channeldb/kvdb/etcd/commit_queue.go @@ -0,0 +1,150 @@ +// +build kvdb_etcd + +package etcd + +import ( + "context" + "sync" +) + +// commitQueueSize is the maximum number of commits we let to queue up. All +// remaining commits will block on commitQueue.Add(). +const commitQueueSize = 100 + +// commitQueue is a simple execution queue to manage conflicts for transactions +// and thereby reduce the number of times conflicting transactions need to be +// retried. When a new transaction is added to the queue, we first upgrade the +// read/write counts in the queue's own accounting to decide whether the new +// transaction has any conflicting dependencies. If the transaction does not +// conflict with any other, then it is comitted immediately, otherwise it'll be +// queued up for later exection. +// The algorithm is described in: http://www.cs.umd.edu/~abadi/papers/vll-vldb13.pdf +type commitQueue struct { + ctx context.Context + mx sync.Mutex + readerMap map[string]int + writerMap map[string]int + + commitMutex sync.RWMutex + queue chan (func()) + wg sync.WaitGroup +} + +// NewCommitQueue creates a new commit queue, with the passed abort context. +func NewCommitQueue(ctx context.Context) *commitQueue { + q := &commitQueue{ + ctx: ctx, + readerMap: make(map[string]int), + writerMap: make(map[string]int), + queue: make(chan func(), commitQueueSize), + } + + // Start the queue consumer loop. + q.wg.Add(1) + go q.mainLoop() + + return q +} + +// Wait waits for the queue to stop (after the queue context has been canceled). +func (c *commitQueue) Wait() { + c.wg.Wait() +} + +// Add increases lock counts and queues up tx commit closure for execution. 
+// Transactions that don't have any conflicts are executed immediately by +// "downgrading" the count mutex to allow concurrency. +func (c *commitQueue) Add(commitLoop func(), rset readSet, wset writeSet) { + c.mx.Lock() + blocked := false + + // Mark as blocked if there's any writer changing any of the keys in + // the read set. Do not increment the reader counts yet as we'll need to + // use the original reader counts when scanning through the write set. + for key := range rset { + if c.writerMap[key] > 0 { + blocked = true + break + } + } + + // Mark as blocked if there's any writer or reader for any of the keys + // in the write set. + for key := range wset { + blocked = blocked || c.readerMap[key] > 0 || c.writerMap[key] > 0 + + // Increment the writer count. + c.writerMap[key] += 1 + } + + // Finally we can increment the reader counts for keys in the read set. + for key := range rset { + c.readerMap[key] += 1 + } + + if blocked { + // Add the transaction to the queue if conflicts with an already + // queued one. + c.mx.Unlock() + + select { + case c.queue <- commitLoop: + case <-c.ctx.Done(): + } + } else { + // To make sure we don't add a new tx to the queue that depends + // on this "unblocked" tx, grab the commitMutex before lifting + // the mutex guarding the lock maps. + c.commitMutex.RLock() + c.mx.Unlock() + + // At this point we're safe to execute the "unblocked" tx, as + // we cannot execute blocked tx that may have been read from the + // queue until the commitMutex is held. + commitLoop() + + c.commitMutex.RUnlock() + } +} + +// Done decreases lock counts of the keys in the read/write sets. 
+func (c *commitQueue) Done(rset readSet, wset writeSet) { + c.mx.Lock() + defer c.mx.Unlock() + + for key := range rset { + c.readerMap[key] -= 1 + if c.readerMap[key] == 0 { + delete(c.readerMap, key) + } + } + + for key := range wset { + c.writerMap[key] -= 1 + if c.writerMap[key] == 0 { + delete(c.writerMap, key) + } + } +} + +// mainLoop executes queued transaction commits for transactions that have +// dependencies. The queue ensures that the top element doesn't conflict with +// any other transactions and therefore can be executed freely. +func (c *commitQueue) mainLoop() { + defer c.wg.Done() + + for { + select { + case top := <-c.queue: + // Execute the next blocked transaction. As it is + // the top element in the queue it means that it doesn't + // depend on any other transactions anymore. + c.commitMutex.Lock() + top() + c.commitMutex.Unlock() + + case <-c.ctx.Done(): + return + } + } +} diff --git a/channeldb/kvdb/etcd/commit_queue_test.go b/channeldb/kvdb/etcd/commit_queue_test.go new file mode 100644 index 000000000..16ff71006 --- /dev/null +++ b/channeldb/kvdb/etcd/commit_queue_test.go @@ -0,0 +1,115 @@ +// +build kvdb_etcd + +package etcd + +import ( + "context" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestCommitQueue tests that non-conflicting transactions commit concurrently, +// while conflicting transactions are queued up. +func TestCommitQueue(t *testing.T) { + // The duration of each commit. + const commitDuration = time.Millisecond * 500 + const numCommits = 4 + + var wg sync.WaitGroup + commits := make([]string, numCommits) + idx := int32(-1) + + commit := func(tag string, sleep bool) func() { + return func() { + defer wg.Done() + + // Update our log of commit order. Avoid blocking + // by preallocating the commit log and increasing + // the log index atomically. 
+ i := atomic.AddInt32(&idx, 1) + commits[i] = tag + + if sleep { + time.Sleep(commitDuration) + } + } + } + + // Helper function to create a read set from the passed keys. + makeReadSet := func(keys []string) readSet { + rs := make(map[string]stmGet) + + for _, key := range keys { + rs[key] = stmGet{} + } + + return rs + } + + // Helper function to create a write set from the passed keys. + makeWriteSet := func(keys []string) writeSet { + ws := make(map[string]stmPut) + + for _, key := range keys { + ws[key] = stmPut{} + } + + return ws + } + + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + q := NewCommitQueue(ctx) + defer q.Wait() + defer cancel() + + wg.Add(numCommits) + t1 := time.Now() + + // Tx1: reads: key1, key2, writes: key3, conflict: none + q.Add( + commit("free", true), + makeReadSet([]string{"key1", "key2"}), + makeWriteSet([]string{"key3"}), + ) + // Tx2: reads: key1, key2, writes: key3, conflict: Tx1 + q.Add( + commit("blocked1", false), + makeReadSet([]string{"key1", "key2"}), + makeWriteSet([]string{"key3"}), + ) + // Tx3: reads: key1, writes: key4, conflict: none + q.Add( + commit("free", true), + makeReadSet([]string{"key1", "key2"}), + makeWriteSet([]string{"key4"}), + ) + // Tx4: reads: key2, writes: key4 conflict: Tx3 + q.Add( + commit("blocked2", false), + makeReadSet([]string{"key2"}), + makeWriteSet([]string{"key4"}), + ) + + // Wait for all commits. + wg.Wait() + t2 := time.Now() + + // Expected total execution time: delta. + // 2 * commitDuration <= delta < 3 * commitDuration + delta := t2.Sub(t1) + require.LessOrEqual(t, int64(commitDuration*2), int64(delta)) + require.Greater(t, int64(commitDuration*3), int64(delta)) + + // Expect that the non-conflicting "free" transactions are executed + // before the blocking ones, and the blocking ones are executed in + // the order of addition. 
+ require.Equal(t, + []string{"free", "free", "blocked1", "blocked2"}, + commits, + ) +} diff --git a/channeldb/kvdb/etcd/db.go b/channeldb/kvdb/etcd/db.go index 3bd89c290..9f52ad4eb 100644 --- a/channeldb/kvdb/etcd/db.go +++ b/channeldb/kvdb/etcd/db.go @@ -16,8 +16,8 @@ import ( ) const ( - // etcdConnectionTimeout is the timeout until successful connection to the - // etcd instance. + // etcdConnectionTimeout is the timeout until successful connection to + // the etcd instance. etcdConnectionTimeout = 10 * time.Second // etcdLongTimeout is a timeout for longer taking etcd operatons. @@ -34,7 +34,8 @@ type callerStats struct { func (s callerStats) String() string { return fmt.Sprintf("count: %d, retries: %d, rset: %d, wset: %d", - s.count, s.commitStats.Retries, s.commitStats.Rset, s.commitStats.Wset) + s.count, s.commitStats.Retries, s.commitStats.Rset, + s.commitStats.Wset) } // commitStatsCollector collects commit stats for commits succeeding @@ -117,6 +118,7 @@ type db struct { config BackendConfig cli *clientv3.Client commitStatsCollector *commitStatsCollector + txQueue *commitQueue } // Enforce db implements the walletdb.DB interface. 
@@ -174,12 +176,13 @@ func newEtcdBackend(config BackendConfig) (*db, error) { } cli, err := clientv3.New(clientv3.Config{ - Context: config.Ctx, - Endpoints: []string{config.Host}, - DialTimeout: etcdConnectionTimeout, - Username: config.User, - Password: config.Pass, - TLS: tlsConfig, + Context: config.Ctx, + Endpoints: []string{config.Host}, + DialTimeout: etcdConnectionTimeout, + Username: config.User, + Password: config.Pass, + TLS: tlsConfig, + MaxCallSendMsgSize: 16384*1024 - 1, }) if err != nil { @@ -187,8 +190,9 @@ func newEtcdBackend(config BackendConfig) (*db, error) { } backend := &db{ - cli: cli, - config: config, + cli: cli, + config: config, + txQueue: NewCommitQueue(config.Ctx), } if config.CollectCommitStats { @@ -200,7 +204,9 @@ func newEtcdBackend(config BackendConfig) (*db, error) { // getSTMOptions creats all STM options based on the backend config. func (db *db) getSTMOptions() []STMOptionFunc { - opts := []STMOptionFunc{WithAbortContext(db.config.Ctx)} + opts := []STMOptionFunc{ + WithAbortContext(db.config.Ctx), + } if db.config.CollectCommitStats { opts = append(opts, @@ -220,7 +226,7 @@ func (db *db) View(f func(tx walletdb.ReadTx) error) error { return f(newReadWriteTx(stm, db.config.Prefix)) } - return RunSTM(db.cli, apply, db.getSTMOptions()...) + return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...) } // Update opens a database read/write transaction and executes the function f @@ -234,7 +240,7 @@ func (db *db) Update(f func(tx walletdb.ReadWriteTx) error) error { return f(newReadWriteTx(stm, db.config.Prefix)) } - return RunSTM(db.cli, apply, db.getSTMOptions()...) + return RunSTM(db.cli, apply, db.txQueue, db.getSTMOptions()...) } // PrintStats returns all collected stats pretty printed into a string. @@ -246,18 +252,18 @@ func (db *db) PrintStats() string { return "" } -// BeginReadTx opens a database read transaction. +// BeginReadWriteTx opens a database read+write transaction. 
func (db *db) BeginReadWriteTx() (walletdb.ReadWriteTx, error) { return newReadWriteTx( - NewSTM(db.cli, db.getSTMOptions()...), + NewSTM(db.cli, db.txQueue, db.getSTMOptions()...), db.config.Prefix, ), nil } -// BeginReadWriteTx opens a database read+write transaction. +// BeginReadTx opens a database read transaction. func (db *db) BeginReadTx() (walletdb.ReadTx, error) { return newReadWriteTx( - NewSTM(db.cli, db.getSTMOptions()...), + NewSTM(db.cli, db.txQueue, db.getSTMOptions()...), db.config.Prefix, ), nil } diff --git a/channeldb/kvdb/etcd/db_test.go b/channeldb/kvdb/etcd/db_test.go index 155d912ec..8a5d623e5 100644 --- a/channeldb/kvdb/etcd/db_test.go +++ b/channeldb/kvdb/etcd/db_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/btcsuite/btcwallet/walletdb" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCopy(t *testing.T) { @@ -18,30 +18,30 @@ func TestCopy(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { // "apple" apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, apple) + require.NoError(t, err) + require.NotNil(t, apple) - assert.NoError(t, apple.Put([]byte("key"), []byte("val"))) + require.NoError(t, apple.Put([]byte("key"), []byte("val"))) return nil }) // Expect non-zero copy. var buf bytes.Buffer - assert.NoError(t, db.Copy(&buf)) - assert.Greater(t, buf.Len(), 0) - assert.Nil(t, err) + require.NoError(t, db.Copy(&buf)) + require.Greater(t, buf.Len(), 0) + require.Nil(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), vkey("key", "apple"): "val", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestAbortContext(t *testing.T) { @@ -57,19 +57,19 @@ func TestAbortContext(t *testing.T) { // Pass abort context and abort right away. 
db, err := newEtcdBackend(config) - assert.NoError(t, err) + require.NoError(t, err) cancel() // Expect that the update will fail. err = db.Update(func(tx walletdb.ReadWriteTx) error { _, err := tx.CreateTopLevelBucket([]byte("bucket")) - assert.NoError(t, err) + require.Error(t, err, "context canceled") return nil }) - assert.Error(t, err, "context canceled") + require.Error(t, err, "context canceled") // No changes in the DB. - assert.Equal(t, map[string]string{}, f.Dump()) + require.Equal(t, map[string]string{}, f.Dump()) } diff --git a/channeldb/kvdb/etcd/driver_test.go b/channeldb/kvdb/etcd/driver_test.go index 365eda7a0..ea4196eff 100644 --- a/channeldb/kvdb/etcd/driver_test.go +++ b/channeldb/kvdb/etcd/driver_test.go @@ -6,25 +6,25 @@ import ( "testing" "github.com/btcsuite/btcwallet/walletdb" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestOpenCreateFailure(t *testing.T) { t.Parallel() db, err := walletdb.Open(dbType) - assert.Error(t, err) - assert.Nil(t, db) + require.Error(t, err) + require.Nil(t, db) db, err = walletdb.Open(dbType, "wrong") - assert.Error(t, err) - assert.Nil(t, db) + require.Error(t, err) + require.Nil(t, db) db, err = walletdb.Create(dbType) - assert.Error(t, err) - assert.Nil(t, db) + require.Error(t, err) + require.Nil(t, db) db, err = walletdb.Create(dbType, "wrong") - assert.Error(t, err) - assert.Nil(t, db) + require.Error(t, err) + require.Nil(t, db) } diff --git a/channeldb/kvdb/etcd/embed.go b/channeldb/kvdb/etcd/embed.go index 96ea71ab5..d99b901ef 100644 --- a/channeldb/kvdb/etcd/embed.go +++ b/channeldb/kvdb/etcd/embed.go @@ -42,7 +42,8 @@ func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), error) { cfg.Dir = path // To ensure that we can submit large transactions. - cfg.MaxTxnOps = 1000 + cfg.MaxTxnOps = 8192 + cfg.MaxRequestBytes = 16384 * 1024 // Listen on random free ports. 
clientURL := fmt.Sprintf("127.0.0.1:%d", getFreePort()) @@ -63,8 +64,10 @@ func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), error) { fmt.Errorf("etcd failed to start after: %v", readyTimeout) } + ctx, cancel := context.WithCancel(context.Background()) + connConfig := &BackendConfig{ - Ctx: context.Background(), + Ctx: ctx, Host: "http://" + peerURL, User: "user", Pass: "pass", @@ -72,6 +75,7 @@ func NewEmbeddedEtcdInstance(path string) (*BackendConfig, func(), error) { } return connConfig, func() { + cancel() etcd.Close() }, nil } diff --git a/channeldb/kvdb/etcd/readwrite_bucket.go b/channeldb/kvdb/etcd/readwrite_bucket.go index e60d2cec3..dafab5ff4 100644 --- a/channeldb/kvdb/etcd/readwrite_bucket.go +++ b/channeldb/kvdb/etcd/readwrite_bucket.go @@ -3,7 +3,6 @@ package etcd import ( - "bytes" "strconv" "github.com/btcsuite/btcwallet/walletdb" @@ -24,11 +23,6 @@ type readWriteBucket struct { // newReadWriteBucket creates a new rw bucket with the passed transaction // and bucket id. func newReadWriteBucket(tx *readWriteTx, key, id []byte) *readWriteBucket { - if !bytes.Equal(id, tx.rootBucketID[:]) { - // Add the bucket key/value to the lock set. - tx.lock(string(key), string(id)) - } - return &readWriteBucket{ id: id, tx: tx, @@ -46,44 +40,23 @@ func (b *readWriteBucket) NestedReadBucket(key []byte) walletdb.ReadBucket { // is nil, but it does not include the key/value pairs within those // nested buckets. func (b *readWriteBucket) ForEach(cb func(k, v []byte) error) error { - prefix := makeValuePrefix(b.id) - prefixLen := len(prefix) + prefix := string(b.id) // Get the first matching key that is in the bucket. - kv, err := b.tx.stm.First(string(prefix)) + kv, err := b.tx.stm.First(prefix) if err != nil { return err } for kv != nil { - if err := cb([]byte(kv.key[prefixLen:]), []byte(kv.val)); err != nil { + key, val := getKeyVal(kv) + + if err := cb(key, val); err != nil { return err } // Step to the next key. 
- kv, err = b.tx.stm.Next(string(prefix), kv.key) - if err != nil { - return err - } - } - - // Make a bucket prefix. This prefixes all sub buckets. - prefix = makeBucketPrefix(b.id) - prefixLen = len(prefix) - - // Get the first bucket. - kv, err = b.tx.stm.First(string(prefix)) - if err != nil { - return err - } - - for kv != nil { - if err := cb([]byte(kv.key[prefixLen:]), nil); err != nil { - return err - } - - // Step to the next bucket. - kv, err = b.tx.stm.Next(string(prefix), kv.key) + kv, err = b.tx.stm.Next(prefix, kv.key) if err != nil { return err } @@ -143,6 +116,20 @@ func (b *readWriteBucket) NestedReadWriteBucket(key []byte) walletdb.ReadWriteBu return newReadWriteBucket(b.tx, bucketKey, bucketVal) } +// assertNoValue checks if the value for the passed key exists. +func (b *readWriteBucket) assertNoValue(key []byte) error { + val, err := b.tx.stm.Get(string(makeValueKey(b.id, key))) + if err != nil { + return err + } + + if val != nil { + return walletdb.ErrIncompatibleValue + } + + return nil +} + // CreateBucket creates and returns a new nested bucket with the given // key. Returns ErrBucketExists if the bucket already exists, // ErrBucketNameRequired if the key is empty, or ErrIncompatibleValue @@ -168,11 +155,15 @@ func (b *readWriteBucket) CreateBucket(key []byte) ( return nil, walletdb.ErrBucketExists } + if err := b.assertNoValue(key); err != nil { + return nil, err + } + // Create a deterministic bucket id from the bucket key. newID := makeBucketID(bucketKey) // Create the bucket. 
- b.tx.put(string(bucketKey), string(newID[:])) + b.tx.stm.Put(string(bucketKey), string(newID[:])) return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil } @@ -198,8 +189,12 @@ func (b *readWriteBucket) CreateBucketIfNotExists(key []byte) ( } if !isValidBucketID(bucketVal) { + if err := b.assertNoValue(key); err != nil { + return nil, err + } + newID := makeBucketID(bucketKey) - b.tx.put(string(bucketKey), string(newID[:])) + b.tx.stm.Put(string(bucketKey), string(newID[:])) return newReadWriteBucket(b.tx, bucketKey, newID[:]), nil } @@ -241,46 +236,31 @@ func (b *readWriteBucket) DeleteNestedBucket(key []byte) error { id := queue[0] queue = queue[1:] - // Delete values in the current bucket - valuePrefix := string(makeValuePrefix(id)) - - kv, err := b.tx.stm.First(valuePrefix) + kv, err := b.tx.stm.First(string(id)) if err != nil { return err } for kv != nil { - b.tx.del(kv.key) + b.tx.stm.Del(kv.key) - kv, err = b.tx.stm.Next(valuePrefix, kv.key) + if isBucketKey(kv.key) { + queue = append(queue, []byte(kv.val)) + } + + kv, err = b.tx.stm.Next(string(id), kv.key) if err != nil { return err } } - // Iterate sub buckets - bucketPrefix := string(makeBucketPrefix(id)) - - kv, err = b.tx.stm.First(bucketPrefix) - if err != nil { - return err - } - - for kv != nil { - // Delete sub bucket key. - b.tx.del(kv.key) - // Queue it for traversal. - queue = append(queue, []byte(kv.val)) - - kv, err = b.tx.stm.Next(bucketPrefix, kv.key) - if err != nil { - return err - } - } + // Finally delete the sequence key for the bucket. + b.tx.stm.Del(string(makeSequenceKey(id))) } - // Delete the top level bucket. - b.tx.del(bucketKey) + // Delete the top level bucket and sequence key. 
+ b.tx.stm.Del(bucketKey) + b.tx.stm.Del(string(makeSequenceKey(bucketVal))) return nil } @@ -292,8 +272,17 @@ func (b *readWriteBucket) Put(key, value []byte) error { return walletdb.ErrKeyRequired } + val, err := b.tx.stm.Get(string(makeBucketKey(b.id, key))) + if err != nil { + return err + } + + if val != nil { + return walletdb.ErrIncompatibleValue + } + // Update the transaction with the new value. - b.tx.put(string(makeValueKey(b.id, key)), string(value)) + b.tx.stm.Put(string(makeValueKey(b.id, key)), string(value)) return nil } @@ -306,7 +295,7 @@ func (b *readWriteBucket) Delete(key []byte) error { } // Update the transaction to delete the key/value. - b.tx.del(string(makeValueKey(b.id, key))) + b.tx.stm.Del(string(makeValueKey(b.id, key))) return nil } @@ -336,7 +325,7 @@ func (b *readWriteBucket) SetSequence(v uint64) error { val := strconv.FormatUint(v, 10) // Update the transaction with the new value for the sequence key. - b.tx.put(string(makeSequenceKey(b.id)), val) + b.tx.stm.Put(string(makeSequenceKey(b.id)), val) return nil } diff --git a/channeldb/kvdb/etcd/readwrite_bucket_test.go b/channeldb/kvdb/etcd/readwrite_bucket_test.go index a3a5d6208..2795dce34 100644 --- a/channeldb/kvdb/etcd/readwrite_bucket_test.go +++ b/channeldb/kvdb/etcd/readwrite_bucket_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/btcsuite/btcwallet/walletdb" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestBucketCreation(t *testing.T) { @@ -18,70 +18,70 @@ func TestBucketCreation(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { // empty bucket name b, err := tx.CreateTopLevelBucket(nil) - assert.Error(t, walletdb.ErrBucketNameRequired, err) - assert.Nil(t, b) + require.Error(t, walletdb.ErrBucketNameRequired, err) + require.Nil(t, b) // empty bucket name b, err = 
tx.CreateTopLevelBucket([]byte("")) - assert.Error(t, walletdb.ErrBucketNameRequired, err) - assert.Nil(t, b) + require.Error(t, walletdb.ErrBucketNameRequired, err) + require.Nil(t, b) // "apple" apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, apple) + require.NoError(t, err) + require.NotNil(t, apple) // Check bucket tx. - assert.Equal(t, tx, apple.Tx()) + require.Equal(t, tx, apple.Tx()) // "apple" already created b, err = tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, b) + require.NoError(t, err) + require.NotNil(t, b) // "apple/banana" banana, err := apple.CreateBucket([]byte("banana")) - assert.NoError(t, err) - assert.NotNil(t, banana) + require.NoError(t, err) + require.NotNil(t, banana) banana, err = apple.CreateBucketIfNotExists([]byte("banana")) - assert.NoError(t, err) - assert.NotNil(t, banana) + require.NoError(t, err) + require.NotNil(t, banana) // Try creating "apple/banana" again b, err = apple.CreateBucket([]byte("banana")) - assert.Error(t, walletdb.ErrBucketExists, err) - assert.Nil(t, b) + require.Error(t, walletdb.ErrBucketExists, err) + require.Nil(t, b) // "apple/mango" mango, err := apple.CreateBucket([]byte("mango")) - assert.Nil(t, err) - assert.NotNil(t, mango) + require.Nil(t, err) + require.NotNil(t, mango) // "apple/banana/pear" pear, err := banana.CreateBucket([]byte("pear")) - assert.Nil(t, err) - assert.NotNil(t, pear) + require.Nil(t, err) + require.NotNil(t, pear) // empty bucket - assert.Nil(t, apple.NestedReadWriteBucket(nil)) - assert.Nil(t, apple.NestedReadWriteBucket([]byte(""))) + require.Nil(t, apple.NestedReadWriteBucket(nil)) + require.Nil(t, apple.NestedReadWriteBucket([]byte(""))) // "apple/pear" doesn't exist - assert.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) + require.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) // "apple/banana" exits - assert.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) - 
assert.NotNil(t, apple.NestedReadBucket([]byte("banana"))) + require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) + require.NotNil(t, apple.NestedReadBucket([]byte("banana"))) return nil }) - assert.Nil(t, err) + require.Nil(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), @@ -89,7 +89,7 @@ func TestBucketCreation(t *testing.T) { bkey("apple", "mango"): bval("apple", "mango"), bkey("apple", "banana", "pear"): bval("apple", "banana", "pear"), } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestBucketDeletion(t *testing.T) { @@ -99,99 +99,99 @@ func TestBucketDeletion(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { // "apple" apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) // "apple/banana" banana, err := apple.CreateBucket([]byte("banana")) - assert.Nil(t, err) - assert.NotNil(t, banana) + require.Nil(t, err) + require.NotNil(t, banana) kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} for _, kv := range kvs { - assert.NoError(t, banana.Put([]byte(kv.key), []byte(kv.val))) - assert.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) + require.NoError(t, banana.Put([]byte(kv.key), []byte(kv.val))) + require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) } // Delete a k/v from "apple/banana" - assert.NoError(t, banana.Delete([]byte("key2"))) + require.NoError(t, banana.Delete([]byte("key2"))) // Try getting/putting/deleting invalid k/v's. 
- assert.Nil(t, banana.Get(nil)) - assert.Error(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val"))) - assert.Error(t, walletdb.ErrKeyRequired, banana.Delete(nil)) + require.Nil(t, banana.Get(nil)) + require.Error(t, walletdb.ErrKeyRequired, banana.Put(nil, []byte("val"))) + require.Error(t, walletdb.ErrKeyRequired, banana.Delete(nil)) // Try deleting a k/v that doesn't exist. - assert.NoError(t, banana.Delete([]byte("nokey"))) + require.NoError(t, banana.Delete([]byte("nokey"))) // "apple/pear" pear, err := apple.CreateBucket([]byte("pear")) - assert.Nil(t, err) - assert.NotNil(t, pear) + require.Nil(t, err) + require.NotNil(t, pear) // Put some values into "apple/pear" for _, kv := range kvs { - assert.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val))) - assert.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key))) + require.Nil(t, pear.Put([]byte(kv.key), []byte(kv.val))) + require.Equal(t, []byte(kv.val), pear.Get([]byte(kv.key))) } // Create nested bucket "apple/pear/cherry" cherry, err := pear.CreateBucket([]byte("cherry")) - assert.Nil(t, err) - assert.NotNil(t, cherry) + require.Nil(t, err) + require.NotNil(t, cherry) // Put some values into "apple/pear/cherry" for _, kv := range kvs { - assert.NoError(t, cherry.Put([]byte(kv.key), []byte(kv.val))) + require.NoError(t, cherry.Put([]byte(kv.key), []byte(kv.val))) } // Read back values in "apple/pear/cherry" trough a read bucket. cherryReadBucket := pear.NestedReadBucket([]byte("cherry")) for _, kv := range kvs { - assert.Equal( + require.Equal( t, []byte(kv.val), cherryReadBucket.Get([]byte(kv.key)), ) } // Try deleting some invalid buckets. - assert.Error(t, + require.Error(t, walletdb.ErrBucketNameRequired, apple.DeleteNestedBucket(nil), ) // Try deleting a non existing bucket. 
- assert.Error( + require.Error( t, walletdb.ErrBucketNotFound, apple.DeleteNestedBucket([]byte("missing")), ) // Delete "apple/pear" - assert.Nil(t, apple.DeleteNestedBucket([]byte("pear"))) + require.Nil(t, apple.DeleteNestedBucket([]byte("pear"))) // "apple/pear" deleted - assert.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) + require.Nil(t, apple.NestedReadWriteBucket([]byte("pear"))) // "apple/pear/cherry" deleted - assert.Nil(t, pear.NestedReadWriteBucket([]byte("cherry"))) + require.Nil(t, pear.NestedReadWriteBucket([]byte("cherry"))) // Values deleted too. for _, kv := range kvs { - assert.Nil(t, pear.Get([]byte(kv.key))) - assert.Nil(t, cherry.Get([]byte(kv.key))) + require.Nil(t, pear.Get([]byte(kv.key))) + require.Nil(t, cherry.Get([]byte(kv.key))) } // "aple/banana" exists - assert.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) + require.NotNil(t, apple.NestedReadWriteBucket([]byte("banana"))) return nil }) - assert.Nil(t, err) + require.Nil(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), @@ -199,7 +199,7 @@ func TestBucketDeletion(t *testing.T) { vkey("key1", "apple", "banana"): "val1", vkey("key3", "apple", "banana"): "val3", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestBucketForEach(t *testing.T) { @@ -209,28 +209,28 @@ func TestBucketForEach(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { // "apple" apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) // "apple/banana" banana, err := apple.CreateBucket([]byte("banana")) - assert.Nil(t, err) - assert.NotNil(t, banana) + require.Nil(t, err) + require.NotNil(t, banana) kvs := []KV{{"key1", "val1"}, {"key2", "val2"}, {"key3", "val3"}} // put some values into "apple" and "apple/banana" too for 
_, kv := range kvs { - assert.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) - assert.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) + require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) + require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) - assert.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val))) - assert.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) + require.Nil(t, banana.Put([]byte(kv.key), []byte(kv.val))) + require.Equal(t, []byte(kv.val), banana.Get([]byte(kv.key))) } got := make(map[string]string) @@ -246,8 +246,8 @@ func TestBucketForEach(t *testing.T) { "banana": "", } - assert.NoError(t, err) - assert.Equal(t, expected, got) + require.NoError(t, err) + require.Equal(t, expected, got) got = make(map[string]string) err = banana.ForEach(func(key, val []byte) error { @@ -255,15 +255,15 @@ func TestBucketForEach(t *testing.T) { return nil }) - assert.NoError(t, err) + require.NoError(t, err) // remove the sub-bucket key delete(expected, "banana") - assert.Equal(t, expected, got) + require.Equal(t, expected, got) return nil }) - assert.Nil(t, err) + require.Nil(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), @@ -275,7 +275,7 @@ func TestBucketForEach(t *testing.T) { vkey("key2", "apple", "banana"): "val2", vkey("key3", "apple", "banana"): "val3", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestBucketForEachWithError(t *testing.T) { @@ -285,37 +285,37 @@ func TestBucketForEachWithError(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { // "apple" apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) // "apple/banana" banana, err := apple.CreateBucket([]byte("banana")) - assert.Nil(t, err) - assert.NotNil(t, banana) + require.Nil(t, err) + 
require.NotNil(t, banana) // "apple/pear" pear, err := apple.CreateBucket([]byte("pear")) - assert.Nil(t, err) - assert.NotNil(t, pear) + require.Nil(t, err) + require.NotNil(t, pear) kvs := []KV{{"key1", "val1"}, {"key2", "val2"}} // Put some values into "apple" and "apple/banana" too. for _, kv := range kvs { - assert.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) - assert.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) + require.Nil(t, apple.Put([]byte(kv.key), []byte(kv.val))) + require.Equal(t, []byte(kv.val), apple.Get([]byte(kv.key))) } got := make(map[string]string) i := 0 // Error while iterating value keys. err = apple.ForEach(func(key, val []byte) error { - if i == 1 { + if i == 2 { return fmt.Errorf("error") } @@ -325,11 +325,12 @@ func TestBucketForEachWithError(t *testing.T) { }) expected := map[string]string{ - "key1": "val1", + "banana": "", + "key1": "val1", } - assert.Equal(t, expected, got) - assert.Error(t, err) + require.Equal(t, expected, got) + require.Error(t, err) got = make(map[string]string) i = 0 @@ -345,17 +346,17 @@ func TestBucketForEachWithError(t *testing.T) { }) expected = map[string]string{ + "banana": "", "key1": "val1", "key2": "val2", - "banana": "", } - assert.Equal(t, expected, got) - assert.Error(t, err) + require.Equal(t, expected, got) + require.Error(t, err) return nil }) - assert.Nil(t, err) + require.Nil(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), @@ -364,7 +365,7 @@ func TestBucketForEachWithError(t *testing.T) { vkey("key1", "apple"): "val1", vkey("key2", "apple"): "val2", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestBucketSequence(t *testing.T) { @@ -374,31 +375,149 @@ func TestBucketSequence(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { apple, err := tx.CreateTopLevelBucket([]byte("apple")) - 
assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) banana, err := apple.CreateBucket([]byte("banana")) - assert.Nil(t, err) - assert.NotNil(t, banana) + require.Nil(t, err) + require.NotNil(t, banana) - assert.Equal(t, uint64(0), apple.Sequence()) - assert.Equal(t, uint64(0), banana.Sequence()) + require.Equal(t, uint64(0), apple.Sequence()) + require.Equal(t, uint64(0), banana.Sequence()) - assert.Nil(t, apple.SetSequence(math.MaxUint64)) - assert.Equal(t, uint64(math.MaxUint64), apple.Sequence()) + require.Nil(t, apple.SetSequence(math.MaxUint64)) + require.Equal(t, uint64(math.MaxUint64), apple.Sequence()) for i := uint64(0); i < uint64(5); i++ { s, err := apple.NextSequence() - assert.Nil(t, err) - assert.Equal(t, i, s) + require.Nil(t, err) + require.Equal(t, i, s) } return nil }) - assert.Nil(t, err) + require.Nil(t, err) +} + +// TestKeyClash tests that one cannot create a bucket if a value with the same +// key exists and the same is true in reverse: that a value cannot be put if +// a bucket with the same key exists. 
+func TestKeyClash(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + require.NoError(t, err) + + // First: + // put: /apple/key -> val + // create bucket: /apple/banana + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + require.Nil(t, err) + require.NotNil(t, apple) + + require.NoError(t, apple.Put([]byte("key"), []byte("val"))) + + banana, err := apple.CreateBucket([]byte("banana")) + require.Nil(t, err) + require.NotNil(t, banana) + + return nil + }) + + require.Nil(t, err) + + // Next try to: + // put: /apple/banana -> val => will fail (as /apple/banana is a bucket) + // create bucket: /apple/key => will fail (as /apple/key is a value) + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + require.Nil(t, err) + require.NotNil(t, apple) + + require.Error(t, + walletdb.ErrIncompatibleValue, + apple.Put([]byte("banana"), []byte("val")), + ) + + b, err := apple.CreateBucket([]byte("key")) + require.Nil(t, b) + require.Error(t, walletdb.ErrIncompatibleValue, b) + + b, err = apple.CreateBucketIfNotExists([]byte("key")) + require.Nil(t, b) + require.Error(t, walletdb.ErrIncompatibleValue, b) + + return nil + }) + + require.Nil(t, err) + + // Except that the only existing items in the db are: + // bucket: /apple + // bucket: /apple/banana + // value: /apple/key -> val + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + vkey("key", "apple"): "val", + } + require.Equal(t, expected, f.Dump()) + +} + +// TestBucketCreateDelete tests that creating then deleting then creating a +// bucket suceeds. 
+func TestBucketCreateDelete(t *testing.T) { + t.Parallel() + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + require.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple, err := tx.CreateTopLevelBucket([]byte("apple")) + require.NoError(t, err) + require.NotNil(t, apple) + + banana, err := apple.CreateBucket([]byte("banana")) + require.NoError(t, err) + require.NotNil(t, banana) + + return nil + }) + require.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple := tx.ReadWriteBucket([]byte("apple")) + require.NotNil(t, apple) + require.NoError(t, apple.DeleteNestedBucket([]byte("banana"))) + + return nil + }) + require.NoError(t, err) + + err = db.Update(func(tx walletdb.ReadWriteTx) error { + apple := tx.ReadWriteBucket([]byte("apple")) + require.NotNil(t, apple) + require.NoError(t, apple.Put([]byte("banana"), []byte("value"))) + + return nil + }) + require.NoError(t, err) + + expected := map[string]string{ + vkey("banana", "apple"): "value", + bkey("apple"): bval("apple"), + } + require.Equal(t, expected, f.Dump()) } diff --git a/channeldb/kvdb/etcd/readwrite_cursor.go b/channeldb/kvdb/etcd/readwrite_cursor.go index 989656933..75c0456d7 100644 --- a/channeldb/kvdb/etcd/readwrite_cursor.go +++ b/channeldb/kvdb/etcd/readwrite_cursor.go @@ -19,7 +19,7 @@ type readWriteCursor struct { func newReadWriteCursor(bucket *readWriteBucket) *readWriteCursor { return &readWriteCursor{ bucket: bucket, - prefix: string(makeValuePrefix(bucket.id)), + prefix: string(bucket.id), } } @@ -35,8 +35,7 @@ func (c *readWriteCursor) First() (key, value []byte) { if kv != nil { c.currKey = kv.key - // Chop the prefix and return the key/value. 
- return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + return getKeyVal(kv) } return nil, nil @@ -53,8 +52,7 @@ func (c *readWriteCursor) Last() (key, value []byte) { if kv != nil { c.currKey = kv.key - // Chop the prefix and return the key/value. - return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + return getKeyVal(kv) } return nil, nil @@ -71,8 +69,7 @@ func (c *readWriteCursor) Next() (key, value []byte) { if kv != nil { c.currKey = kv.key - // Chop the prefix and return the key/value. - return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + return getKeyVal(kv) } return nil, nil @@ -89,8 +86,7 @@ func (c *readWriteCursor) Prev() (key, value []byte) { if kv != nil { c.currKey = kv.key - // Chop the prefix and return the key/value. - return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + return getKeyVal(kv) } return nil, nil @@ -115,8 +111,7 @@ func (c *readWriteCursor) Seek(seek []byte) (key, value []byte) { if kv != nil { c.currKey = kv.key - // Chop the prefix and return the key/value. - return []byte(kv.key[len(c.prefix):]), []byte(kv.val) + return getKeyVal(kv) } return nil, nil @@ -133,11 +128,14 @@ func (c *readWriteCursor) Delete() error { return err } - // Delete the current key. - c.bucket.tx.stm.Del(c.currKey) + if isBucketKey(c.currKey) { + c.bucket.DeleteNestedBucket(getKey(c.currKey)) + } else { + c.bucket.Delete(getKey(c.currKey)) + } - // Set current key to the next one if possible. if nextKey != nil { + // Set current key to the next one. 
c.currKey = nextKey.key } diff --git a/channeldb/kvdb/etcd/readwrite_cursor_test.go b/channeldb/kvdb/etcd/readwrite_cursor_test.go index c14de7aa8..216b47c43 100644 --- a/channeldb/kvdb/etcd/readwrite_cursor_test.go +++ b/channeldb/kvdb/etcd/readwrite_cursor_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/btcsuite/btcwallet/walletdb" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestReadCursorEmptyInterval(t *testing.T) { @@ -16,41 +16,41 @@ func TestReadCursorEmptyInterval(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) err = db.Update(func(tx walletdb.ReadWriteTx) error { - b, err := tx.CreateTopLevelBucket([]byte("alma")) - assert.NoError(t, err) - assert.NotNil(t, b) + b, err := tx.CreateTopLevelBucket([]byte("apple")) + require.NoError(t, err) + require.NotNil(t, b) return nil }) - assert.NoError(t, err) + require.NoError(t, err) err = db.View(func(tx walletdb.ReadTx) error { - b := tx.ReadBucket([]byte("alma")) - assert.NotNil(t, b) + b := tx.ReadBucket([]byte("apple")) + require.NotNil(t, b) cursor := b.ReadCursor() k, v := cursor.First() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) k, v = cursor.Next() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) k, v = cursor.Last() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) k, v = cursor.Prev() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) return nil }) - assert.NoError(t, err) + require.NoError(t, err) } func TestReadCursorNonEmptyInterval(t *testing.T) { @@ -60,7 +60,7 @@ func TestReadCursorNonEmptyInterval(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) testKeyValues := []KV{ {"b", "1"}, @@ -70,21 +70,21 @@ func TestReadCursorNonEmptyInterval(t *testing.T) { } err = 
db.Update(func(tx walletdb.ReadWriteTx) error { - b, err := tx.CreateTopLevelBucket([]byte("alma")) - assert.NoError(t, err) - assert.NotNil(t, b) + b, err := tx.CreateTopLevelBucket([]byte("apple")) + require.NoError(t, err) + require.NotNil(t, b) for _, kv := range testKeyValues { - assert.NoError(t, b.Put([]byte(kv.key), []byte(kv.val))) + require.NoError(t, b.Put([]byte(kv.key), []byte(kv.val))) } return nil }) - assert.NoError(t, err) + require.NoError(t, err) err = db.View(func(tx walletdb.ReadTx) error { - b := tx.ReadBucket([]byte("alma")) - assert.NotNil(t, b) + b := tx.ReadBucket([]byte("apple")) + require.NotNil(t, b) // Iterate from the front. var kvs []KV @@ -95,7 +95,7 @@ func TestReadCursorNonEmptyInterval(t *testing.T) { kvs = append(kvs, KV{string(k), string(v)}) k, v = cursor.Next() } - assert.Equal(t, testKeyValues, kvs) + require.Equal(t, testKeyValues, kvs) // Iterate from the back. kvs = []KV{} @@ -105,29 +105,29 @@ func TestReadCursorNonEmptyInterval(t *testing.T) { kvs = append(kvs, KV{string(k), string(v)}) k, v = cursor.Prev() } - assert.Equal(t, reverseKVs(testKeyValues), kvs) + require.Equal(t, reverseKVs(testKeyValues), kvs) // Random access perm := []int{3, 0, 2, 1} for _, i := range perm { k, v := cursor.Seek([]byte(testKeyValues[i].key)) - assert.Equal(t, []byte(testKeyValues[i].key), k) - assert.Equal(t, []byte(testKeyValues[i].val), v) + require.Equal(t, []byte(testKeyValues[i].key), k) + require.Equal(t, []byte(testKeyValues[i].val), v) } // Seek to nonexisting key. 
k, v = cursor.Seek(nil) - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) k, v = cursor.Seek([]byte("x")) - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) return nil }) - assert.NoError(t, err) + require.NoError(t, err) } func TestReadWriteCursor(t *testing.T) { @@ -137,7 +137,7 @@ func TestReadWriteCursor(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) testKeyValues := []KV{ {"b", "1"}, @@ -149,24 +149,24 @@ func TestReadWriteCursor(t *testing.T) { count := len(testKeyValues) // Pre-store the first half of the interval. - assert.NoError(t, db.Update(func(tx walletdb.ReadWriteTx) error { + require.NoError(t, db.Update(func(tx walletdb.ReadWriteTx) error { b, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, b) + require.NoError(t, err) + require.NotNil(t, b) for i := 0; i < count/2; i++ { err = b.Put( []byte(testKeyValues[i].key), []byte(testKeyValues[i].val), ) - assert.NoError(t, err) + require.NoError(t, err) } return nil })) err = db.Update(func(tx walletdb.ReadWriteTx) error { b := tx.ReadWriteBucket([]byte("apple")) - assert.NotNil(t, b) + require.NotNil(t, b) // Store the second half of the interval. for i := count / 2; i < count; i++ { @@ -174,77 +174,77 @@ func TestReadWriteCursor(t *testing.T) { []byte(testKeyValues[i].key), []byte(testKeyValues[i].val), ) - assert.NoError(t, err) + require.NoError(t, err) } cursor := b.ReadWriteCursor() // First on valid interval. fk, fv := cursor.First() - assert.Equal(t, []byte("b"), fk) - assert.Equal(t, []byte("1"), fv) + require.Equal(t, []byte("b"), fk) + require.Equal(t, []byte("1"), fv) // Prev(First()) = nil k, v := cursor.Prev() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) // Last on valid interval. 
lk, lv := cursor.Last() - assert.Equal(t, []byte("e"), lk) - assert.Equal(t, []byte("4"), lv) + require.Equal(t, []byte("e"), lk) + require.Equal(t, []byte("4"), lv) // Next(Last()) = nil k, v = cursor.Next() - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) // Delete first item, then add an item before the // deleted one. Check that First/Next will "jump" // over the deleted item and return the new first. _, _ = cursor.First() - assert.NoError(t, cursor.Delete()) - assert.NoError(t, b.Put([]byte("a"), []byte("0"))) + require.NoError(t, cursor.Delete()) + require.NoError(t, b.Put([]byte("a"), []byte("0"))) fk, fv = cursor.First() - assert.Equal(t, []byte("a"), fk) - assert.Equal(t, []byte("0"), fv) + require.Equal(t, []byte("a"), fk) + require.Equal(t, []byte("0"), fv) k, v = cursor.Next() - assert.Equal(t, []byte("c"), k) - assert.Equal(t, []byte("2"), v) + require.Equal(t, []byte("c"), k) + require.Equal(t, []byte("2"), v) // Similarly test that a new end is returned if // the old end is deleted first. _, _ = cursor.Last() - assert.NoError(t, cursor.Delete()) - assert.NoError(t, b.Put([]byte("f"), []byte("5"))) + require.NoError(t, cursor.Delete()) + require.NoError(t, b.Put([]byte("f"), []byte("5"))) lk, lv = cursor.Last() - assert.Equal(t, []byte("f"), lk) - assert.Equal(t, []byte("5"), lv) + require.Equal(t, []byte("f"), lk) + require.Equal(t, []byte("5"), lv) k, v = cursor.Prev() - assert.Equal(t, []byte("da"), k) - assert.Equal(t, []byte("3"), v) + require.Equal(t, []byte("da"), k) + require.Equal(t, []byte("3"), v) // Overwrite k/v in the middle of the interval. - assert.NoError(t, b.Put([]byte("c"), []byte("3"))) + require.NoError(t, b.Put([]byte("c"), []byte("3"))) k, v = cursor.Prev() - assert.Equal(t, []byte("c"), k) - assert.Equal(t, []byte("3"), v) + require.Equal(t, []byte("c"), k) + require.Equal(t, []byte("3"), v) // Insert new key/values. 
- assert.NoError(t, b.Put([]byte("cx"), []byte("x"))) - assert.NoError(t, b.Put([]byte("cy"), []byte("y"))) + require.NoError(t, b.Put([]byte("cx"), []byte("x"))) + require.NoError(t, b.Put([]byte("cy"), []byte("y"))) k, v = cursor.Next() - assert.Equal(t, []byte("cx"), k) - assert.Equal(t, []byte("x"), v) + require.Equal(t, []byte("cx"), k) + require.Equal(t, []byte("x"), v) k, v = cursor.Next() - assert.Equal(t, []byte("cy"), k) - assert.Equal(t, []byte("y"), v) + require.Equal(t, []byte("cy"), k) + require.Equal(t, []byte("y"), v) expected := []KV{ {"a", "0"}, @@ -263,7 +263,7 @@ func TestReadWriteCursor(t *testing.T) { kvs = append(kvs, KV{string(k), string(v)}) k, v = cursor.Next() } - assert.Equal(t, expected, kvs) + require.Equal(t, expected, kvs) // Iterate from the back. kvs = []KV{} @@ -273,12 +273,12 @@ func TestReadWriteCursor(t *testing.T) { kvs = append(kvs, KV{string(k), string(v)}) k, v = cursor.Prev() } - assert.Equal(t, reverseKVs(expected), kvs) + require.Equal(t, reverseKVs(expected), kvs) return nil }) - assert.NoError(t, err) + require.NoError(t, err) expected := map[string]string{ bkey("apple"): bval("apple"), @@ -289,5 +289,80 @@ func TestReadWriteCursor(t *testing.T) { vkey("da", "apple"): "3", vkey("f", "apple"): "5", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) +} + +// TestReadWriteCursorWithBucketAndValue tests that cursors are able to iterate +// over both bucket and value keys if both are present in the iterated bucket. +func TestReadWriteCursorWithBucketAndValue(t *testing.T) { + t.Parallel() + + f := NewEtcdTestFixture(t) + defer f.Cleanup() + + db, err := newEtcdBackend(f.BackendConfig()) + require.NoError(t, err) + + // Pre-store the first half of the interval. 
+ require.NoError(t, db.Update(func(tx walletdb.ReadWriteTx) error { + b, err := tx.CreateTopLevelBucket([]byte("apple")) + require.NoError(t, err) + require.NotNil(t, b) + + require.NoError(t, b.Put([]byte("key"), []byte("val"))) + + b1, err := b.CreateBucket([]byte("banana")) + require.NoError(t, err) + require.NotNil(t, b1) + + b2, err := b.CreateBucket([]byte("pear")) + require.NoError(t, err) + require.NotNil(t, b2) + + return nil + })) + + err = db.View(func(tx walletdb.ReadTx) error { + b := tx.ReadBucket([]byte("apple")) + require.NotNil(t, b) + + cursor := b.ReadCursor() + + // First on valid interval. + k, v := cursor.First() + require.Equal(t, []byte("banana"), k) + require.Nil(t, v) + + k, v = cursor.Next() + require.Equal(t, []byte("key"), k) + require.Equal(t, []byte("val"), v) + + k, v = cursor.Last() + require.Equal(t, []byte("pear"), k) + require.Nil(t, v) + + k, v = cursor.Seek([]byte("k")) + require.Equal(t, []byte("key"), k) + require.Equal(t, []byte("val"), v) + + k, v = cursor.Seek([]byte("banana")) + require.Equal(t, []byte("banana"), k) + require.Nil(t, v) + + k, v = cursor.Next() + require.Equal(t, []byte("key"), k) + require.Equal(t, []byte("val"), v) + + return nil + }) + + require.NoError(t, err) + + expected := map[string]string{ + bkey("apple"): bval("apple"), + bkey("apple", "banana"): bval("apple", "banana"), + bkey("apple", "pear"): bval("apple", "pear"), + vkey("key", "apple"): "val", + } + require.Equal(t, expected, f.Dump()) } diff --git a/channeldb/kvdb/etcd/readwrite_tx.go b/channeldb/kvdb/etcd/readwrite_tx.go index 22d0ce421..81c27323f 100644 --- a/channeldb/kvdb/etcd/readwrite_tx.go +++ b/channeldb/kvdb/etcd/readwrite_tx.go @@ -17,14 +17,6 @@ type readWriteTx struct { // active is true if the transaction hasn't been committed yet. active bool - - // dirty is true if we intent to update a value in this transaction. - dirty bool - - // lset holds key/value set that we want to lock on. 
If upon commit the - // transaction is dirty and the lset is not empty, we'll bump the mod - // version of these key/values. - lset map[string]string } // newReadWriteTx creates an rw transaction with the passed STM. @@ -33,7 +25,6 @@ func newReadWriteTx(stm STM, prefix string) *readWriteTx { stm: stm, active: true, rootBucketID: makeBucketID([]byte(prefix)), - lset: make(map[string]string), } } @@ -43,50 +34,6 @@ func rootBucket(tx *readWriteTx) *readWriteBucket { return newReadWriteBucket(tx, tx.rootBucketID[:], tx.rootBucketID[:]) } -// lock adds a key value to the lock set. -func (tx *readWriteTx) lock(key, val string) { - tx.stm.Lock(key) - if !tx.dirty { - tx.lset[key] = val - } else { - // Bump the mod version of the key, - // leaving the value intact. - tx.stm.Put(key, val) - } -} - -// put updates the passed key/value. -func (tx *readWriteTx) put(key, val string) { - tx.stm.Put(key, val) - tx.setDirty() -} - -// del marks the passed key deleted. -func (tx *readWriteTx) del(key string) { - tx.stm.Del(key) - tx.setDirty() -} - -// setDirty marks the transaction dirty and bumps -// mod version for the existing lock set if it is -// not empty. -func (tx *readWriteTx) setDirty() { - // Bump the lock set. - if !tx.dirty && len(tx.lset) > 0 { - for key, val := range tx.lset { - // Bump the mod version of the key, - // leaving the value intact. - tx.stm.Put(key, val) - } - - // Clear the lock set. - tx.lset = make(map[string]string) - } - - // Set dirty. - tx.dirty = true -} - // ReadBucket opens the root bucket for read only access. If the bucket // described by the key does not exist, nil is returned. 
func (tx *readWriteTx) ReadBucket(key []byte) walletdb.ReadBucket { diff --git a/channeldb/kvdb/etcd/readwrite_tx_test.go b/channeldb/kvdb/etcd/readwrite_tx_test.go index f65faa545..bab6967f8 100644 --- a/channeldb/kvdb/etcd/readwrite_tx_test.go +++ b/channeldb/kvdb/etcd/readwrite_tx_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/btcsuite/btcwallet/walletdb" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestTxManualCommit(t *testing.T) { @@ -16,11 +16,11 @@ func TestTxManualCommit(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) tx, err := db.BeginReadWriteTx() - assert.NoError(t, err) - assert.NotNil(t, tx) + require.NoError(t, err) + require.NotNil(t, tx) committed := false @@ -29,24 +29,24 @@ func TestTxManualCommit(t *testing.T) { }) apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, apple) - assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + require.NoError(t, err) + require.NotNil(t, apple) + require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) banana, err := tx.CreateTopLevelBucket([]byte("banana")) - assert.NoError(t, err) - assert.NotNil(t, banana) - assert.NoError(t, banana.Put([]byte("testKey"), []byte("testVal"))) - assert.NoError(t, tx.DeleteTopLevelBucket([]byte("banana"))) + require.NoError(t, err) + require.NotNil(t, banana) + require.NoError(t, banana.Put([]byte("testKey"), []byte("testVal"))) + require.NoError(t, tx.DeleteTopLevelBucket([]byte("banana"))) - assert.NoError(t, tx.Commit()) - assert.True(t, committed) + require.NoError(t, tx.Commit()) + require.True(t, committed) expected := map[string]string{ bkey("apple"): bval("apple"), vkey("testKey", "apple"): "testVal", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } func TestTxRollback(t *testing.T) { @@ -56,21 +56,21 @@ func TestTxRollback(t *testing.T) 
{ defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) tx, err := db.BeginReadWriteTx() - assert.Nil(t, err) - assert.NotNil(t, tx) + require.Nil(t, err) + require.NotNil(t, tx) apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) - assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) - assert.NoError(t, tx.Rollback()) - assert.Error(t, walletdb.ErrTxClosed, tx.Commit()) - assert.Equal(t, map[string]string{}, f.Dump()) + require.NoError(t, tx.Rollback()) + require.Error(t, walletdb.ErrTxClosed, tx.Commit()) + require.Equal(t, map[string]string{}, f.Dump()) } func TestChangeDuringManualTx(t *testing.T) { @@ -80,24 +80,24 @@ func TestChangeDuringManualTx(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) tx, err := db.BeginReadWriteTx() - assert.Nil(t, err) - assert.NotNil(t, tx) + require.Nil(t, err) + require.NotNil(t, tx) apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.Nil(t, err) - assert.NotNil(t, apple) + require.Nil(t, err) + require.NotNil(t, apple) - assert.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) + require.NoError(t, apple.Put([]byte("testKey"), []byte("testVal"))) // Try overwriting the bucket key. 
f.Put(bkey("apple"), "banana") // TODO: translate error - assert.NotNil(t, tx.Commit()) - assert.Equal(t, map[string]string{ + require.NotNil(t, tx.Commit()) + require.Equal(t, map[string]string{ bkey("apple"): "banana", }, f.Dump()) } @@ -109,16 +109,16 @@ func TestChangeDuringUpdate(t *testing.T) { defer f.Cleanup() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) count := 0 err = db.Update(func(tx walletdb.ReadWriteTx) error { apple, err := tx.CreateTopLevelBucket([]byte("apple")) - assert.NoError(t, err) - assert.NotNil(t, apple) + require.NoError(t, err) + require.NotNil(t, apple) - assert.NoError(t, apple.Put([]byte("key"), []byte("value"))) + require.NoError(t, apple.Put([]byte("key"), []byte("value"))) if count == 0 { f.Put(vkey("key", "apple"), "new_value") @@ -127,30 +127,30 @@ func TestChangeDuringUpdate(t *testing.T) { cursor := apple.ReadCursor() k, v := cursor.First() - assert.Equal(t, []byte("key"), k) - assert.Equal(t, []byte("value"), v) - assert.Equal(t, v, apple.Get([]byte("key"))) + require.Equal(t, []byte("key"), k) + require.Equal(t, []byte("value"), v) + require.Equal(t, v, apple.Get([]byte("key"))) k, v = cursor.Next() if count == 0 { - assert.Nil(t, k) - assert.Nil(t, v) + require.Nil(t, k) + require.Nil(t, v) } else { - assert.Equal(t, []byte("key2"), k) - assert.Equal(t, []byte("value2"), v) + require.Equal(t, []byte("key2"), k) + require.Equal(t, []byte("value2"), v) } count++ return nil }) - assert.Nil(t, err) - assert.Equal(t, count, 2) + require.Nil(t, err) + require.Equal(t, count, 2) expected := map[string]string{ bkey("apple"): bval("apple"), vkey("key", "apple"): "value", vkey("key2", "apple"): "value2", } - assert.Equal(t, expected, f.Dump()) + require.Equal(t, expected, f.Dump()) } diff --git a/channeldb/kvdb/etcd/stm.go b/channeldb/kvdb/etcd/stm.go index 7a2f33b51..59ac1f457 100644 --- a/channeldb/kvdb/etcd/stm.go +++ b/channeldb/kvdb/etcd/stm.go @@ -32,11 +32,6 @@ type STM 
interface { // set. Returns nil if there's no matching key, or the key is empty. Get(key string) ([]byte, error) - // Lock adds a key to the lock set. If the lock set is not empty, we'll - // only check for conflicts in the lock set and the write set, instead - // of all read keys plus the write set. - Lock(key string) - // Put adds a value for a key to the txn's write set. Put(key, val string) @@ -139,6 +134,10 @@ type stm struct { // execute in the STM run loop. manual bool + // txQueue is a lightweight contention manager, which is used to detect + // transaction conflicts and reduce retries. + txQueue *commitQueue + // options stores optional settings passed by the user. options *STMOptions @@ -151,9 +150,6 @@ type stm struct { // wset holds overwritten keys and their values. wset writeSet - // lset holds keys we intent to lock on. - lset map[string]interface{} - // getOpts are the opts used for gets. getOpts []v3.OpOption @@ -191,18 +187,22 @@ func WithCommitStatsCallback(cb func(bool, CommitStats)) STMOptionFunc { // RunSTM runs the apply function by creating an STM using serializable snapshot // isolation, passing it to the apply and handling commit errors and retries. -func RunSTM(cli *v3.Client, apply func(STM) error, so ...STMOptionFunc) error { - return runSTM(makeSTM(cli, false, so...), apply) +func RunSTM(cli *v3.Client, apply func(STM) error, txQueue *commitQueue, + so ...STMOptionFunc) error { + + return runSTM(makeSTM(cli, false, txQueue, so...), apply) } // NewSTM creates a new STM instance, using serializable snapshot isolation. -func NewSTM(cli *v3.Client, so ...STMOptionFunc) STM { - return makeSTM(cli, true, so...) +func NewSTM(cli *v3.Client, txQueue *commitQueue, so ...STMOptionFunc) STM { + return makeSTM(cli, true, txQueue, so...) } // makeSTM is the actual constructor of the stm. It first apply all passed // options then creates the stm object and resets it before returning. 
-func makeSTM(cli *v3.Client, manual bool, so ...STMOptionFunc) *stm { +func makeSTM(cli *v3.Client, manual bool, txQueue *commitQueue, + so ...STMOptionFunc) *stm { + opts := &STMOptions{ ctx: cli.Ctx(), } @@ -215,6 +215,7 @@ func makeSTM(cli *v3.Client, manual bool, so ...STMOptionFunc) *stm { s := &stm{ client: cli, manual: manual, + txQueue: txQueue, options: opts, prefetch: make(map[string]stmGet), } @@ -230,50 +231,72 @@ func makeSTM(cli *v3.Client, manual bool, so ...STMOptionFunc) *stm { // CommitError which is used to indicate a necessary retry. func runSTM(s *stm, apply func(STM) error) error { var ( - retries int - stats CommitStats - err error + retries int + stats CommitStats + executeErr error ) -loop: - // In a loop try to apply and commit and roll back if the database has - // changed (CommitError). - for { - select { - // Check if the STM is aborted and break the retry loop if it is. - case <-s.options.ctx.Done(): - err = fmt.Errorf("aborted") - break loop + done := make(chan struct{}) - default: + execute := func() { + defer close(done) + + for { + select { + // Check if the STM is aborted and break the retry loop + // if it is. + case <-s.options.ctx.Done(): + executeErr = fmt.Errorf("aborted") + return + + default: + } + + stats, executeErr = s.commit() + + // Re-apply only upon commit error (meaning the + // keys were changed). + if _, ok := executeErr.(CommitError); !ok { + // Anything that's not a CommitError + // aborts the transaction. + return + } + + // Rollback before trying to re-apply. + s.Rollback() + retries++ + + // Re-apply the transaction closure. + if executeErr = apply(s); executeErr != nil { + return + } } - - // Apply the transaction closure and abort the STM if there was an - // application error. - if err = apply(s); err != nil { - break loop - } - - stats, err = s.commit() - - // Re-apply only upon commit error (meaning the database was changed). 
- if _, ok := err.(CommitError); !ok { - // Anything that's not a CommitError - // aborts the STM run loop. - break loop - } - - // Rollback before trying to re-apply. - s.Rollback() - retries++ } + // Run the tx closure to construct the read and write sets. + // Also we expect that if there are no conflicting transactions + // in the queue, then we only run apply once. + if preApplyErr := apply(s); preApplyErr != nil { + return preApplyErr + } + + // Queue up the transaction for execution. + s.txQueue.Add(execute, s.rset, s.wset) + + // Wait for the transaction to execute, or break if aborted. + select { + case <-done: + case <-s.options.ctx.Done(): + } + + s.txQueue.Done(s.rset, s.wset) + if s.options.commitStatsCallback != nil { stats.Retries = retries - s.options.commitStatsCallback(err == nil, stats) + s.options.commitStatsCallback(executeErr == nil, stats) } - return err + return executeErr } // add inserts a txn response to the read set. This is useful when the txn @@ -303,24 +326,14 @@ func (rs readSet) gets() []v3.Op { return ops } -// cmps returns a cmp list testing values in read set didn't change. -func (rs readSet) cmps(lset map[string]interface{}) []v3.Cmp { - if len(lset) > 0 { - cmps := make([]v3.Cmp, 0, len(lset)) - for key := range lset { - if getValue, ok := rs[key]; ok { - cmps = append( - cmps, - v3.Compare(v3.ModRevision(key), "=", getValue.rev), - ) - } - } - return cmps - } - +// cmps returns a compare list which will serve as a precondition testing that +// the values in the read set didn't change. 
+func (rs readSet) cmps() []v3.Cmp { cmps := make([]v3.Cmp, 0, len(rs)) for key, getValue := range rs { - cmps = append(cmps, v3.Compare(v3.ModRevision(key), "=", getValue.rev)) + cmps = append(cmps, v3.Compare( + v3.ModRevision(key), "=", getValue.rev, + )) } return cmps @@ -370,6 +383,15 @@ func (s *stm) fetch(key string, opts ...v3.OpOption) ([]KV, error) { } } + if len(resp.Kvs) == 0 { + // Add assertion to the read set which will extend our commit + // constraint such that the commit will fail if the key is + // present in the database. + s.rset[key] = stmGet{ + rev: 0, + } + } + var result []KV // Fill the read set with key/values returned. @@ -413,12 +435,22 @@ func (s *stm) Get(key string) ([]byte, error) { // the prefetch set. if getValue, ok := s.prefetch[key]; ok { delete(s.prefetch, key) - s.rset[key] = getValue + + // Use the prefetched value only if it is for + // an existing key. + if getValue.rev != 0 { + s.rset[key] = getValue + } } // Return value if alread in read set. - if getVal, ok := s.rset[key]; ok { - return []byte(getVal.val), nil + if getValue, ok := s.rset[key]; ok { + // Return the value if the rset contains an existing key. + if getValue.rev != 0 { + return []byte(getValue.val), nil + } else { + return nil, nil + } } // Fetch and return value. @@ -435,13 +467,6 @@ func (s *stm) Get(key string) ([]byte, error) { return nil, nil } -// Lock adds a key to the lock set. If the lock set is -// not empty, we'll only check conflicts for the keys -// in the lock set. -func (s *stm) Lock(key string) { - s.lset[key] = nil -} - // First returns the first key/value matching prefix. If there's no key starting // with prefix, Last will return nil. func (s *stm) First(prefix string) (*KV, error) { @@ -711,7 +736,7 @@ func (s *stm) OnCommit(cb func()) { // because the keys have changed return a CommitError, otherwise return a // DatabaseError. 
func (s *stm) commit() (CommitStats, error) { - rset := s.rset.cmps(s.lset) + rset := s.rset.cmps() wset := s.wset.cmps(s.revision + 1) stats := CommitStats{ @@ -775,7 +800,6 @@ func (s *stm) Commit() error { func (s *stm) Rollback() { s.rset = make(map[string]stmGet) s.wset = make(map[string]stmPut) - s.lset = make(map[string]interface{}) s.getOpts = nil s.revision = math.MaxInt64 - 1 } diff --git a/channeldb/kvdb/etcd/stm_test.go b/channeldb/kvdb/etcd/stm_test.go index 767963d4f..cde4abf3a 100644 --- a/channeldb/kvdb/etcd/stm_test.go +++ b/channeldb/kvdb/etcd/stm_test.go @@ -6,7 +6,7 @@ import ( "errors" "testing" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func reverseKVs(a []KV) []KV { @@ -21,27 +21,35 @@ func TestPutToEmpty(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - defer f.Cleanup() + txQueue := NewCommitQueue(f.config.Ctx) + defer func() { + f.Cleanup() + txQueue.Wait() + }() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) apply := func(stm STM) error { stm.Put("123", "abc") return nil } - err = RunSTM(db.cli, apply) - assert.NoError(t, err) + err = RunSTM(db.cli, apply, txQueue) + require.NoError(t, err) - assert.Equal(t, "abc", f.Get("123")) + require.Equal(t, "abc", f.Get("123")) } func TestGetPutDel(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - defer f.cleanup() + txQueue := NewCommitQueue(f.config.Ctx) + defer func() { + f.Cleanup() + txQueue.Wait() + }() testKeyValues := []KV{ {"a", "1"}, @@ -56,71 +64,75 @@ func TestGetPutDel(t *testing.T) { } db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) apply := func(stm STM) error { // Get some non existing keys. 
v, err := stm.Get("") - assert.NoError(t, err) - assert.Nil(t, v) + require.NoError(t, err) + require.Nil(t, v) v, err = stm.Get("x") - assert.NoError(t, err) - assert.Nil(t, v) + require.NoError(t, err) + require.Nil(t, v) // Get all existing keys. for _, kv := range testKeyValues { v, err = stm.Get(kv.key) - assert.NoError(t, err) - assert.Equal(t, []byte(kv.val), v) + require.NoError(t, err) + require.Equal(t, []byte(kv.val), v) } // Overwrite, then delete an existing key. stm.Put("c", "6") v, err = stm.Get("c") - assert.NoError(t, err) - assert.Equal(t, []byte("6"), v) + require.NoError(t, err) + require.Equal(t, []byte("6"), v) stm.Del("c") v, err = stm.Get("c") - assert.NoError(t, err) - assert.Nil(t, v) + require.NoError(t, err) + require.Nil(t, v) // Re-add the deleted key. stm.Put("c", "7") v, err = stm.Get("c") - assert.NoError(t, err) - assert.Equal(t, []byte("7"), v) + require.NoError(t, err) + require.Equal(t, []byte("7"), v) // Add a new key. stm.Put("x", "x") v, err = stm.Get("x") - assert.NoError(t, err) - assert.Equal(t, []byte("x"), v) + require.NoError(t, err) + require.Equal(t, []byte("x"), v) return nil } - err = RunSTM(db.cli, apply) - assert.NoError(t, err) + err = RunSTM(db.cli, apply, txQueue) + require.NoError(t, err) - assert.Equal(t, "1", f.Get("a")) - assert.Equal(t, "2", f.Get("b")) - assert.Equal(t, "7", f.Get("c")) - assert.Equal(t, "4", f.Get("d")) - assert.Equal(t, "5", f.Get("e")) - assert.Equal(t, "x", f.Get("x")) + require.Equal(t, "1", f.Get("a")) + require.Equal(t, "2", f.Get("b")) + require.Equal(t, "7", f.Get("c")) + require.Equal(t, "4", f.Get("d")) + require.Equal(t, "5", f.Get("e")) + require.Equal(t, "x", f.Get("x")) } func TestFirstLastNextPrev(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - defer f.Cleanup() + txQueue := NewCommitQueue(f.config.Ctx) + defer func() { + f.Cleanup() + txQueue.Wait() + }() testKeyValues := []KV{ {"kb", "1"}, @@ -134,44 +146,44 @@ func TestFirstLastNextPrev(t *testing.T) { } db, 
err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) apply := func(stm STM) error { // First/Last on valid multi item interval. kv, err := stm.First("k") - assert.NoError(t, err) - assert.Equal(t, &KV{"kb", "1"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kb", "1"}, kv) kv, err = stm.Last("k") - assert.NoError(t, err) - assert.Equal(t, &KV{"ke", "4"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"ke", "4"}, kv) // First/Last on single item interval. kv, err = stm.First("w") - assert.NoError(t, err) - assert.Equal(t, &KV{"w", "w"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"w", "w"}, kv) kv, err = stm.Last("w") - assert.NoError(t, err) - assert.Equal(t, &KV{"w", "w"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"w", "w"}, kv) // Next/Prev on start/end. kv, err = stm.Next("k", "ke") - assert.NoError(t, err) - assert.Nil(t, kv) + require.NoError(t, err) + require.Nil(t, kv) kv, err = stm.Prev("k", "kb") - assert.NoError(t, err) - assert.Nil(t, kv) + require.NoError(t, err) + require.Nil(t, kv) // Next/Prev in the middle. kv, err = stm.Next("k", "kc") - assert.NoError(t, err) - assert.Equal(t, &KV{"kda", "3"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kda", "3"}, kv) kv, err = stm.Prev("k", "ke") - assert.NoError(t, err) - assert.Equal(t, &KV{"kda", "3"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kda", "3"}, kv) // Delete first item, then add an item before the // deleted one. 
Check that First/Next will "jump" @@ -180,12 +192,12 @@ func TestFirstLastNextPrev(t *testing.T) { stm.Put("ka", "0") kv, err = stm.First("k") - assert.NoError(t, err) - assert.Equal(t, &KV{"ka", "0"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"ka", "0"}, kv) kv, err = stm.Prev("k", "kc") - assert.NoError(t, err) - assert.Equal(t, &KV{"ka", "0"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"ka", "0"}, kv) // Similarly test that a new end is returned if // the old end is deleted first. @@ -193,19 +205,19 @@ func TestFirstLastNextPrev(t *testing.T) { stm.Put("kf", "5") kv, err = stm.Last("k") - assert.NoError(t, err) - assert.Equal(t, &KV{"kf", "5"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kf", "5"}, kv) kv, err = stm.Next("k", "kda") - assert.NoError(t, err) - assert.Equal(t, &KV{"kf", "5"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kf", "5"}, kv) // Overwrite one in the middle. stm.Put("kda", "6") kv, err = stm.Next("k", "kc") - assert.NoError(t, err) - assert.Equal(t, &KV{"kda", "6"}, kv) + require.NoError(t, err) + require.Equal(t, &KV{"kda", "6"}, kv) // Add three in the middle, then delete one. stm.Put("kdb", "7") @@ -218,12 +230,12 @@ func TestFirstLastNextPrev(t *testing.T) { var kvs []KV curr, err := stm.First("k") - assert.NoError(t, err) + require.NoError(t, err) for curr != nil { kvs = append(kvs, *curr) curr, err = stm.Next("k", curr.key) - assert.NoError(t, err) + require.NoError(t, err) } expected := []KV{ @@ -234,47 +246,51 @@ func TestFirstLastNextPrev(t *testing.T) { {"kdd", "9"}, {"kf", "5"}, } - assert.Equal(t, expected, kvs) + require.Equal(t, expected, kvs) // Similarly check that stepping from last to first // returns the expected sequence. 
kvs = []KV{} curr, err = stm.Last("k") - assert.NoError(t, err) + require.NoError(t, err) for curr != nil { kvs = append(kvs, *curr) curr, err = stm.Prev("k", curr.key) - assert.NoError(t, err) + require.NoError(t, err) } expected = reverseKVs(expected) - assert.Equal(t, expected, kvs) + require.Equal(t, expected, kvs) return nil } - err = RunSTM(db.cli, apply) - assert.NoError(t, err) + err = RunSTM(db.cli, apply, txQueue) + require.NoError(t, err) - assert.Equal(t, "0", f.Get("ka")) - assert.Equal(t, "2", f.Get("kc")) - assert.Equal(t, "6", f.Get("kda")) - assert.Equal(t, "7", f.Get("kdb")) - assert.Equal(t, "9", f.Get("kdd")) - assert.Equal(t, "5", f.Get("kf")) - assert.Equal(t, "w", f.Get("w")) + require.Equal(t, "0", f.Get("ka")) + require.Equal(t, "2", f.Get("kc")) + require.Equal(t, "6", f.Get("kda")) + require.Equal(t, "7", f.Get("kdb")) + require.Equal(t, "9", f.Get("kdd")) + require.Equal(t, "5", f.Get("kf")) + require.Equal(t, "w", f.Get("w")) } func TestCommitError(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - defer f.Cleanup() + txQueue := NewCommitQueue(f.config.Ctx) + defer func() { + f.Cleanup() + txQueue.Wait() + }() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) // Preset DB state. f.Put("123", "xyz") @@ -285,10 +301,10 @@ func TestCommitError(t *testing.T) { apply := func(stm STM) error { // STM must have the key/value. val, err := stm.Get("123") - assert.NoError(t, err) + require.NoError(t, err) if cnt == 0 { - assert.Equal(t, []byte("xyz"), val) + require.Equal(t, []byte("xyz"), val) // Put a conflicting key/value during the first apply. 
f.Put("123", "def") @@ -301,44 +317,48 @@ func TestCommitError(t *testing.T) { return nil } - err = RunSTM(db.cli, apply) - assert.NoError(t, err) - assert.Equal(t, 2, cnt) + err = RunSTM(db.cli, apply, txQueue) + require.NoError(t, err) + require.Equal(t, 2, cnt) - assert.Equal(t, "abc", f.Get("123")) + require.Equal(t, "abc", f.Get("123")) } func TestManualTxError(t *testing.T) { t.Parallel() f := NewEtcdTestFixture(t) - defer f.Cleanup() + txQueue := NewCommitQueue(f.config.Ctx) + defer func() { + f.Cleanup() + txQueue.Wait() + }() db, err := newEtcdBackend(f.BackendConfig()) - assert.NoError(t, err) + require.NoError(t, err) // Preset DB state. f.Put("123", "xyz") - stm := NewSTM(db.cli) + stm := NewSTM(db.cli, txQueue) val, err := stm.Get("123") - assert.NoError(t, err) - assert.Equal(t, []byte("xyz"), val) + require.NoError(t, err) + require.Equal(t, []byte("xyz"), val) // Put a conflicting key/value. f.Put("123", "def") // Should still get the original version. val, err = stm.Get("123") - assert.NoError(t, err) - assert.Equal(t, []byte("xyz"), val) + require.NoError(t, err) + require.Equal(t, []byte("xyz"), val) // Commit will fail with CommitError. err = stm.Commit() var e CommitError - assert.True(t, errors.As(err, &e)) + require.True(t, errors.As(err, &e)) // We expect that the transacton indeed did not commit. 
- assert.Equal(t, "def", f.Get("123")) + require.Equal(t, "def", f.Get("123")) } diff --git a/channeldb/meta_test.go b/channeldb/meta_test.go index 956ffb5d6..98e9c88a0 100644 --- a/channeldb/meta_test.go +++ b/channeldb/meta_test.go @@ -15,7 +15,7 @@ import ( func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB), migrationFunc migration, shouldFail bool, dryRun bool) { - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatal(err) @@ -86,7 +86,7 @@ func applyMigration(t *testing.T, beforeMigration, afterMigration func(d *DB), func TestVersionFetchPut(t *testing.T) { t.Parallel() - db, cleanUp, err := makeTestDB() + db, cleanUp, err := MakeTestDB() defer cleanUp() if err != nil { t.Fatal(err) diff --git a/channeldb/nodes_test.go b/channeldb/nodes_test.go index 755177aa7..0d649d431 100644 --- a/channeldb/nodes_test.go +++ b/channeldb/nodes_test.go @@ -13,7 +13,7 @@ import ( func TestLinkNodeEncodeDecode(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -110,7 +110,7 @@ func TestLinkNodeEncodeDecode(t *testing.T) { func TestDeleteLinkNode(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } diff --git a/channeldb/payment_control_test.go b/channeldb/payment_control_test.go index 147e54525..4f9014621 100644 --- a/channeldb/payment_control_test.go +++ b/channeldb/payment_control_test.go @@ -56,7 +56,7 @@ func genInfo() (*PaymentCreationInfo, *HTLCAttemptInfo, func TestPaymentControlSwitchFail(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { t.Fatalf("unable to init db: %v", err) @@ -203,7 +203,7 @@ func TestPaymentControlSwitchFail(t *testing.T) { func 
TestPaymentControlSwitchDoubleSend(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { @@ -286,7 +286,7 @@ func TestPaymentControlSwitchDoubleSend(t *testing.T) { func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { @@ -319,7 +319,7 @@ func TestPaymentControlSuccessesWithoutInFlight(t *testing.T) { func TestPaymentControlFailsWithoutInFlight(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { @@ -347,7 +347,7 @@ func TestPaymentControlFailsWithoutInFlight(t *testing.T) { func TestPaymentControlDeleteNonInFligt(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { @@ -530,7 +530,7 @@ func TestPaymentControlMultiShard(t *testing.T) { } runSubTest := func(t *testing.T, test testCase) { - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { @@ -780,7 +780,7 @@ func TestPaymentControlMultiShard(t *testing.T) { func TestPaymentControlMPPRecordValidation(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() defer cleanup() if err != nil { diff --git a/channeldb/payments_test.go b/channeldb/payments_test.go index 9e790c3e3..0dc059561 100644 --- a/channeldb/payments_test.go +++ b/channeldb/payments_test.go @@ -399,7 +399,7 @@ func TestQueryPayments(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() if err != nil { t.Fatalf("unable to init db: %v", err) } @@ -512,7 +512,7 @@ func TestQueryPayments(t *testing.T) { // case where a specific duplicate is not found and the duplicates bucket is not // present when we expect it to be. 
func TestFetchPaymentWithSequenceNumber(t *testing.T) { - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() require.NoError(t, err) defer cleanup() diff --git a/channeldb/peers.go b/channeldb/peers.go new file mode 100644 index 000000000..fabb5361d --- /dev/null +++ b/channeldb/peers.go @@ -0,0 +1,121 @@ +package channeldb + +import ( + "bytes" + "errors" + "fmt" + "time" + + "github.com/btcsuite/btcwallet/walletdb" + "github.com/lightningnetwork/lnd/routing/route" +) + +var ( + // peersBucket is the name of a top level bucket in which we store + // information about our peers. Information for different peers is + // stored in buckets keyed by their public key. + // + // + // peers-bucket + // | + // |-- + // | |--flap-count-key: + // | + // |-- + // | |--flap-count-key: + peersBucket = []byte("peers-bucket") + + // flapCountKey is a key used in the peer pubkey sub-bucket that stores + // the timestamp of a peer's last flap count and its all time flap + // count. + flapCountKey = []byte("flap-count") +) + +var ( + // ErrNoPeerBucket is returned when we try to read entries for a peer + // that is not tracked. + ErrNoPeerBucket = errors.New("peer bucket not found") +) + +// FlapCount contains information about a peer's flap count. +type FlapCount struct { + // Count provides the total flap count for a peer. + Count uint32 + + // LastFlap is the timestamp of the last flap recorded for a peer. + LastFlap time.Time +} + +// WriteFlapCounts writes the flap count for a set of peers to disk, creating a +// bucket for the peer's pubkey if necessary. Note that this function overwrites +// the current value. +func (d *DB) WriteFlapCounts(flapCounts map[route.Vertex]*FlapCount) error { + return d.Update(func(tx walletdb.ReadWriteTx) error { + // Run through our set of flap counts and record them for + // each peer, creating a bucket for the peer pubkey if required. 
+ for peer, flapCount := range flapCounts { + peers := tx.ReadWriteBucket(peersBucket) + + peerBucket, err := peers.CreateBucketIfNotExists( + peer[:], + ) + if err != nil { + return err + } + + var b bytes.Buffer + err = serializeTime(&b, flapCount.LastFlap) + if err != nil { + return err + } + + if err = WriteElement(&b, flapCount.Count); err != nil { + return err + } + + err = peerBucket.Put(flapCountKey, b.Bytes()) + if err != nil { + return err + } + } + + return nil + }) +} + +// ReadFlapCount attempts to read the flap count for a peer, failing if the +// peer is not found or we do not have flap count stored. +func (d *DB) ReadFlapCount(pubkey route.Vertex) (*FlapCount, error) { + var flapCount FlapCount + + if err := d.View(func(tx walletdb.ReadTx) error { + peers := tx.ReadBucket(peersBucket) + + peerBucket := peers.NestedReadBucket(pubkey[:]) + if peerBucket == nil { + return ErrNoPeerBucket + } + + flapBytes := peerBucket.Get(flapCountKey) + if flapBytes == nil { + return fmt.Errorf("flap count not recorded for: %v", + pubkey) + } + + var ( + err error + r = bytes.NewReader(flapBytes) + ) + + flapCount.LastFlap, err = deserializeTime(r) + if err != nil { + return err + } + + return ReadElements(r, &flapCount.Count) + }); err != nil { + return nil, err + } + + return &flapCount, nil +} diff --git a/channeldb/peers_test.go b/channeldb/peers_test.go new file mode 100644 index 000000000..b702c18df --- /dev/null +++ b/channeldb/peers_test.go @@ -0,0 +1,50 @@ +package channeldb + +import ( + "testing" + "time" + + "github.com/lightningnetwork/lnd/routing/route" + "github.com/stretchr/testify/require" +) + +// TestFlapCount tests lookup and writing of flap count to disk. +func TestFlapCount(t *testing.T) { + db, cleanup, err := MakeTestDB() + require.NoError(t, err) + defer cleanup() + + // Try to read flap count for a peer that we have no records for. 
+ _, err = db.ReadFlapCount(testPub) + require.Equal(t, ErrNoPeerBucket, err) + + var ( + testPub2 = route.Vertex{2, 2, 2} + peer1FlapCount = &FlapCount{ + Count: 20, + LastFlap: time.Unix(100, 23), + } + peer2FlapCount = &FlapCount{ + Count: 39, + LastFlap: time.Unix(200, 23), + } + ) + + peers := map[route.Vertex]*FlapCount{ + testPub: peer1FlapCount, + testPub2: peer2FlapCount, + } + + err = db.WriteFlapCounts(peers) + require.NoError(t, err) + + // Lookup flap count for our first pubkey. + count, err := db.ReadFlapCount(testPub) + require.NoError(t, err) + require.Equal(t, peer1FlapCount, count) + + // Lookup our flap count for the second peer. + count, err = db.ReadFlapCount(testPub2) + require.NoError(t, err) + require.Equal(t, peer2FlapCount, count) +} diff --git a/channeldb/reports_test.go b/channeldb/reports_test.go index 398d0e6db..a63fe42b0 100644 --- a/channeldb/reports_test.go +++ b/channeldb/reports_test.go @@ -48,7 +48,7 @@ func TestPersistReport(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() require.NoError(t, err) defer cleanup() @@ -85,7 +85,7 @@ func TestPersistReport(t *testing.T) { // channel, testing that the appropriate error is returned based on the state // of the existing bucket. 
func TestFetchChannelReadBucket(t *testing.T) { - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() require.NoError(t, err) defer cleanup() @@ -197,7 +197,7 @@ func TestFetchChannelWriteBucket(t *testing.T) { test := test t.Run(test.name, func(t *testing.T) { - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() require.NoError(t, err) defer cleanup() diff --git a/channeldb/waitingproof_test.go b/channeldb/waitingproof_test.go index fff52b921..12679b69f 100644 --- a/channeldb/waitingproof_test.go +++ b/channeldb/waitingproof_test.go @@ -14,7 +14,7 @@ import ( func TestWaitingProofStore(t *testing.T) { t.Parallel() - db, cleanup, err := makeTestDB() + db, cleanup, err := MakeTestDB() if err != nil { t.Fatalf("failed to make test database: %s", err) } diff --git a/channeldb/witness_cache_test.go b/channeldb/witness_cache_test.go index 8ba1e8355..fb6c9683a 100644 --- a/channeldb/witness_cache_test.go +++ b/channeldb/witness_cache_test.go @@ -12,7 +12,7 @@ import ( func TestWitnessCacheSha256Retrieval(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -57,7 +57,7 @@ func TestWitnessCacheSha256Retrieval(t *testing.T) { func TestWitnessCacheSha256Deletion(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -108,7 +108,7 @@ func TestWitnessCacheSha256Deletion(t *testing.T) { func TestWitnessCacheUnknownWitness(t *testing.T) { t.Parallel() - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } @@ -127,7 +127,7 @@ func TestWitnessCacheUnknownWitness(t *testing.T) { // TestAddSha256Witnesses tests that insertion using AddSha256Witnesses behaves // identically to the insertion via the generalized interface. 
func TestAddSha256Witnesses(t *testing.T) { - cdb, cleanUp, err := makeTestDB() + cdb, cleanUp, err := MakeTestDB() if err != nil { t.Fatalf("unable to make test database: %v", err) } diff --git a/chanrestore.go b/chanrestore.go index c42f0de37..02696545a 100644 --- a/chanrestore.go +++ b/chanrestore.go @@ -271,7 +271,7 @@ func (s *server) ConnectPeer(nodePub *btcec.PublicKey, addrs []net.Addr) error { // Attempt to connect to the peer using this full address. If // we're unable to connect to them, then we'll try the next // address in place of it. - err := s.ConnectToPeer(netAddr, true) + err := s.ConnectToPeer(netAddr, true, s.cfg.ConnectionTimeout) // If we're already connected to this peer, then we don't // consider this an error, so we'll exit here. diff --git a/cmd/lncli/cmd_bake_macaroon.go b/cmd/lncli/cmd_bake_macaroon.go deleted file mode 100644 index 5929a536d..000000000 --- a/cmd/lncli/cmd_bake_macaroon.go +++ /dev/null @@ -1,182 +0,0 @@ -package main - -import ( - "context" - "encoding/hex" - "fmt" - "io/ioutil" - "net" - "strings" - - "github.com/lightningnetwork/lnd/lnrpc" - "github.com/lightningnetwork/lnd/macaroons" - "github.com/urfave/cli" - "gopkg.in/macaroon.v2" -) - -var bakeMacaroonCommand = cli.Command{ - Name: "bakemacaroon", - Category: "Macaroons", - Usage: "Bakes a new macaroon with the provided list of permissions " + - "and restrictions", - ArgsUsage: "[--save_to=] [--timeout=] [--ip_address=] permissions...", - Description: ` - Bake a new macaroon that grants the provided permissions and - optionally adds restrictions (timeout, IP address) to it. - - The new macaroon can either be shown on command line in hex serialized - format or it can be saved directly to a file using the --save_to - argument. - - A permission is a tuple of an entity and an action, separated by a - colon. 
Multiple operations can be added as arguments, for example: - - lncli bakemacaroon info:read invoices:write foo:bar - `, - Flags: []cli.Flag{ - cli.StringFlag{ - Name: "save_to", - Usage: "save the created macaroon to this file " + - "using the default binary format", - }, - cli.Uint64Flag{ - Name: "timeout", - Usage: "the number of seconds the macaroon will be " + - "valid before it times out", - }, - cli.StringFlag{ - Name: "ip_address", - Usage: "the IP address the macaroon will be bound to", - }, - }, - Action: actionDecorator(bakeMacaroon), -} - -func bakeMacaroon(ctx *cli.Context) error { - client, cleanUp := getClient(ctx) - defer cleanUp() - - // Show command help if no arguments. - if ctx.NArg() == 0 { - return cli.ShowCommandHelp(ctx, "bakemacaroon") - } - args := ctx.Args() - - var ( - savePath string - timeout int64 - ipAddress net.IP - parsedPermissions []*lnrpc.MacaroonPermission - err error - ) - - if ctx.String("save_to") != "" { - savePath = cleanAndExpandPath(ctx.String("save_to")) - } - - if ctx.IsSet("timeout") { - timeout = ctx.Int64("timeout") - if timeout <= 0 { - return fmt.Errorf("timeout must be greater than 0") - } - } - - if ctx.IsSet("ip_address") { - ipAddress = net.ParseIP(ctx.String("ip_address")) - if ipAddress == nil { - return fmt.Errorf("unable to parse ip_address: %s", - ctx.String("ip_address")) - } - } - - // A command line argument can't be an empty string. So we'll check each - // entry if it's a valid entity:action tuple. The content itself is - // validated server side. We just make sure we can parse it correctly. - for _, permission := range args { - tuple := strings.Split(permission, ":") - if len(tuple) != 2 { - return fmt.Errorf("unable to parse "+ - "permission tuple: %s", permission) - } - entity, action := tuple[0], tuple[1] - if entity == "" { - return fmt.Errorf("invalid permission [%s]. entity "+ - "cannot be empty", permission) - } - if action == "" { - return fmt.Errorf("invalid permission [%s]. 
action "+ - "cannot be empty", permission) - } - - // No we can assume that we have a formally valid entity:action - // tuple. The rest of the validation happens server side. - parsedPermissions = append( - parsedPermissions, &lnrpc.MacaroonPermission{ - Entity: entity, - Action: action, - }, - ) - } - - // Now we have gathered all the input we need and can do the actual - // RPC call. - req := &lnrpc.BakeMacaroonRequest{ - Permissions: parsedPermissions, - } - resp, err := client.BakeMacaroon(context.Background(), req) - if err != nil { - return err - } - - // Now we should have gotten a valid macaroon. Unmarshal it so we can - // add first-party caveats (if necessary) to it. - macBytes, err := hex.DecodeString(resp.Macaroon) - if err != nil { - return err - } - unmarshalMac := &macaroon.Macaroon{} - if err = unmarshalMac.UnmarshalBinary(macBytes); err != nil { - return err - } - - // Now apply the desired constraints to the macaroon. This will always - // create a new macaroon object, even if no constraints are added. - macConstraints := make([]macaroons.Constraint, 0) - if timeout > 0 { - macConstraints = append( - macConstraints, macaroons.TimeoutConstraint(timeout), - ) - } - if ipAddress != nil { - macConstraints = append( - macConstraints, - macaroons.IPLockConstraint(ipAddress.String()), - ) - } - constrainedMac, err := macaroons.AddConstraints( - unmarshalMac, macConstraints..., - ) - if err != nil { - return err - } - macBytes, err = constrainedMac.MarshalBinary() - if err != nil { - return err - } - - // Now we can output the result. We either write it binary serialized to - // a file or write to the standard output using hex encoding. 
- switch { - case savePath != "": - err = ioutil.WriteFile(savePath, macBytes, 0644) - if err != nil { - return err - } - fmt.Printf("Macaroon saved to %s\n", savePath) - - default: - fmt.Printf("%s\n", hex.EncodeToString(macBytes)) - } - - return nil -} diff --git a/cmd/lncli/cmd_macaroon.go b/cmd/lncli/cmd_macaroon.go new file mode 100644 index 000000000..4ea7dbfd0 --- /dev/null +++ b/cmd/lncli/cmd_macaroon.go @@ -0,0 +1,413 @@ +package main + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io/ioutil" + "net" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/lightningnetwork/lnd/lncfg" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/macaroons" + "github.com/urfave/cli" + "gopkg.in/macaroon-bakery.v2/bakery" + "gopkg.in/macaroon.v2" +) + +var bakeMacaroonCommand = cli.Command{ + Name: "bakemacaroon", + Category: "Macaroons", + Usage: "Bakes a new macaroon with the provided list of permissions " + + "and restrictions.", + ArgsUsage: "[--save_to=] [--timeout=] [--ip_address=] permissions...", + Description: ` + Bake a new macaroon that grants the provided permissions and + optionally adds restrictions (timeout, IP address) to it. + + The new macaroon can either be shown on command line in hex serialized + format or it can be saved directly to a file using the --save_to + argument. + + A permission is a tuple of an entity and an action, separated by a + colon. Multiple operations can be added as arguments, for example: + + lncli bakemacaroon info:read invoices:write foo:bar + + For even more fine-grained permission control, it is also possible to + specify single RPC method URIs that are allowed to be accessed by a + macaroon. This can be achieved by specifying "uri:" pairs, + for example: + + lncli bakemacaroon uri:/lnrpc.Lightning/GetInfo uri:/verrpc.Versioner/GetVersion + + The macaroon created by this command would only be allowed to use the + "lncli getinfo" and "lncli version" commands. 
+ + To get a list of all available URIs and permissions, use the + "lncli listpermissions" command. + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "save_to", + Usage: "save the created macaroon to this file " + + "using the default binary format", + }, + cli.Uint64Flag{ + Name: "timeout", + Usage: "the number of seconds the macaroon will be " + + "valid before it times out", + }, + cli.StringFlag{ + Name: "ip_address", + Usage: "the IP address the macaroon will be bound to", + }, + cli.Uint64Flag{ + Name: "root_key_id", + Usage: "the numerical root key ID used to create the macaroon", + }, + }, + Action: actionDecorator(bakeMacaroon), +} + +func bakeMacaroon(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + // Show command help if no arguments. + if ctx.NArg() == 0 { + return cli.ShowCommandHelp(ctx, "bakemacaroon") + } + args := ctx.Args() + + var ( + savePath string + timeout int64 + ipAddress net.IP + rootKeyID uint64 + parsedPermissions []*lnrpc.MacaroonPermission + err error + ) + + if ctx.String("save_to") != "" { + savePath = lncfg.CleanAndExpandPath(ctx.String("save_to")) + } + + if ctx.IsSet("timeout") { + timeout = ctx.Int64("timeout") + if timeout <= 0 { + return fmt.Errorf("timeout must be greater than 0") + } + } + + if ctx.IsSet("ip_address") { + ipAddress = net.ParseIP(ctx.String("ip_address")) + if ipAddress == nil { + return fmt.Errorf("unable to parse ip_address: %s", + ctx.String("ip_address")) + } + } + + if ctx.IsSet("root_key_id") { + rootKeyID = ctx.Uint64("root_key_id") + } + + // A command line argument can't be an empty string. So we'll check each + // entry if it's a valid entity:action tuple. The content itself is + // validated server side. We just make sure we can parse it correctly. 
+ for _, permission := range args { + tuple := strings.Split(permission, ":") + if len(tuple) != 2 { + return fmt.Errorf("unable to parse "+ + "permission tuple: %s", permission) + } + entity, action := tuple[0], tuple[1] + if entity == "" { + return fmt.Errorf("invalid permission [%s]. entity "+ + "cannot be empty", permission) + } + if action == "" { + return fmt.Errorf("invalid permission [%s]. action "+ + "cannot be empty", permission) + } + + // No we can assume that we have a formally valid entity:action + // tuple. The rest of the validation happens server side. + parsedPermissions = append( + parsedPermissions, &lnrpc.MacaroonPermission{ + Entity: entity, + Action: action, + }, + ) + } + + // Now we have gathered all the input we need and can do the actual + // RPC call. + req := &lnrpc.BakeMacaroonRequest{ + Permissions: parsedPermissions, + RootKeyId: rootKeyID, + } + resp, err := client.BakeMacaroon(context.Background(), req) + if err != nil { + return err + } + + // Now we should have gotten a valid macaroon. Unmarshal it so we can + // add first-party caveats (if necessary) to it. + macBytes, err := hex.DecodeString(resp.Macaroon) + if err != nil { + return err + } + unmarshalMac := &macaroon.Macaroon{} + if err = unmarshalMac.UnmarshalBinary(macBytes); err != nil { + return err + } + + // Now apply the desired constraints to the macaroon. This will always + // create a new macaroon object, even if no constraints are added. + macConstraints := make([]macaroons.Constraint, 0) + if timeout > 0 { + macConstraints = append( + macConstraints, macaroons.TimeoutConstraint(timeout), + ) + } + if ipAddress != nil { + macConstraints = append( + macConstraints, + macaroons.IPLockConstraint(ipAddress.String()), + ) + } + constrainedMac, err := macaroons.AddConstraints( + unmarshalMac, macConstraints..., + ) + if err != nil { + return err + } + macBytes, err = constrainedMac.MarshalBinary() + if err != nil { + return err + } + + // Now we can output the result. 
We either write it binary serialized to + // a file or write to the standard output using hex encoding. + switch { + case savePath != "": + err = ioutil.WriteFile(savePath, macBytes, 0644) + if err != nil { + return err + } + fmt.Printf("Macaroon saved to %s\n", savePath) + + default: + fmt.Printf("%s\n", hex.EncodeToString(macBytes)) + } + + return nil +} + +var listMacaroonIDsCommand = cli.Command{ + Name: "listmacaroonids", + Category: "Macaroons", + Usage: "List all macaroons root key IDs in use.", + Action: actionDecorator(listMacaroonIDs), +} + +func listMacaroonIDs(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + req := &lnrpc.ListMacaroonIDsRequest{} + resp, err := client.ListMacaroonIDs(context.Background(), req) + if err != nil { + return err + } + + printRespJSON(resp) + return nil +} + +var deleteMacaroonIDCommand = cli.Command{ + Name: "deletemacaroonid", + Category: "Macaroons", + Usage: "Delete a specific macaroon ID.", + ArgsUsage: "root_key_id", + Description: ` + Remove a macaroon ID using the specified root key ID. For example: + + lncli deletemacaroonid 1 + + WARNING + When the ID is deleted, all macaroons created from that root key will + be invalidated. + + Note that the default root key ID 0 cannot be deleted. + `, + Action: actionDecorator(deleteMacaroonID), +} + +func deleteMacaroonID(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + // Validate args length. Only one argument is allowed. + if ctx.NArg() != 1 { + return cli.ShowCommandHelp(ctx, "deletemacaroonid") + } + + rootKeyIDString := ctx.Args().First() + + // Convert string into uint64. + rootKeyID, err := strconv.ParseUint(rootKeyIDString, 10, 64) + if err != nil { + return fmt.Errorf("root key ID must be a positive integer") + } + + // Check that the value is not equal to DefaultRootKeyID. Note that the + // server also validates the root key ID when removing it. 
However, we check + // it here too so that we can give users a nice warning. + if bytes.Equal([]byte(rootKeyIDString), macaroons.DefaultRootKeyID) { + return fmt.Errorf("deleting the default root key ID 0 is not allowed") + } + + // Make the actual RPC call. + req := &lnrpc.DeleteMacaroonIDRequest{ + RootKeyId: rootKeyID, + } + resp, err := client.DeleteMacaroonID(context.Background(), req) + if err != nil { + return err + } + + printRespJSON(resp) + return nil +} + +var listPermissionsCommand = cli.Command{ + Name: "listpermissions", + Category: "Macaroons", + Usage: "Lists all RPC method URIs and the macaroon permissions they " + + "require to be invoked.", + Action: actionDecorator(listPermissions), +} + +func listPermissions(ctx *cli.Context) error { + client, cleanUp := getClient(ctx) + defer cleanUp() + + request := &lnrpc.ListPermissionsRequest{} + response, err := client.ListPermissions(context.Background(), request) + if err != nil { + return err + } + + printRespJSON(response) + + return nil +} + +type macaroonContent struct { + Version uint16 `json:"version"` + Location string `json:"location"` + RootKeyID string `json:"root_key_id"` + Permissions []string `json:"permissions"` + Caveats []string `json:"caveats"` +} + +var printMacaroonCommand = cli.Command{ + Name: "printmacaroon", + Category: "Macaroons", + Usage: "Print the content of a macaroon in a human readable format.", + ArgsUsage: "[macaroon_content_hex]", + Description: ` + Decode a macaroon and show its content in a more human readable format. + The macaroon can either be passed as a hex encoded positional parameter + or loaded from a file. + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "macaroon_file", + Usage: "load the macaroon from a file instead of the " + + "command line directly", + }, + }, + Action: actionDecorator(printMacaroon), +} + +func printMacaroon(ctx *cli.Context) error { + // Show command help if no arguments or flags are set. 
+ if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + return cli.ShowCommandHelp(ctx, "printmacaroon") + } + + var ( + macBytes []byte + err error + args = ctx.Args() + ) + switch { + case ctx.IsSet("macaroon_file"): + macPath := lncfg.CleanAndExpandPath(ctx.String("macaroon_file")) + + // Load the specified macaroon file. + macBytes, err = ioutil.ReadFile(macPath) + if err != nil { + return fmt.Errorf("unable to read macaroon path %v: %v", + macPath, err) + } + + case args.Present(): + macBytes, err = hex.DecodeString(args.First()) + if err != nil { + return fmt.Errorf("unable to hex decode macaroon: %v", + err) + } + + default: + return fmt.Errorf("macaroon parameter missing") + } + + // Decode the macaroon and its protobuf encoded internal identifier. + mac := &macaroon.Macaroon{} + if err = mac.UnmarshalBinary(macBytes); err != nil { + return fmt.Errorf("unable to decode macaroon: %v", err) + } + rawID := mac.Id() + if rawID[0] != byte(bakery.LatestVersion) { + return fmt.Errorf("invalid macaroon version: %x", rawID) + } + decodedID := &lnrpc.MacaroonId{} + idProto := rawID[1:] + err = proto.Unmarshal(idProto, decodedID) + if err != nil { + return fmt.Errorf("unable to decode macaroon version: %v", err) + } + + // Prepare everything to be printed in a more human readable format. 
+ content := &macaroonContent{ + Version: uint16(mac.Version()), + Location: mac.Location(), + RootKeyID: string(decodedID.StorageId), + Permissions: nil, + Caveats: nil, + } + + for _, caveat := range mac.Caveats() { + content.Caveats = append(content.Caveats, string(caveat.Id)) + } + for _, op := range decodedID.Ops { + for _, action := range op.Actions { + permission := fmt.Sprintf("%s:%s", op.Entity, action) + content.Permissions = append( + content.Permissions, permission, + ) + } + } + + printJSON(content) + + return nil +} diff --git a/cmd/lncli/cmd_open_channel.go b/cmd/lncli/cmd_open_channel.go index b7e78d132..d6547de92 100644 --- a/cmd/lncli/cmd_open_channel.go +++ b/cmd/lncli/cmd_open_channel.go @@ -1,6 +1,7 @@ package main import ( + "bytes" "context" "crypto/rand" "encoding/base64" @@ -11,6 +12,7 @@ import ( "strings" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet/chanfunding" @@ -43,9 +45,9 @@ Base64 encoded PSBT: ` userMsgSign = ` PSBT verified by lnd, please continue the funding flow by signing the PSBT by all required parties/devices. Once the transaction is fully signed, paste it -again here. +again here either in base64 PSBT or hex encoded raw wire TX format. -Base64 encoded signed PSBT: ` +Signed base64 encoded PSBT or hex encoded raw wire TX: ` ) // TODO(roasbeef): change default number of confirmations @@ -414,7 +416,10 @@ func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, if err != nil { return fmt.Errorf("opening stream to server failed: %v", err) } - signal.Intercept() + + if err := signal.Intercept(); err != nil { + return err + } // We also need to spawn a goroutine that reads from the server. 
This // will copy the messages to the channel as long as they come in or add @@ -502,7 +507,7 @@ func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, return fmt.Errorf("reading from console "+ "failed: %v", err) } - psbt, err := base64.StdEncoding.DecodeString( + fundedPsbt, err := base64.StdEncoding.DecodeString( strings.TrimSpace(psbtBase64), ) if err != nil { @@ -512,7 +517,7 @@ func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, verifyMsg := &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtVerify{ PsbtVerify: &lnrpc.FundingPsbtVerify{ - FundedPsbt: psbt, + FundedPsbt: fundedPsbt, PendingChanId: pendingChanID[:], }, }, @@ -528,7 +533,7 @@ func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, fmt.Print(userMsgSign) // Read the signed PSBT and send it to lnd. - psbtBase64, err = readLine(quit) + finalTxStr, err := readLine(quit) if err == io.EOF { return nil } @@ -536,22 +541,16 @@ func openChannelPsbt(ctx *cli.Context, client lnrpc.LightningClient, return fmt.Errorf("reading from console "+ "failed: %v", err) } - psbt, err = base64.StdEncoding.DecodeString( - strings.TrimSpace(psbtBase64), + finalizeMsg, err := finalizeMsgFromString( + finalTxStr, pendingChanID[:], ) if err != nil { - return fmt.Errorf("base64 decode failed: %v", - err) + return err } - finalizeMsg := &lnrpc.FundingTransitionMsg{ - Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ - PsbtFinalize: &lnrpc.FundingPsbtFinalize{ - SignedPsbt: psbt, - PendingChanId: pendingChanID[:], - }, - }, + transitionMsg := &lnrpc.FundingTransitionMsg{ + Trigger: finalizeMsg, } - err = sendFundingState(ctxc, ctx, finalizeMsg) + err = sendFundingState(ctxc, ctx, transitionMsg) if err != nil { return fmt.Errorf("finalizing PSBT funding "+ "flow failed: %v", err) @@ -683,3 +682,41 @@ func sendFundingState(cancelCtx context.Context, cliCtx *cli.Context, _, err := client.FundingStateStep(cancelCtx, msg) return err } + +// finalizeMsgFromString 
creates the final message for the PsbtFinalize step +// from either a hex encoded raw wire transaction or a base64 encoded PSBT +// packet. +func finalizeMsgFromString(tx string, + pendingChanID []byte) (*lnrpc.FundingTransitionMsg_PsbtFinalize, error) { + + rawTx, err := hex.DecodeString(strings.TrimSpace(tx)) + if err == nil { + // Hex decoding succeeded so we assume we have a raw wire format + // transaction. Let's submit that instead of a PSBT packet. + tx := &wire.MsgTx{} + err := tx.Deserialize(bytes.NewReader(rawTx)) + if err != nil { + return nil, fmt.Errorf("deserializing as raw wire "+ + "transaction failed: %v", err) + } + return &lnrpc.FundingTransitionMsg_PsbtFinalize{ + PsbtFinalize: &lnrpc.FundingPsbtFinalize{ + FinalRawTx: rawTx, + PendingChanId: pendingChanID, + }, + }, nil + } + + // If the string isn't a hex encoded transaction, we assume it must be + // a base64 encoded PSBT packet. + psbtBytes, err := base64.StdEncoding.DecodeString(strings.TrimSpace(tx)) + if err != nil { + return nil, fmt.Errorf("base64 decode failed: %v", err) + } + return &lnrpc.FundingTransitionMsg_PsbtFinalize{ + PsbtFinalize: &lnrpc.FundingPsbtFinalize{ + SignedPsbt: psbtBytes, + PendingChanId: pendingChanID, + }, + }, nil +} diff --git a/cmd/lncli/cmd_profile.go b/cmd/lncli/cmd_profile.go new file mode 100644 index 000000000..e646635c7 --- /dev/null +++ b/cmd/lncli/cmd_profile.go @@ -0,0 +1,449 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "path" + "strings" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/lncfg" + "github.com/urfave/cli" + "gopkg.in/macaroon.v2" +) + +var ( + // defaultLncliDir is the default directory to store the profile file + // in. This defaults to: + // C:\Users\\AppData\Local\Lncli\ on Windows + // ~/.lncli/ on Linux + // ~/Library/Application Support/Lncli/ on MacOS + defaultLncliDir = btcutil.AppDataDir("lncli", false) + + // defaultProfileFile is the full, absolute path of the profile file. 
+ defaultProfileFile = path.Join(defaultLncliDir, "profiles.json") +) + +var profileSubCommand = cli.Command{ + Name: "profile", + Category: "Profiles", + Usage: "Create and manage lncli profiles", + Description: ` + Profiles for lncli are an easy and comfortable way to manage multiple + nodes from the command line by storing node specific parameters like RPC + host, network, TLS certificate path or macaroons in a named profile. + + To use a predefined profile, just use the '--profile=myprofile' (or + short version '-p=myprofile') with any lncli command. + + A default profile can also be defined, lncli will then always use the + connection/node parameters from that profile instead of the default + values. + + WARNING: Setting a default profile changes the default behavior of + lncli! To disable the use of the default profile for a single command, + set '--profile= '. + + The profiles are stored in a file called profiles.json in the user's + home directory, for example: + C:\Users\\AppData\Local\Lncli\profiles.json on Windows + ~/.lncli/profiles.json on Linux + ~/Library/Application Support/Lncli/profiles.json on MacOS + `, + Subcommands: []cli.Command{ + profileListCommand, + profileAddCommand, + profileRemoveCommand, + profileSetDefaultCommand, + profileUnsetDefaultCommand, + profileAddMacaroonCommand, + }, +} + +var profileListCommand = cli.Command{ + Name: "list", + Usage: "Lists all lncli profiles", + Action: profileList, +} + +func profileList(_ *cli.Context) error { + f, err := loadProfileFile(defaultProfileFile) + if err != nil { + return err + } + + printJSON(f) + return nil +} + +var profileAddCommand = cli.Command{ + Name: "add", + Usage: "Add a new profile", + ArgsUsage: "name", + Description: ` + Add a new named profile to the main profiles.json. All global options + (see 'lncli --help') passed into this command are stored in that named + profile. 
+ `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "name", + Usage: "the name of the new profile", + }, + cli.BoolFlag{ + Name: "default", + Usage: "set the new profile to be the default profile", + }, + }, + Action: profileAdd, +} + +func profileAdd(ctx *cli.Context) error { + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + return cli.ShowCommandHelp(ctx, "add") + } + + // Load the default profile file or create a new one if it doesn't exist + // yet. + f, err := loadProfileFile(defaultProfileFile) + switch { + case err == errNoProfileFile: + f = &profileFile{} + _ = os.MkdirAll(path.Dir(defaultProfileFile), 0700) + + case err != nil: + return err + } + + // Create a profile struct from all the global options. + profile, err := profileFromContext(ctx, true) + if err != nil { + return fmt.Errorf("could not load global options: %v", err) + } + + // Finally, all that's left is to get the profile name from either + // positional argument or flag. + args := ctx.Args() + switch { + case ctx.IsSet("name"): + profile.Name = ctx.String("name") + case args.Present(): + profile.Name = args.First() + default: + return fmt.Errorf("name argument missing") + } + + // Is there already a profile with that name? + for _, p := range f.Profiles { + if p.Name == profile.Name { + return fmt.Errorf("a profile with the name %s already "+ + "exists", profile.Name) + } + } + + // Do we need to update the default entry to be this one? + if ctx.Bool("default") { + f.Default = profile.Name + } + + // All done, store the updated profile file. 
+ f.Profiles = append(f.Profiles, profile) + if err = saveProfileFile(defaultProfileFile, f); err != nil { + return fmt.Errorf("error writing profile file %s: %v", + defaultProfileFile, err) + } + + fmt.Printf("Profile %s added to file %s.\n", profile.Name, + defaultProfileFile) + return nil +} + +var profileRemoveCommand = cli.Command{ + Name: "remove", + Usage: "Remove a profile", + ArgsUsage: "name", + Description: `Remove the specified profile from the profile file.`, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "name", + Usage: "the name of the profile to delete", + }, + }, + Action: profileRemove, +} + +func profileRemove(ctx *cli.Context) error { + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + return cli.ShowCommandHelp(ctx, "remove") + } + + // Load the default profile file. + f, err := loadProfileFile(defaultProfileFile) + if err != nil { + return fmt.Errorf("could not load profile file: %v", err) + } + + // Get the profile name from either positional argument or flag. + var ( + args = ctx.Args() + name string + found = false + ) + switch { + case ctx.IsSet("name"): + name = ctx.String("name") + case args.Present(): + name = args.First() + default: + return fmt.Errorf("name argument missing") + } + + // Create a copy of all profiles but don't include the one to delete. + newProfiles := make([]*profileEntry, 0, len(f.Profiles)-1) + for _, p := range f.Profiles { + // Skip the one we want to delete. + if p.Name == name { + found = true + + if p.Name == f.Default { + fmt.Println("Warning: removing default profile.") + } + continue + } + + // Keep all others. + newProfiles = append(newProfiles, p) + } + + // If what we were looking for didn't exist in the first place, there's + // no need for updating the file. + if !found { + return fmt.Errorf("profile with name %s not found in file", + name) + } + + // Great, everything updated, now let's save the file. 
+ f.Profiles = newProfiles + return saveProfileFile(defaultProfileFile, f) +} + +var profileSetDefaultCommand = cli.Command{ + Name: "setdefault", + Usage: "Set the default profile", + ArgsUsage: "name", + Description: ` + Set a specified profile to be used as the default profile. + + WARNING: Setting a default profile changes the default behavior of + lncli! To disable the use of the default profile for a single command, + set '--profile= '. + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "name", + Usage: "the name of the profile to set as default", + }, + }, + Action: profileSetDefault, +} + +func profileSetDefault(ctx *cli.Context) error { + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + return cli.ShowCommandHelp(ctx, "setdefault") + } + + // Load the default profile file. + f, err := loadProfileFile(defaultProfileFile) + if err != nil { + return fmt.Errorf("could not load profile file: %v", err) + } + + // Get the profile name from either positional argument or flag. + var ( + args = ctx.Args() + name string + found = false + ) + switch { + case ctx.IsSet("name"): + name = ctx.String("name") + case args.Present(): + name = args.First() + default: + return fmt.Errorf("name argument missing") + } + + // Make sure the new default profile actually exists. + for _, p := range f.Profiles { + if p.Name == name { + found = true + f.Default = p.Name + + break + } + } + + // If the default profile doesn't exist, there's no need for updating + // the file. + if !found { + return fmt.Errorf("profile with name %s not found in file", + name) + } + + // Great, everything updated, now let's save the file. + return saveProfileFile(defaultProfileFile, f) +} + +var profileUnsetDefaultCommand = cli.Command{ + Name: "unsetdefault", + Usage: "Unsets the default profile", + Description: ` + Disables the use of a default profile and restores lncli to its original + behavior. 
+ `, + Action: profileUnsetDefault, +} + +func profileUnsetDefault(_ *cli.Context) error { + // Load the default profile file. + f, err := loadProfileFile(defaultProfileFile) + if err != nil { + return fmt.Errorf("could not load profile file: %v", err) + } + + // Save the file with the flag disabled. + f.Default = "" + return saveProfileFile(defaultProfileFile, f) +} + +var profileAddMacaroonCommand = cli.Command{ + Name: "addmacaroon", + Usage: "Add a macaroon to a profile's macaroon jar", + ArgsUsage: "macaroon-name", + Description: ` + Add an additional macaroon specified by the global option --macaroonpath + to an existing profile's macaroon jar. + + If no profile is selected, the macaroon is added to the default profile + (if one exists). To add a macaroon to a specific profile, use the global + --profile=myprofile option. + + If multiple macaroons exist in a profile's macaroon jar, the one to use + can be specified with the global option --macfromjar=xyz. + `, + Flags: []cli.Flag{ + cli.StringFlag{ + Name: "name", + Usage: "the name of the macaroon", + }, + cli.BoolFlag{ + Name: "default", + Usage: "set the new macaroon to be the default " + + "macaroon in the jar", + }, + }, + Action: profileAddMacaroon, +} + +func profileAddMacaroon(ctx *cli.Context) error { + if ctx.NArg() == 0 && ctx.NumFlags() == 0 { + return cli.ShowCommandHelp(ctx, "addmacaroon") + } + + // Load the default profile file or create a new one if it doesn't exist + // yet. + f, err := loadProfileFile(defaultProfileFile) + if err != nil { + return fmt.Errorf("could not load profile file: %v", err) + } + + // Finally, all that's left is to get the profile name from either + // positional argument or flag. 
+ var ( + args = ctx.Args() + profileName string + macName string + ) + switch { + case ctx.IsSet("name"): + macName = ctx.String("name") + case args.Present(): + macName = args.First() + default: + return fmt.Errorf("name argument missing") + } + + // Make sure the user actually set a macaroon path to use. + if !ctx.GlobalIsSet("macaroonpath") { + return fmt.Errorf("macaroonpath global option missing") + } + + // Find out which profile we should add the macaroon. The global flag + // takes precedence over the default profile. + if f.Default != "" { + profileName = f.Default + } + if ctx.GlobalIsSet("profile") { + profileName = ctx.GlobalString("profile") + } + if len(strings.TrimSpace(profileName)) == 0 { + return fmt.Errorf("no profile specified and no default " + + "profile exists") + } + + // Is there a profile with that name? + var selectedProfile *profileEntry + for _, p := range f.Profiles { + if p.Name == profileName { + selectedProfile = p + break + } + } + if selectedProfile == nil { + return fmt.Errorf("profile with name %s not found", profileName) + } + + // Does a macaroon with that name already exist? + for _, m := range selectedProfile.Macaroons.Jar { + if m.Name == macName { + return fmt.Errorf("a macaroon with the name %s "+ + "already exists", macName) + } + } + + // Do we need to update the default entry to be this one? + if ctx.Bool("default") { + selectedProfile.Macaroons.Default = macName + } + + // Now load and possibly encrypt the macaroon file. 
+ macPath := lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath")) + macBytes, err := ioutil.ReadFile(macPath) + if err != nil { + return fmt.Errorf("unable to read macaroon path: %v", err) + } + mac := &macaroon.Macaroon{} + if err = mac.UnmarshalBinary(macBytes); err != nil { + return fmt.Errorf("unable to decode macaroon: %v", err) + } + macEntry := &macaroonEntry{ + Name: macName, + } + if err = macEntry.storeMacaroon(mac, nil); err != nil { + return fmt.Errorf("unable to store macaroon: %v", err) + } + + // All done, store the updated profile file. + selectedProfile.Macaroons.Jar = append( + selectedProfile.Macaroons.Jar, macEntry, + ) + if err = saveProfileFile(defaultProfileFile, f); err != nil { + return fmt.Errorf("error writing profile file %s: %v", + defaultProfileFile, err) + } + + fmt.Printf("Macaroon %s added to profile %s in file %s.\n", macName, + selectedProfile.Name, defaultProfileFile) + return nil +} diff --git a/cmd/lncli/commands.go b/cmd/lncli/commands.go index f0403e3d0..2df10414f 100644 --- a/cmd/lncli/commands.go +++ b/cmd/lncli/commands.go @@ -14,7 +14,6 @@ import ( "strconv" "strings" "sync" - "syscall" "time" "github.com/btcsuite/btcd/chaincfg/chainhash" @@ -26,7 +25,6 @@ import ( "github.com/lightningnetwork/lnd/routing/route" "github.com/lightningnetwork/lnd/walletunlocker" "github.com/urfave/cli" - "golang.org/x/crypto/ssh/terminal" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -221,7 +219,7 @@ var sendCoinsCommand = cli.Command{ cli.BoolFlag{ Name: "sweepall", Usage: "if set, then the amount field will be ignored, " + - "and all the wallet will attempt to sweep all " + + "and the wallet will attempt to sweep all " + "outputs within the wallet to the target " + "address", }, @@ -498,6 +496,14 @@ var connectCommand = cli.Command{ Category: "Peers", Usage: "Connect to a remote lnd peer.", ArgsUsage: "@host", + Description: ` + Connect to a peer using its and host. 
+ + A custom timeout on the connection is supported. For instance, to timeout + the connection request in 30 seconds, use the following: + + lncli connect @host --timeout 30s + `, Flags: []cli.Flag{ cli.BoolFlag{ Name: "perm", @@ -505,6 +511,13 @@ var connectCommand = cli.Command{ "connect to the target peer.\n" + " If not, the call will be synchronous.", }, + cli.DurationFlag{ + Name: "timeout", + Usage: "The connection timeout value for current request. " + + "Valid uints are {ms, s, m, h}.\n" + + "If not set, the global connection " + + "timeout value (default to 120s) is used.", + }, }, Action: actionDecorator(connectPeer), } @@ -526,8 +539,9 @@ func connectPeer(ctx *cli.Context) error { Host: splitAddr[1], } req := &lnrpc.ConnectPeerRequest{ - Addr: addr, - Perm: ctx.Bool("perm"), + Addr: addr, + Perm: ctx.Bool("perm"), + Timeout: uint64(ctx.Duration("timeout").Seconds()), } lnid, err := client.ConnectPeer(ctxb, req) @@ -1340,15 +1354,13 @@ mnemonicCheck: // Additionally, the user may have a passphrase, that will also // need to be provided so the daemon can properly decipher the // cipher seed. - fmt.Printf("Input your cipher seed passphrase (press enter if " + - "your seed doesn't have a passphrase): ") - passphrase, err := terminal.ReadPassword(int(syscall.Stdin)) + aezeedPass, err = readPassword("Input your cipher seed " + + "passphrase (press enter if your seed doesn't have a " + + "passphrase): ") if err != nil { return err } - aezeedPass = []byte(passphrase) - for { fmt.Println() fmt.Printf("Input an optional address look-ahead "+ @@ -1460,12 +1472,10 @@ func capturePassword(instruction string, optional bool, validate func([]byte) error) ([]byte, error) { for { - fmt.Printf(instruction) - password, err := terminal.ReadPassword(int(syscall.Stdin)) + password, err := readPassword(instruction) if err != nil { return nil, err } - fmt.Println() // Do not require users to repeat password if // it is optional and they are not using one. 
@@ -1481,21 +1491,16 @@ func capturePassword(instruction string, optional bool, continue } - fmt.Printf("Confirm password: ") - passwordConfirmed, err := terminal.ReadPassword( - int(syscall.Stdin), - ) + passwordConfirmed, err := readPassword("Confirm password: ") if err != nil { return nil, err } - fmt.Println() if bytes.Equal(password, passwordConfirmed) { return password, nil } - fmt.Println("Passwords don't match, " + - "please try again") + fmt.Println("Passwords don't match, please try again") fmt.Println() } } @@ -1558,13 +1563,7 @@ func unlock(ctx *cli.Context) error { // terminal to be a real tty and will fail if a string is piped into // lncli. default: - fmt.Printf("Input wallet password: ") - - // The variable syscall.Stdin is of a different type in the - // Windows API that's why we need the explicit cast. And of - // course the linter doesn't like it either. - pw, err = terminal.ReadPassword(int(syscall.Stdin)) // nolint:unconvert - fmt.Println() + pw, err = readPassword("Input wallet password: ") } if err != nil { return err @@ -1625,26 +1624,20 @@ func changePassword(ctx *cli.Context) error { client, cleanUp := getWalletUnlockerClient(ctx) defer cleanUp() - fmt.Printf("Input current wallet password: ") - currentPw, err := terminal.ReadPassword(int(syscall.Stdin)) + currentPw, err := readPassword("Input current wallet password: ") if err != nil { return err } - fmt.Println() - fmt.Printf("Input new wallet password: ") - newPw, err := terminal.ReadPassword(int(syscall.Stdin)) + newPw, err := readPassword("Input new wallet password: ") if err != nil { return err } - fmt.Println() - fmt.Printf("Confirm new wallet password: ") - confirmPw, err := terminal.ReadPassword(int(syscall.Stdin)) + confirmPw, err := readPassword("Confirm new wallet password: ") if err != nil { return err } - fmt.Println() if !bytes.Equal(newPw, confirmPw) { return fmt.Errorf("passwords don't match") @@ -2165,6 +2158,11 @@ var queryRoutesCommand = cli.Command{ Name: "use_mc", 
Usage: "use mission control probabilities", }, + cli.Uint64Flag{ + Name: "outgoing_chanid", + Usage: "(optional) the channel id of the channel " + + "that must be taken to the first hop", + }, cltvLimitFlag, }, Action: actionDecorator(queryRoutes), @@ -2217,6 +2215,7 @@ func queryRoutes(ctx *cli.Context) error { FinalCltvDelta: int32(ctx.Int("final_cltv_delta")), UseMissionControl: ctx.Bool("use_mc"), CltvLimit: uint32(ctx.Uint64(cltvLimitFlag.Name)), + OutgoingChanId: ctx.Uint64("outgoing_chanid"), } route, err := client.QueryRoutes(ctxb, req) diff --git a/cmd/lncli/macaroon_jar.go b/cmd/lncli/macaroon_jar.go new file mode 100644 index 000000000..29b364904 --- /dev/null +++ b/cmd/lncli/macaroon_jar.go @@ -0,0 +1,162 @@ +package main + +import ( + "encoding/base64" + "encoding/hex" + "fmt" + "strings" + + "github.com/btcsuite/btcwallet/snacl" + "gopkg.in/macaroon.v2" +) + +const ( + encryptionPrefix = "snacl:" +) + +// getPasswordFn is a function that asks the user to type a password after +// presenting it the given prompt. +type getPasswordFn func(prompt string) ([]byte, error) + +// macaroonJar is a struct that represents all macaroons of a profile. +type macaroonJar struct { + Default string `json:"default,omitempty"` + Timeout int64 `json:"timeout,omitempty"` + IP string `json:"ip,omitempty"` + Jar []*macaroonEntry `json:"jar"` +} + +// macaroonEntry is a struct that represents a single macaroon. Its content can +// either be cleartext (hex encoded) or encrypted (snacl secretbox). +type macaroonEntry struct { + Name string `json:"name"` + Data string `json:"data"` +} + +// loadMacaroon returns the fully usable macaroon instance from the entry. This +// detects whether the macaroon needs to be decrypted and does so if necessary. +// An encrypted macaroon that needs to be decrypted will prompt for the user's +// password by calling the provided password callback. Normally that should +// result in the user being prompted for the password in the terminal. 
+func (e *macaroonEntry) loadMacaroon( + pwCallback getPasswordFn) (*macaroon.Macaroon, error) { + + if len(strings.TrimSpace(e.Data)) == 0 { + return nil, fmt.Errorf("macaroon data is empty") + } + + var ( + macBytes []byte + err error + ) + + // Either decrypt or simply decode the macaroon data. + if strings.HasPrefix(e.Data, encryptionPrefix) { + parts := strings.Split(e.Data, ":") + if len(parts) != 3 { + return nil, fmt.Errorf("invalid encrypted macaroon " + + "format, expected 'snacl::" + + "'") + } + + pw, err := pwCallback("Enter macaroon encryption password: ") + if err != nil { + return nil, fmt.Errorf("could not read password from "+ + "terminal: %v", err) + } + + macBytes, err = decryptMacaroon(parts[1], parts[2], pw) + if err != nil { + return nil, fmt.Errorf("unable to decrypt macaroon: %v", + err) + } + } else { + macBytes, err = hex.DecodeString(e.Data) + if err != nil { + return nil, fmt.Errorf("unable to hex decode "+ + "macaroon: %v", err) + } + } + + // Parse the macaroon data into its native struct. + mac := &macaroon.Macaroon{} + if err := mac.UnmarshalBinary(macBytes); err != nil { + return nil, fmt.Errorf("unable to decode macaroon: %v", err) + } + return mac, nil +} + +// storeMacaroon stores a native macaroon instance to the entry. If a non-nil +// password is provided, then the macaroon is encrypted with that password. If +// not, the macaroon is stored as plain text. +func (e *macaroonEntry) storeMacaroon(mac *macaroon.Macaroon, pw []byte) error { + // First of all, make sure we can serialize the macaroon. + macBytes, err := mac.MarshalBinary() + if err != nil { + return fmt.Errorf("unable to marshal macaroon: %v", err) + } + + if len(pw) == 0 { + e.Data = hex.EncodeToString(macBytes) + return nil + } + + // The user did set a password. Let's derive an encryption key from it. 
+ key, err := snacl.NewSecretKey( + &pw, snacl.DefaultN, snacl.DefaultR, snacl.DefaultP, + ) + if err != nil { + return fmt.Errorf("unable to create encryption key: %v", err) + } + + // Encrypt the macaroon data with the derived key and store it in the + // human readable format snacl::. + encryptedMac, err := key.Encrypt(macBytes) + if err != nil { + return fmt.Errorf("unable to encrypt macaroon: %v", err) + } + + keyB64 := base64.StdEncoding.EncodeToString(key.Marshal()) + dataB64 := base64.StdEncoding.EncodeToString(encryptedMac) + e.Data = fmt.Sprintf("%s%s:%s", encryptionPrefix, keyB64, dataB64) + + return nil +} + +// decryptMacaroon decrypts the cipher text macaroon by using the serialized +// encryption key and the password. +func decryptMacaroon(keyB64, dataB64 string, pw []byte) ([]byte, error) { + // Base64 decode both the marshalled encryption key and macaroon data. + keyData, err := base64.StdEncoding.DecodeString(keyB64) + if err != nil { + return nil, fmt.Errorf("could not base64 decode encryption "+ + "key: %v", err) + } + encryptedMac, err := base64.StdEncoding.DecodeString(dataB64) + if err != nil { + return nil, fmt.Errorf("could not base64 decode macaroon "+ + "data: %v", err) + } + + // Unmarshal the encryption key and ask the user for the password. + key := &snacl.SecretKey{} + err = key.Unmarshal(keyData) + if err != nil { + return nil, fmt.Errorf("could not unmarshal encryption key: %v", + err) + } + + // Derive the final encryption key and then decrypt the macaroon with + // it. 
+ err = key.DeriveKey(&pw) + if err != nil { + return nil, fmt.Errorf("could not derive encryption key, "+ + "possibly due to incorrect password: %v", err) + } + macBytes, err := key.Decrypt(encryptedMac) + if err != nil { + return nil, fmt.Errorf("could not decrypt macaroon data: %v", + err) + } + return macBytes, nil +} diff --git a/cmd/lncli/macaroon_jar_test.go b/cmd/lncli/macaroon_jar_test.go new file mode 100644 index 000000000..8e1d1c6bd --- /dev/null +++ b/cmd/lncli/macaroon_jar_test.go @@ -0,0 +1,102 @@ +package main + +import ( + "encoding/hex" + "testing" + + "github.com/stretchr/testify/require" + "gopkg.in/macaroon.v2" +) + +var ( + dummyMacStr = "0201047465737402067788991234560000062052d26ed139ea5af8" + + "3e675500c4ccb2471f62191b745bab820f129e5588a255d2" + dummyMac, _ = hex.DecodeString(dummyMacStr) + encryptedEntry = &macaroonEntry{ + Name: "encryptedMac", + Data: "snacl:exX8xbUOb6Gih88ybL2jZGo+DBDPU2tYKkvo0eVVmbDGDoFP" + + "zlv5xvqNK5eml0LKLcB8LdZRw43qXK1W2OLs/gBAAAAAAAAACAAA" + + "AAAAAAABAAAAAAAAAA==:C8TN/aDOvSLiBCX+IdoPTx+UUWhVdGj" + + "NQvbcaWp+KXQWqPfpRZpjJQ6B2PDx5mJxImcezJGPx8ShAqMdxWe" + + "l2precU+1cOjk7HQFkYuu943eJ00s6JerAY+ssg==", + } + plaintextEntry = &macaroonEntry{ + Name: "plaintextMac", + Data: dummyMacStr, + } + + testPassword = []byte("S3curePazzw0rd") + pwCallback = func(string) ([]byte, error) { + return testPassword, nil + } + noPwCallback = func(string) ([]byte, error) { + return nil, nil + } +) + +// TestMacaroonJarEncrypted tests that a macaroon can be stored and retrieved +// safely by encrypting/decrypting it with a password. +func TestMacaroonJarEncrypted(t *testing.T) { + // Create a new macaroon entry from the dummy macaroon and encrypt it + // with the test password. + newEntry := &macaroonEntry{ + Name: "encryptedMac", + } + err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), testPassword) + require.NoError(t, err) + + // Now decrypt it again and make sure we get the same content back. 
+ mac, err := newEntry.loadMacaroon(pwCallback) + require.NoError(t, err) + macBytes, err := mac.MarshalBinary() + require.NoError(t, err) + require.Equal(t, dummyMac, macBytes) + + // The encrypted data of the entry we just created shouldn't be the + // same as our test entry because of the salt snacl uses. + require.NotEqual(t, encryptedEntry.Data, newEntry.Data) + + // Decrypt the hard coded test entry and make sure the decrypted content + // matches our created entry. + mac, err = encryptedEntry.loadMacaroon(pwCallback) + require.NoError(t, err) + macBytes, err = mac.MarshalBinary() + require.NoError(t, err) + require.Equal(t, dummyMac, macBytes) +} + +// TestMacaroonJarPlaintext tests that a macaroon can be stored and retrieved +// as plaintext as well. +func TestMacaroonJarPlaintext(t *testing.T) { + // Create a new macaroon entry from the dummy macaroon and encrypt it + // with the test password. + newEntry := &macaroonEntry{ + Name: "plaintextMac", + } + err := newEntry.storeMacaroon(toMacaroon(t, dummyMac), nil) + require.NoError(t, err) + + // Now decrypt it again and make sure we get the same content back. + mac, err := newEntry.loadMacaroon(noPwCallback) + require.NoError(t, err) + macBytes, err := mac.MarshalBinary() + require.NoError(t, err) + require.Equal(t, dummyMac, macBytes) + require.Equal(t, plaintextEntry.Data, newEntry.Data) + + // Load the hard coded plaintext test entry and make sure the loaded + // content matches our created entry. 
+ mac, err = plaintextEntry.loadMacaroon(noPwCallback) + require.NoError(t, err) + macBytes, err = mac.MarshalBinary() + require.NoError(t, err) + require.Equal(t, dummyMac, macBytes) +} + +func toMacaroon(t *testing.T, macData []byte) *macaroon.Macaroon { + mac := &macaroon.Macaroon{} + err := mac.UnmarshalBinary(macData) + require.NoError(t, err) + + return mac +} diff --git a/cmd/lncli/main.go b/cmd/lncli/main.go index 86e637644..12f673743 100644 --- a/cmd/lncli/main.go +++ b/cmd/lncli/main.go @@ -5,14 +5,12 @@ package main import ( + "crypto/tls" "fmt" - "io/ioutil" "os" - "os/user" "path/filepath" "strings" - - macaroon "gopkg.in/macaroon.v2" + "syscall" "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/build" @@ -21,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/macaroons" "github.com/urfave/cli" + "golang.org/x/crypto/ssh/terminal" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) @@ -69,17 +68,30 @@ func getClient(ctx *cli.Context) (lnrpc.LightningClient, func()) { } func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { - // First, we'll parse the args from the command. - tlsCertPath, macPath, err := extractPathArgs(ctx) + // First, we'll get the selected stored profile or an ephemeral one + // created from the global options in the CLI context. + profile, err := getGlobalOptions(ctx) if err != nil { - fatal(err) + fatal(fmt.Errorf("could not load global options: %v", err)) } - // Load the specified TLS certificate and build transport credentials - // with it. - creds, err := credentials.NewClientTLSFromFile(tlsCertPath, "") + // Load the specified TLS certificate. + certPool, err := profile.cert() if err != nil { - fatal(err) + fatal(fmt.Errorf("could not create cert pool: %v", err)) + } + + // Build transport credentials from the certificate pool. If there is no + // certificate pool, we expect the server to use a non-self-signed + // certificate such as a certificate obtained from Let's Encrypt. 
+ var creds credentials.TransportCredentials + if certPool != nil { + creds = credentials.NewClientTLSFromCert(certPool, "") + } else { + // Fallback to the system pool. Using an empty tls config is an + // alternative to x509.SystemCertPool(). That call is not + // supported on Windows. + creds = credentials.NewTLS(&tls.Config{}) } // Create a dial options array. @@ -89,17 +101,31 @@ func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { // Only process macaroon credentials if --no-macaroons isn't set and // if we're not skipping macaroon processing. - if !ctx.GlobalBool("no-macaroons") && !skipMacaroons { - // Load the specified macaroon file. - macBytes, err := ioutil.ReadFile(macPath) - if err != nil { - fatal(fmt.Errorf("unable to read macaroon path (check "+ - "the network setting!): %v", err)) + if !profile.NoMacaroons && !skipMacaroons { + // Find out which macaroon to load. + macName := profile.Macaroons.Default + if ctx.GlobalIsSet("macfromjar") { + macName = ctx.GlobalString("macfromjar") + } + var macEntry *macaroonEntry + for _, entry := range profile.Macaroons.Jar { + if entry.Name == macName { + macEntry = entry + break + } + } + if macEntry == nil { + fatal(fmt.Errorf("macaroon with name '%s' not found "+ + "in profile", macName)) } - mac := &macaroon.Macaroon{} - if err = mac.UnmarshalBinary(macBytes); err != nil { - fatal(fmt.Errorf("unable to decode macaroon: %v", err)) + // Get and possibly decrypt the specified macaroon. + // + // TODO(guggero): Make it possible to cache the password so we + // don't need to ask for it every time. + mac, err := macEntry.loadMacaroon(readPassword) + if err != nil { + fatal(fmt.Errorf("could not load macaroon: %v", err)) } macConstraints := []macaroons.Constraint{ @@ -114,16 +140,18 @@ func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { // altogether if, in the latter case, this time is more than 60 // seconds). // TODO(aakselrod): add better anti-replay protection. 
- macaroons.TimeoutConstraint(ctx.GlobalInt64("macaroontimeout")), + macaroons.TimeoutConstraint(profile.Macaroons.Timeout), // Lock macaroon down to a specific IP address. - macaroons.IPLockConstraint(ctx.GlobalString("macaroonip")), + macaroons.IPLockConstraint(profile.Macaroons.IP), // ... Add more constraints if needed. } // Apply constraints to the macaroon. - constrainedMac, err := macaroons.AddConstraints(mac, macConstraints...) + constrainedMac, err := macaroons.AddConstraints( + mac, macConstraints..., + ) if err != nil { fatal(err) } @@ -139,7 +167,7 @@ func getClientConn(ctx *cli.Context, skipMacaroons bool) *grpc.ClientConn { opts = append(opts, grpc.WithContextDialer(genericDialer)) opts = append(opts, grpc.WithDefaultCallOptions(maxMsgRecvSize)) - conn, err := grpc.Dial(ctx.GlobalString("rpcserver"), opts...) + conn, err := grpc.Dial(profile.RPCServer, opts...) if err != nil { fatal(fmt.Errorf("unable to connect to RPC server: %v", err)) } @@ -171,13 +199,13 @@ func extractPathArgs(ctx *cli.Context) (string, string, error) { // properly read the macaroons (if needed) and also the cert. This will // either be the default, or will have been overwritten by the end // user. - lndDir := cleanAndExpandPath(ctx.GlobalString("lnddir")) + lndDir := lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir")) // If the macaroon path as been manually provided, then we'll only // target the specified file. 
var macPath string if ctx.GlobalString("macaroonpath") != "" { - macPath = cleanAndExpandPath(ctx.GlobalString("macaroonpath")) + macPath = lncfg.CleanAndExpandPath(ctx.GlobalString("macaroonpath")) } else { // Otherwise, we'll go into the path: // lnddir/data/chain// in order to fetch the @@ -188,7 +216,7 @@ func extractPathArgs(ctx *cli.Context) (string, string, error) { ) } - tlsCertPath := cleanAndExpandPath(ctx.GlobalString("tlscertpath")) + tlsCertPath := lncfg.CleanAndExpandPath(ctx.GlobalString("tlscertpath")) // If a custom lnd directory was set, we'll also check if custom paths // for the TLS cert and macaroon file were set as well. If not, we'll @@ -211,45 +239,60 @@ func main() { cli.StringFlag{ Name: "rpcserver", Value: defaultRPCHostPort, - Usage: "host:port of ln daemon", + Usage: "The host:port of LN daemon.", }, cli.StringFlag{ Name: "lnddir", Value: defaultLndDir, - Usage: "path to lnd's base directory", + Usage: "The path to lnd's base directory.", }, cli.StringFlag{ Name: "tlscertpath", Value: defaultTLSCertPath, - Usage: "path to TLS certificate", + Usage: "The path to lnd's TLS certificate.", }, cli.StringFlag{ Name: "chain, c", - Usage: "the chain lnd is running on e.g. bitcoin", + Usage: "The chain lnd is running on, e.g. bitcoin.", Value: "bitcoin", }, cli.StringFlag{ Name: "network, n", - Usage: "the network lnd is running on e.g. mainnet, " + + Usage: "The network lnd is running on, e.g. 
mainnet, " + "testnet, etc.", Value: "mainnet", }, cli.BoolFlag{ Name: "no-macaroons", - Usage: "disable macaroon authentication", + Usage: "Disable macaroon authentication.", }, cli.StringFlag{ Name: "macaroonpath", - Usage: "path to macaroon file", + Usage: "The path to macaroon file.", }, cli.Int64Flag{ Name: "macaroontimeout", Value: 60, - Usage: "anti-replay macaroon validity time in seconds", + Usage: "Anti-replay macaroon validity time in seconds.", }, cli.StringFlag{ Name: "macaroonip", - Usage: "if set, lock macaroon to specific IP address", + Usage: "If set, lock macaroon to specific IP address.", + }, + cli.StringFlag{ + Name: "profile, p", + Usage: "Instead of reading settings from command " + + "line parameters or using the default " + + "profile, use a specific profile. If " + + "a default profile is set, this flag can be " + + "set to an empty string to disable reading " + + "values from the profiles file.", + }, + cli.StringFlag{ + Name: "macfromjar", + Usage: "Use this macaroon from the profile's " + + "macaroon jar instead of the default one. " + + "Can only be used if profiles are defined.", }, } app.Commands = []cli.Command{ @@ -301,8 +344,13 @@ func main() { verifyChanBackupCommand, restoreChanBackupCommand, bakeMacaroonCommand, + listMacaroonIDsCommand, + deleteMacaroonIDCommand, + listPermissionsCommand, + printMacaroonCommand, trackPaymentCommand, versionCommand, + profileSubCommand, } // Add any extra commands determined by build flags. @@ -318,28 +366,15 @@ func main() { } } -// cleanAndExpandPath expands environment variables and leading ~ in the -// passed path, cleans the result, and returns it. -// This function is taken from https://github.com/btcsuite/btcd -func cleanAndExpandPath(path string) string { - if path == "" { - return "" - } +// readPassword reads a password from the terminal. This requires there to be an +// actual TTY so passing in a password from stdin won't work. 
+func readPassword(text string) ([]byte, error) { + fmt.Print(text) - // Expand initial ~ to OS specific home directory. - if strings.HasPrefix(path, "~") { - var homeDir string - user, err := user.Current() - if err == nil { - homeDir = user.HomeDir - } else { - homeDir = os.Getenv("HOME") - } - - path = strings.Replace(path, "~", homeDir, 1) - } - - // NOTE: The os.ExpandEnv doesn't work with Windows-style %VARIABLE%, - // but the variables can still be expanded via POSIX-style $VARIABLE. - return filepath.Clean(os.ExpandEnv(path)) + // The variable syscall.Stdin is of a different type in the Windows API + // that's why we need the explicit cast. And of course the linter + // doesn't like it either. + pw, err := terminal.ReadPassword(int(syscall.Stdin)) // nolint:unconvert + fmt.Println() + return pw, err } diff --git a/cmd/lncli/profile.go b/cmd/lncli/profile.go new file mode 100644 index 000000000..5b1f496e7 --- /dev/null +++ b/cmd/lncli/profile.go @@ -0,0 +1,245 @@ +package main + +import ( + "bytes" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "path" + "strings" + + "github.com/lightningnetwork/lnd/lncfg" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/walletunlocker" + "github.com/urfave/cli" + "gopkg.in/macaroon.v2" +) + +var ( + errNoProfileFile = errors.New("no profile file found") +) + +// profileEntry is a struct that represents all settings for one specific +// profile. +type profileEntry struct { + Name string `json:"name"` + RPCServer string `json:"rpcserver"` + LndDir string `json:"lnddir"` + Chain string `json:"chain"` + Network string `json:"network"` + NoMacaroons bool `json:"no-macaroons,omitempty"` + TLSCert string `json:"tlscert"` + Macaroons *macaroonJar `json:"macaroons"` +} + +// cert returns the profile's TLS certificate as a x509 certificate pool. 
+func (e *profileEntry) cert() (*x509.CertPool, error) { + if e.TLSCert == "" { + return nil, nil + } + + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM([]byte(e.TLSCert)) { + return nil, fmt.Errorf("credentials: failed to append " + + "certificate") + } + return cp, nil +} + +// getGlobalOptions returns the global connection options. If a profile file +// exists, these global options might be read from a predefined profile. If no +// profile exists, the global options from the command line are returned as an +// ephemeral profile entry. +func getGlobalOptions(ctx *cli.Context) (*profileEntry, error) { + var profileName string + + // Try to load the default profile file and depending on its existence + // what profile to use. + f, err := loadProfileFile(defaultProfileFile) + switch { + // The legacy case where no profile file exists and the user also didn't + // request to use one. We only consider the global options here. + case err == errNoProfileFile && !ctx.GlobalIsSet("profile"): + return profileFromContext(ctx, false) + + // The file doesn't exist but the user specified an explicit profile. + case err == errNoProfileFile && ctx.GlobalIsSet("profile"): + return nil, fmt.Errorf("profile file %s does not exist", + defaultProfileFile) + + // There is a file but we couldn't read/parse it. + case err != nil: + return nil, fmt.Errorf("could not read profile file %s: "+ + "%v", defaultProfileFile, err) + + // The user explicitly disabled the use of profiles for this command by + // setting the flag to an empty string. We fall back to the default/old + // behavior. + case ctx.GlobalIsSet("profile") && ctx.GlobalString("profile") == "": + return profileFromContext(ctx, false) + + // There is a file, but no default profile is specified. The user also + // didn't specify a profile to use so we fall back to the default/old + // behavior. 
+ case !ctx.GlobalIsSet("profile") && len(f.Default) == 0: + return profileFromContext(ctx, false) + + // The user didn't specify a profile but there is a default one defined. + case !ctx.GlobalIsSet("profile") && len(f.Default) > 0: + profileName = f.Default + + // The user specified a specific profile to use. + case ctx.GlobalIsSet("profile"): + profileName = ctx.GlobalString("profile") + } + + // If we got to here, we do have a profile file and know the name of the + // profile to use. Now we just need to make sure it does exist. + for _, prof := range f.Profiles { + if prof.Name == profileName { + return prof, nil + } + } + + return nil, fmt.Errorf("profile '%s' not found in file %s", profileName, + defaultProfileFile) +} + +// profileFromContext creates an ephemeral profile entry from the global options +// set in the CLI context. +func profileFromContext(ctx *cli.Context, store bool) (*profileEntry, error) { + // Parse the paths of the cert and macaroon. This will validate the + // chain and network value as well. + tlsCertPath, macPath, err := extractPathArgs(ctx) + if err != nil { + return nil, err + } + + // Load the certificate file now, if specified. We store it as plain PEM + // directly. + var tlsCert []byte + if lnrpc.FileExists(tlsCertPath) { + var err error + tlsCert, err = ioutil.ReadFile(tlsCertPath) + if err != nil { + return nil, fmt.Errorf("could not load TLS cert file "+ + "%s: %v", tlsCertPath, err) + } + } + + // Now load and possibly encrypt the macaroon file. + macBytes, err := ioutil.ReadFile(macPath) + if err != nil { + return nil, fmt.Errorf("unable to read macaroon path (check "+ + "the network setting!): %v", err) + } + mac := &macaroon.Macaroon{} + if err = mac.UnmarshalBinary(macBytes); err != nil { + return nil, fmt.Errorf("unable to decode macaroon: %v", err) + } + + var pw []byte + if store { + // Read a password from the terminal. If it's empty, we won't + // encrypt the macaroon and store it plaintext. 
+ pw, err = capturePassword( + "Enter password to encrypt macaroon with or leave "+ + "blank to store in plaintext: ", true, + walletunlocker.ValidatePassword, + ) + if err != nil { + return nil, fmt.Errorf("unable to get encryption "+ + "password: %v", err) + } + } + macEntry := &macaroonEntry{} + if err = macEntry.storeMacaroon(mac, pw); err != nil { + return nil, fmt.Errorf("unable to store macaroon: %v", err) + } + + // We determine the name of the macaroon from the file itself but cut + // off the ".macaroon" at the end. + macEntry.Name = path.Base(macPath) + if path.Ext(macEntry.Name) == "macaroon" { + macEntry.Name = strings.TrimSuffix(macEntry.Name, ".macaroon") + } + + // Now that we have the complicated arguments behind us, let's return + // the new entry with all the values populated. + return &profileEntry{ + RPCServer: ctx.GlobalString("rpcserver"), + LndDir: lncfg.CleanAndExpandPath(ctx.GlobalString("lnddir")), + Chain: ctx.GlobalString("chain"), + Network: ctx.GlobalString("network"), + NoMacaroons: ctx.GlobalBool("no-macaroons"), + TLSCert: string(tlsCert), + Macaroons: &macaroonJar{ + Default: macEntry.Name, + Timeout: ctx.GlobalInt64("macaroontimeout"), + IP: ctx.GlobalString("macaroonip"), + Jar: []*macaroonEntry{macEntry}, + }, + }, nil +} + +// loadProfileFile tries to load the file specified and JSON deserialize it into +// the profile file struct. +func loadProfileFile(file string) (*profileFile, error) { + if !lnrpc.FileExists(file) { + return nil, errNoProfileFile + } + + content, err := ioutil.ReadFile(file) + if err != nil { + return nil, fmt.Errorf("could not load profile file %s: %v", + file, err) + } + f := &profileFile{} + err = f.unmarshalJSON(content) + if err != nil { + return nil, fmt.Errorf("could not unmarshal profile file %s: "+ + "%v", file, err) + } + return f, nil +} + +// saveProfileFile stores the given profile file struct in the specified file, +// overwriting it if it already existed. 
+func saveProfileFile(file string, f *profileFile) error { + content, err := f.marshalJSON() + if err != nil { + return fmt.Errorf("could not marshal profile: %v", err) + } + return ioutil.WriteFile(file, content, 0644) +} + +// profileFile is a struct that represents the whole content of a profile file. +type profileFile struct { + Default string `json:"default,omitempty"` + Profiles []*profileEntry `json:"profiles"` +} + +// unmarshalJSON tries to parse the given JSON and unmarshal it into the +// receiving instance. +func (f *profileFile) unmarshalJSON(content []byte) error { + return json.Unmarshal(content, f) +} + +// marshalJSON serializes the receiving instance to formatted/indented JSON. +func (f *profileFile) marshalJSON() ([]byte, error) { + b, err := json.Marshal(f) + if err != nil { + return nil, fmt.Errorf("error JSON marshalling profile: %v", + err) + } + + var out bytes.Buffer + err = json.Indent(&out, b, "", " ") + if err != nil { + return nil, fmt.Errorf("error indenting profile JSON: %v", err) + } + out.WriteString("\n") + return out.Bytes(), nil +} diff --git a/cmd/lnd/main.go b/cmd/lnd/main.go index 9364afbe2..7196ebff0 100644 --- a/cmd/lnd/main.go +++ b/cmd/lnd/main.go @@ -14,23 +14,28 @@ func main() { // function will also set up logging properly. loadedConfig, err := lnd.LoadConfig() if err != nil { + if e, ok := err.(*flags.Error); !ok || e.Type != flags.ErrHelp { + // Print error if not due to help request. + _, _ = fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + + // Help was requested, exit normally. + os.Exit(0) + } + + // Hook interceptor for os signals. + if err := signal.Intercept(); err != nil { _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } - // Hook interceptor for os signals. - signal.Intercept() - // Call the "real" main in a nested manner so the defers will properly // be executed in the case of a graceful shutdown. 
- err = lnd.Main( + if err := lnd.Main( loadedConfig, lnd.ListenerCfg{}, signal.ShutdownChannel(), - ) - if err != nil { - if e, ok := err.(*flags.Error); ok && e.Type == flags.ErrHelp { - } else { - _, _ = fmt.Fprintln(os.Stderr, err) - } + ); err != nil { + _, _ = fmt.Fprintln(os.Stderr, err) os.Exit(1) } } diff --git a/config.go b/config.go index 3a52ca964..51d84a5a0 100644 --- a/config.go +++ b/config.go @@ -27,6 +27,7 @@ import ( "github.com/lightningnetwork/lnd/discovery" "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/htlcswitch/hodl" + "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc/routerrpc" "github.com/lightningnetwork/lnd/lnrpc/signrpc" @@ -63,6 +64,8 @@ const ( defaultMaxLogFileSize = 10 defaultMinBackoff = time.Second defaultMaxBackoff = time.Hour + defaultLetsEncryptDirname = "letsencrypt" + defaultLetsEncryptListen = ":80" defaultTorSOCKSPort = 9050 defaultTorDNSHost = "soa.nodes.lightning.directory" @@ -86,6 +89,27 @@ const ( // HostAnnouncer will wait between DNS resolutions to check if the // backing IP of a host has changed. defaultHostSampleInterval = time.Minute * 5 + + defaultChainInterval = time.Minute + defaultChainTimeout = time.Second * 10 + defaultChainBackoff = time.Second * 30 + defaultChainAttempts = 3 + + // Set defaults for a health check which ensures that we have space + // available on disk. Although this check is off by default so that we + // avoid breaking any existing setups (particularly on mobile), we still + // set the other default values so that the health check can be easily + // enabled with sane defaults. + defaultRequiredDisk = 0.1 + defaultDiskInterval = time.Hour * 12 + defaultDiskTimeout = time.Second * 5 + defaultDiskBackoff = time.Minute + defaultDiskAttempts = 0 + + // defaultRemoteMaxHtlcs specifies the default limit for maximum + // concurrent HTLCs the remote party may add to commitment transactions. 
+ // This value can be overridden with --default-remote-max-htlcs. + defaultRemoteMaxHtlcs = 483 ) var ( @@ -106,8 +130,9 @@ var ( defaultTowerDir = filepath.Join(defaultDataDir, defaultTowerSubDirname) - defaultTLSCertPath = filepath.Join(DefaultLndDir, defaultTLSCertFilename) - defaultTLSKeyPath = filepath.Join(DefaultLndDir, defaultTLSKeyFilename) + defaultTLSCertPath = filepath.Join(DefaultLndDir, defaultTLSCertFilename) + defaultTLSKeyPath = filepath.Join(DefaultLndDir, defaultTLSKeyFilename) + defaultLetsEncryptDir = filepath.Join(DefaultLndDir, defaultLetsEncryptDirname) defaultBtcdDir = btcutil.AppDataDir("btcd", false) defaultBtcdRPCCertFile = filepath.Join(defaultBtcdDir, "rpc.cert") @@ -126,6 +151,8 @@ var ( // estimatesmartfee RPC call. defaultBitcoindEstimateMode = "CONSERVATIVE" bitcoindEstimateModes = [2]string{"ECONOMICAL", defaultBitcoindEstimateMode} + + defaultSphinxDbName = "sphinxreplay.db" ) // Config defines the configuration options for lnd. @@ -140,13 +167,14 @@ type Config struct { DataDir string `short:"b" long:"datadir" description:"The directory to store lnd's data within"` SyncFreelist bool `long:"sync-freelist" description:"Whether the databases used within lnd should sync their freelist to disk. 
This is disabled by default resulting in improved memory performance during operation, but with an increase in startup time."` - TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"` - TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"` - TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"` - TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"` - TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"` + TLSCertPath string `long:"tlscertpath" description:"Path to write the TLS certificate for lnd's RPC and REST services"` + TLSKeyPath string `long:"tlskeypath" description:"Path to write the TLS private key for lnd's RPC and REST services"` + TLSExtraIPs []string `long:"tlsextraip" description:"Adds an extra ip to the generated certificate"` + TLSExtraDomains []string `long:"tlsextradomain" description:"Adds an extra domain to the generated certificate"` + TLSAutoRefresh bool `long:"tlsautorefresh" description:"Re-generate TLS certificate and key if the IPs or domains are changed"` + TLSDisableAutofill bool `long:"tlsdisableautofill" description:"Do not include the interface IPs or the system hostname in TLS certificate, use first --tlsextradomain as Common Name instead, if set"` - NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication"` + NoMacaroons bool `long:"no-macaroons" description:"Disable macaroon authentication, can only be used if server is not listening on a public interface."` AdminMacPath string `long:"adminmacaroonpath" description:"Path to write the admin macaroon for lnd's RPC and REST services if it doesn't exist"` ReadMacPath string `long:"readonlymacaroonpath" description:"Path to write the read-only macaroon for 
lnd's RPC and REST services if it doesn't exist"` InvoiceMacPath string `long:"invoicemacaroonpath" description:"Path to the invoice-only macaroon for lnd's RPC and REST services if it doesn't exist"` @@ -155,25 +183,30 @@ type Config struct { MaxLogFileSize int `long:"maxlogfilesize" description:"Maximum logfile size in MB"` AcceptorTimeout time.Duration `long:"acceptortimeout" description:"Time after which an RPCAcceptor will time out and return false if it hasn't yet received a response"` + LetsEncryptDir string `long:"letsencryptdir" description:"The directory to store Let's Encrypt certificates within"` + LetsEncryptListen string `long:"letsencryptlisten" description:"The IP:port on which lnd will listen for Let's Encrypt challenges. Let's Encrypt will always try to contact on port 80. Often non-root processes are not allowed to bind to ports lower than 1024. This configuration option allows a different port to be used, but must be used in combination with port forwarding from port 80. This configuration can also be used to specify another IP address to listen on, for example an IPv6 address."` + LetsEncryptDomain string `long:"letsencryptdomain" description:"Request a Let's Encrypt certificate for this domain. Note that the certicate is only requested and stored when the first rpc connection comes in."` + // We'll parse these 'raw' string arguments into real net.Addrs in the // loadConfig function. We need to expose the 'raw' strings so the // command line library can access them. // Only the parsed net.Addrs should be used! 
- RawRPCListeners []string `long:"rpclisten" description:"Add an interface/port/socket to listen for RPC connections"` - RawRESTListeners []string `long:"restlisten" description:"Add an interface/port/socket to listen for REST connections"` - RawListeners []string `long:"listen" description:"Add an interface/port to listen for peer connections"` - RawExternalIPs []string `long:"externalip" description:"Add an ip:port to the list of local addresses we claim to listen on to peers. If a port is not specified, the default (9735) will be used regardless of other parameters"` - ExternalHosts []string `long:"externalhosts" description:"A set of hosts that should be periodically resolved to announce IPs for"` - RPCListeners []net.Addr - RESTListeners []net.Addr - RestCORS []string `long:"restcors" description:"Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as \"*\"."` - Listeners []net.Addr - ExternalIPs []net.Addr - DisableListen bool `long:"nolisten" description:"Disable listening for incoming peer connections"` - DisableRest bool `long:"norest" description:"Disable REST API"` - NAT bool `long:"nat" description:"Toggle NAT traversal support (using either UPnP or NAT-PMP) to automatically advertise your external IP address to the network -- NOTE this does not support devices behind multiple NATs"` - MinBackoff time.Duration `long:"minbackoff" description:"Shortest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."` - MaxBackoff time.Duration `long:"maxbackoff" description:"Longest backoff when reconnecting to persistent peers. 
Valid time units are {s, m, h}."` + RawRPCListeners []string `long:"rpclisten" description:"Add an interface/port/socket to listen for RPC connections"` + RawRESTListeners []string `long:"restlisten" description:"Add an interface/port/socket to listen for REST connections"` + RawListeners []string `long:"listen" description:"Add an interface/port to listen for peer connections"` + RawExternalIPs []string `long:"externalip" description:"Add an ip:port to the list of local addresses we claim to listen on to peers. If a port is not specified, the default (9735) will be used regardless of other parameters"` + ExternalHosts []string `long:"externalhosts" description:"A set of hosts that should be periodically resolved to announce IPs for"` + RPCListeners []net.Addr + RESTListeners []net.Addr + RestCORS []string `long:"restcors" description:"Add an ip:port/hostname to allow cross origin access from. To allow all origins, set as \"*\"."` + Listeners []net.Addr + ExternalIPs []net.Addr + DisableListen bool `long:"nolisten" description:"Disable listening for incoming peer connections"` + DisableRest bool `long:"norest" description:"Disable REST API"` + NAT bool `long:"nat" description:"Toggle NAT traversal support (using either UPnP or NAT-PMP) to automatically advertise your external IP address to the network -- NOTE this does not support devices behind multiple NATs"` + MinBackoff time.Duration `long:"minbackoff" description:"Shortest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."` + MaxBackoff time.Duration `long:"maxbackoff" description:"Longest backoff when reconnecting to persistent peers. Valid time units are {s, m, h}."` + ConnectionTimeout time.Duration `long:"connectiontimeout" description:"The timeout value for network connections. 
Valid time units are {ms, s, m, h}."` DebugLevel string `short:"d" long:"debuglevel" description:"Logging level for all subsystems {trace, debug, info, warn, error, critical} -- You may also specify =,=,... to set the log level for individual subsystems -- Use show to list available subsystems"` @@ -186,6 +219,8 @@ type Config struct { MaxPendingChannels int `long:"maxpendingchannels" description:"The maximum number of incoming pending channels permitted per peer."` BackupFilePath string `long:"backupfilepath" description:"The target location of the channel backup file"` + FeeURL string `long:"feeurl" description:"Optional URL for external fee estimation. If no URL is specified, the method for fee estimation will depend on the chosen backend and network."` + Bitcoin *lncfg.Chain `group:"Bitcoin" namespace:"bitcoin"` BtcdMode *lncfg.Btcd `group:"btcd" namespace:"btcd"` BitcoindMode *lncfg.Bitcoind `group:"bitcoind" namespace:"bitcoind"` @@ -205,7 +240,7 @@ type Config struct { NoNetBootstrap bool `long:"nobootstrap" description:"If true, then automatic network bootstrapping will not be attempted."` - NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE -- EVER. THIS FLAG IS ONLY FOR TESTING AND IS BEING DEPRECATED."` + NoSeedBackup bool `long:"noseedbackup" description:"If true, NO SEED WILL BE EXPOSED -- EVER, AND THE WALLET WILL BE ENCRYPTED USING THE DEFAULT PASSPHRASE. 
THIS FLAG IS ONLY FOR TESTING AND SHOULD NEVER BE USED ON MAINNET."` PaymentsExpirationGracePeriod time.Duration `long:"payments-expiration-grace-period" description:"A period to wait before force closing channels with outgoing htlcs that have timed-out and are a result of this node initiated payments."` TrickleDelay int `long:"trickledelay" description:"Time in milliseconds between each release of announcements to the network"` @@ -216,6 +251,9 @@ type Config struct { Alias string `long:"alias" description:"The node alias. Used as a moniker by peers and intelligence services"` Color string `long:"color" description:"The color of the node in hex format (i.e. '#3399FF'). Used to customize node appearance in intelligence services"` MinChanSize int64 `long:"minchansize" description:"The smallest channel size (in satoshis) that we should accept. Incoming channels smaller than this will be rejected"` + MaxChanSize int64 `long:"maxchansize" description:"The largest channel size (in satoshis) that we should accept. Incoming channels larger than this will be rejected"` + + DefaultRemoteMaxHtlcs uint16 `long:"default-remote-max-htlcs" description:"The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent HTLCs that the remote party can add to the commitment. The maximum possible value is 483."` NumGraphSyncPeers int `long:"numgraphsyncpeers" description:"The number of peers that we should receive new graph updates from. This option can be tuned to save bandwidth for light clients or routing nodes."` HistoricalSyncInterval time.Duration `long:"historicalsyncinterval" description:"The polling interval between historical graph sync attempts. 
Each historical graph sync attempt ensures we reconcile with the remote peer's graph from the genesis block."` @@ -242,6 +280,10 @@ type Config struct { KeysendHoldTime time.Duration `long:"keysend-hold-time" description:"If non-zero, keysend payments are accepted but not immediately settled. If the payment isn't settled manually after the specified time, it is canceled automatically. [experimental]"` + GcCanceledInvoicesOnStartup bool `long:"gc-canceled-invoices-on-startup" description:"If true, we'll attempt to garbage collect canceled invoices upon start."` + + GcCanceledInvoicesOnTheFly bool `long:"gc-canceled-invoices-on-the-fly" description:"If true, we'll delete newly canceled invoices on the fly."` + Routing *routing.Conf `group:"routing" namespace:"routing"` Workers *lncfg.Workers `group:"workers" namespace:"workers"` @@ -258,6 +300,8 @@ type Config struct { AllowCircularRoute bool `long:"allow-circular-route" description:"If true, our node will allow htlc forwards that arrive and depart on the same channel."` + HealthChecks *lncfg.HealthCheckConfig `group:"healthcheck" namespace:"healthcheck"` + DB *lncfg.DB `group:"db" namespace:"db"` // LogWriter is the root logger that all of the daemon's subloggers are @@ -272,21 +316,26 @@ type Config struct { // network. This path will hold the files related to each different // network. networkDir string + + // ActiveNetParams contains parameters of the target chain. + ActiveNetParams bitcoinNetParams } // DefaultConfig returns all default values for the Config struct. 
func DefaultConfig() Config { return Config{ - LndDir: DefaultLndDir, - ConfigFile: DefaultConfigFile, - DataDir: defaultDataDir, - DebugLevel: defaultLogLevel, - TLSCertPath: defaultTLSCertPath, - TLSKeyPath: defaultTLSKeyPath, - LogDir: defaultLogDir, - MaxLogFiles: defaultMaxLogFiles, - MaxLogFileSize: defaultMaxLogFileSize, - AcceptorTimeout: defaultAcceptorTimeout, + LndDir: DefaultLndDir, + ConfigFile: DefaultConfigFile, + DataDir: defaultDataDir, + DebugLevel: defaultLogLevel, + TLSCertPath: defaultTLSCertPath, + TLSKeyPath: defaultTLSKeyPath, + LetsEncryptDir: defaultLetsEncryptDir, + LetsEncryptListen: defaultLetsEncryptListen, + LogDir: defaultLogDir, + MaxLogFiles: defaultMaxLogFiles, + MaxLogFileSize: defaultMaxLogFileSize, + AcceptorTimeout: defaultAcceptorTimeout, Bitcoin: &lncfg.Chain{ MinHTLCIn: defaultBitcoinMinHTLCInMSat, MinHTLCOut: defaultBitcoinMinHTLCOutMSat, @@ -328,6 +377,7 @@ func DefaultConfig() Config { NoSeedBackup: defaultNoSeedBackup, MinBackoff: defaultMinBackoff, MaxBackoff: defaultMaxBackoff, + ConnectionTimeout: tor.DefaultConnTimeout, SubRPCServers: &subRPCServerConfigs{ SignRPC: &signrpc.Config{}, RouterRPC: routerrpc.DefaultConfig(), @@ -352,6 +402,8 @@ func DefaultConfig() Config { Alias: defaultAlias, Color: defaultColor, MinChanSize: int64(minChanFundingSize), + MaxChanSize: int64(0), + DefaultRemoteMaxHtlcs: defaultRemoteMaxHtlcs, NumGraphSyncPeers: defaultMinPeers, HistoricalSyncInterval: discovery.DefaultHistoricalSyncInterval, Tor: &lncfg.Tor{ @@ -373,11 +425,29 @@ func DefaultConfig() Config { Watchtower: &lncfg.Watchtower{ TowerDir: defaultTowerDir, }, + HealthChecks: &lncfg.HealthCheckConfig{ + ChainCheck: &lncfg.CheckConfig{ + Interval: defaultChainInterval, + Timeout: defaultChainTimeout, + Attempts: defaultChainAttempts, + Backoff: defaultChainBackoff, + }, + DiskCheck: &lncfg.DiskCheckConfig{ + RequiredRemaining: defaultRequiredDisk, + CheckConfig: &lncfg.CheckConfig{ + Interval: defaultDiskInterval, + Attempts: 
defaultDiskAttempts, + Timeout: defaultDiskTimeout, + Backoff: defaultDiskBackoff, + }, + }, + }, MaxOutgoingCltvExpiry: htlcswitch.DefaultMaxOutgoingCltvExpiry, MaxChannelFeeAllocation: htlcswitch.DefaultMaxLinkFeeAllocation, LogWriter: build.NewRotatingLogWriter(), DB: lncfg.DefaultDB(), registeredChains: newChainRegistry(), + ActiveNetParams: bitcoinTestNetParams, } } @@ -466,6 +536,9 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { lndDir := CleanAndExpandPath(cfg.LndDir) if lndDir != DefaultLndDir { cfg.DataDir = filepath.Join(lndDir, defaultDataDirname) + cfg.LetsEncryptDir = filepath.Join( + lndDir, defaultLetsEncryptDirname, + ) cfg.TLSCertPath = filepath.Join(lndDir, defaultTLSCertFilename) cfg.TLSKeyPath = filepath.Join(lndDir, defaultTLSKeyFilename) cfg.LogDir = filepath.Join(lndDir, defaultLogDirname) @@ -479,23 +552,28 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { } } - // Create the lnd directory if it doesn't already exist. funcName := "loadConfig" - if err := os.MkdirAll(lndDir, 0700); err != nil { - // Show a nicer error message if it's because a symlink is - // linked to a directory that does not exist (probably because - // it's not mounted). - if e, ok := err.(*os.PathError); ok && os.IsExist(err) { - if link, lerr := os.Readlink(e.Path); lerr == nil { - str := "is symlink %s -> %s mounted?" - err = fmt.Errorf(str, e.Path, link) + makeDirectory := func(dir string) error { + err := os.MkdirAll(dir, 0700) + if err != nil { + // Show a nicer error message if it's because a symlink + // is linked to a directory that does not exist + // (probably because it's not mounted). + if e, ok := err.(*os.PathError); ok && os.IsExist(err) { + link, lerr := os.Readlink(e.Path) + if lerr == nil { + str := "is symlink %s -> %s mounted?" 
+ err = fmt.Errorf(str, e.Path, link) + } } + + str := "%s: Failed to create lnd directory: %v" + err := fmt.Errorf(str, funcName, err) + _, _ = fmt.Fprintln(os.Stderr, err) + return err } - str := "%s: Failed to create lnd directory: %v" - err := fmt.Errorf(str, funcName, err) - _, _ = fmt.Fprintln(os.Stderr, err) - return nil, err + return nil } // As soon as we're done parsing configuration options, ensure all paths @@ -504,6 +582,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { cfg.DataDir = CleanAndExpandPath(cfg.DataDir) cfg.TLSCertPath = CleanAndExpandPath(cfg.TLSCertPath) cfg.TLSKeyPath = CleanAndExpandPath(cfg.TLSKeyPath) + cfg.LetsEncryptDir = CleanAndExpandPath(cfg.LetsEncryptDir) cfg.AdminMacPath = CleanAndExpandPath(cfg.AdminMacPath) cfg.ReadMacPath = CleanAndExpandPath(cfg.ReadMacPath) cfg.InvoiceMacPath = CleanAndExpandPath(cfg.InvoiceMacPath) @@ -516,6 +595,24 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { cfg.Tor.WatchtowerKeyPath = CleanAndExpandPath(cfg.Tor.WatchtowerKeyPath) cfg.Watchtower.TowerDir = CleanAndExpandPath(cfg.Watchtower.TowerDir) + // Create the lnd directory and all other sub directories if they don't + // already exist. This makes sure that directory trees are also created + // for files that point to outside of the lnddir. + dirs := []string{ + lndDir, cfg.DataDir, + cfg.LetsEncryptDir, cfg.Watchtower.TowerDir, + filepath.Dir(cfg.TLSCertPath), filepath.Dir(cfg.TLSKeyPath), + filepath.Dir(cfg.AdminMacPath), filepath.Dir(cfg.ReadMacPath), + filepath.Dir(cfg.InvoiceMacPath), + filepath.Dir(cfg.Tor.PrivateKeyPath), + filepath.Dir(cfg.Tor.WatchtowerKeyPath), + } + for _, dir := range dirs { + if err := makeDirectory(dir); err != nil { + return nil, err + } + } + // Ensure that the user didn't attempt to specify negative values for // any of the autopilot params. 
if cfg.Autopilot.MaxChannels < 0 { @@ -556,7 +653,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { } // Ensure that the specified values for the min and max channel size - // don't are within the bounds of the normal chan size constraints. + // are within the bounds of the normal chan size constraints. if cfg.Autopilot.MinChannelSize < int64(minChanFundingSize) { cfg.Autopilot.MinChannelSize = int64(minChanFundingSize) } @@ -568,6 +665,38 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { return nil, err } + // Ensure that --maxchansize is properly handled when set by user. + // For non-Wumbo channels this limit remains 16777215 satoshis by default + // as specified in BOLT-02. For wumbo channels this limit is 1,000,000,000. + // satoshis (10 BTC). Always enforce --maxchansize explicitly set by user. + // If unset (marked by 0 value), then enforce proper default. + if cfg.MaxChanSize == 0 { + if cfg.ProtocolOptions.Wumbo() { + cfg.MaxChanSize = int64(MaxBtcFundingAmountWumbo) + } else { + cfg.MaxChanSize = int64(MaxBtcFundingAmount) + } + } + + // Ensure that the user specified values for the min and max channel + // size make sense. + if cfg.MaxChanSize < cfg.MinChanSize { + return nil, fmt.Errorf("invalid channel size parameters: "+ + "max channel size %v, must be no less than min chan size %v", + cfg.MaxChanSize, cfg.MinChanSize, + ) + } + + // Don't allow superflous --maxchansize greater than + // BOLT 02 soft-limit for non-wumbo channel + if !cfg.ProtocolOptions.Wumbo() && cfg.MaxChanSize > int64(MaxFundingAmount) { + return nil, fmt.Errorf("invalid channel size parameters: "+ + "maximum channel size %v is greater than maximum non-wumbo"+ + " channel size %v", + cfg.MaxChanSize, MaxFundingAmount, + ) + } + // Ensure a valid max channel fee allocation was set. 
if cfg.MaxChannelFeeAllocation <= 0 || cfg.MaxChannelFeeAllocation > 1 { return nil, fmt.Errorf("invalid max channel fee allocation: "+ @@ -734,12 +863,12 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { // throughout the codebase we required chaincfg.Params. So as a // temporary hack, we'll mutate the default net params for // bitcoin with the litecoin specific information. - applyLitecoinParams(&activeNetParams, <cParams) + applyLitecoinParams(&cfg.ActiveNetParams, <cParams) switch cfg.Litecoin.Node { case "ltcd": err := parseRPCParams(cfg.Litecoin, cfg.LtcdMode, - litecoinChain, funcName) + litecoinChain, funcName, cfg.ActiveNetParams) if err != nil { err := fmt.Errorf("unable to load RPC "+ "credentials for ltcd: %v", err) @@ -751,7 +880,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { "support simnet", funcName) } err := parseRPCParams(cfg.Litecoin, cfg.LitecoindMode, - litecoinChain, funcName) + litecoinChain, funcName, cfg.ActiveNetParams) if err != nil { err := fmt.Errorf("unable to load RPC "+ "credentials for litecoind: %v", err) @@ -779,19 +908,19 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { numNets := 0 if cfg.Bitcoin.MainNet { numNets++ - activeNetParams = bitcoinMainNetParams + cfg.ActiveNetParams = bitcoinMainNetParams } if cfg.Bitcoin.TestNet3 { numNets++ - activeNetParams = bitcoinTestNetParams + cfg.ActiveNetParams = bitcoinTestNetParams } if cfg.Bitcoin.RegTest { numNets++ - activeNetParams = bitcoinRegTestNetParams + cfg.ActiveNetParams = bitcoinRegTestNetParams } if cfg.Bitcoin.SimNet { numNets++ - activeNetParams = bitcoinSimNetParams + cfg.ActiveNetParams = bitcoinSimNetParams } if numNets > 1 { str := "%s: The mainnet, testnet, regtest, and " + @@ -820,6 +949,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { case "btcd": err := parseRPCParams( cfg.Bitcoin, cfg.BtcdMode, bitcoinChain, funcName, + cfg.ActiveNetParams, ) if err != nil 
{ err := fmt.Errorf("unable to load RPC "+ @@ -834,6 +964,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { err := parseRPCParams( cfg.Bitcoin, cfg.BitcoindMode, bitcoinChain, funcName, + cfg.ActiveNetParams, ) if err != nil { err := fmt.Errorf("unable to load RPC "+ @@ -911,7 +1042,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { cfg.networkDir = filepath.Join( cfg.DataDir, defaultChainSubDirname, cfg.registeredChains.PrimaryChain().String(), - normalizeNetwork(activeNetParams.Name), + normalizeNetwork(cfg.ActiveNetParams.Name), ) // If a custom macaroon directory wasn't specified and the data @@ -945,7 +1076,7 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { // per network in the same fashion as the data directory. cfg.LogDir = filepath.Join(cfg.LogDir, cfg.registeredChains.PrimaryChain().String(), - normalizeNetwork(activeNetParams.Name)) + normalizeNetwork(cfg.ActiveNetParams.Name)) // A log writer must be passed in, otherwise we can't function and would // run into a panic later on. @@ -1097,12 +1228,30 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { "minbackoff") } + // Newer versions of lnd added a new sub-config for bolt-specific + // parameters. However we want to also allow existing users to use the + // value on the top-level config. If the outer config value is set, + // then we'll use that directly. + if cfg.SyncFreelist { + cfg.DB.Bolt.SyncFreelist = cfg.SyncFreelist + } + + // Ensure that the user hasn't chosen a remote-max-htlc value greater + // than the protocol maximum. + maxRemoteHtlcs := uint16(input.MaxHTLCNumber / 2) + if cfg.DefaultRemoteMaxHtlcs > maxRemoteHtlcs { + return nil, fmt.Errorf("default-remote-max-htlcs (%v) must be "+ + "less than %v", cfg.DefaultRemoteMaxHtlcs, + maxRemoteHtlcs) + } + // Validate the subconfigs for workers, caches, and the tower client. 
err = lncfg.Validate( cfg.Workers, cfg.Caches, cfg.WtClient, cfg.DB, + cfg.HealthChecks, ) if err != nil { return nil, err @@ -1125,11 +1274,11 @@ func ValidateConfig(cfg Config, usageMessage string) (*Config, error) { func (c *Config) localDatabaseDir() string { return filepath.Join(c.DataDir, defaultGraphSubDirname, - normalizeNetwork(activeNetParams.Name)) + normalizeNetwork(c.ActiveNetParams.Name)) } func (c *Config) networkName() string { - return normalizeNetwork(activeNetParams.Name) + return normalizeNetwork(c.ActiveNetParams.Name) } // CleanAndExpandPath expands environment variables and leading ~ in the @@ -1159,7 +1308,7 @@ func CleanAndExpandPath(path string) string { } func parseRPCParams(cConfig *lncfg.Chain, nodeConfig interface{}, net chainCode, - funcName string) error { // nolint:unparam + funcName string, netParams bitcoinNetParams) error { // nolint:unparam // First, we'll check our node config to make sure the RPC parameters // were set correctly. We'll also determine the path to the conf file @@ -1269,7 +1418,7 @@ func parseRPCParams(cConfig *lncfg.Chain, nodeConfig interface{}, net chainCode, case "bitcoind", "litecoind": nConf := nodeConfig.(*lncfg.Bitcoind) rpcUser, rpcPass, zmqBlockHost, zmqTxHost, err := - extractBitcoindRPCParams(confFile) + extractBitcoindRPCParams(netParams.Params.Name, confFile) if err != nil { return fmt.Errorf("unable to extract RPC credentials:"+ " %v, cannot start w/o RPC connection", @@ -1329,13 +1478,13 @@ func extractBtcdRPCParams(btcdConfigPath string) (string, string, error) { return string(userSubmatches[1]), string(passSubmatches[1]), nil } -// extractBitcoindParams attempts to extract the RPC credentials for an +// extractBitcoindRPCParams attempts to extract the RPC credentials for an // existing bitcoind node instance. The passed path is expected to be the // location of bitcoind's bitcoin.conf on the target system. 
The routine looks // for a cookie first, optionally following the datadir configuration option in // the bitcoin.conf. If it doesn't find one, it looks for rpcuser/rpcpassword. -func extractBitcoindRPCParams(bitcoindConfigPath string) (string, string, string, - string, error) { +func extractBitcoindRPCParams(networkName string, + bitcoindConfigPath string) (string, string, string, string, error) { // First, we'll open up the bitcoind configuration file found at the // target destination. @@ -1393,7 +1542,7 @@ func extractBitcoindRPCParams(bitcoindConfigPath string) (string, string, string } chainDir := "/" - switch activeNetParams.Params.Name { + switch networkName { case "testnet3": chainDir = "/testnet3/" case "testnet4": diff --git a/contractcourt/anchor_resolver.go b/contractcourt/anchor_resolver.go index c67265c77..98a4fccae 100644 --- a/contractcourt/anchor_resolver.go +++ b/contractcourt/anchor_resolver.go @@ -10,7 +10,6 @@ import ( "github.com/btcsuite/btcutil" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" - "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/sweep" ) @@ -86,41 +85,36 @@ func (c *anchorResolver) Resolve() (ContractResolver, error) { // situation. We don't want to force sweep anymore, because the anchor // lost its special purpose to get the commitment confirmed. It is just // an output that we want to sweep only if it is economical to do so. + // + // An exclusive group is not necessary anymore, because we know that + // this is the only anchor that can be swept. + // + // We also clear the parent tx information for cpfp, because the + // commitment tx is confirmed. + // + // After a restart or when the remote force closes, the sweeper is not + // yet aware of the anchor. In that case, it will be added as new input + // to the sweeper. 
relayFeeRate := c.Sweeper.RelayFeePerKW() - resultChan, err := c.Sweeper.UpdateParams( - c.anchor, - sweep.ParamsUpdate{ + anchorInput := input.MakeBaseInput( + &c.anchor, + input.CommitmentAnchor, + &c.anchorSignDescriptor, + c.broadcastHeight, + nil, + ) + + resultChan, err := c.Sweeper.SweepInput( + &anchorInput, + sweep.Params{ Fee: sweep.FeePreference{ FeeRate: relayFeeRate, }, - Force: false, }, ) - - // After a restart or when the remote force closes, the sweeper is not - // yet aware of the anchor. In that case, offer it as a new input to the - // sweeper. An exclusive group is not necessary anymore, because we know - // that this is the only anchor that can be swept. - if err == lnwallet.ErrNotMine { - anchorInput := input.MakeBaseInput( - &c.anchor, - input.CommitmentAnchor, - &c.anchorSignDescriptor, - c.broadcastHeight, - ) - - resultChan, err = c.Sweeper.SweepInput( - &anchorInput, - sweep.Params{ - Fee: sweep.FeePreference{ - FeeRate: relayFeeRate, - }, - }, - ) - if err != nil { - return nil, err - } + if err != nil { + return nil, err } var ( diff --git a/contractcourt/briefcase.go b/contractcourt/briefcase.go index 1685401c8..fbf2e96a8 100644 --- a/contractcourt/briefcase.go +++ b/contractcourt/briefcase.go @@ -442,7 +442,6 @@ func (b *boltArbitratorLog) CurrentState() (ArbitratorState, error) { // // NOTE: Part of the ContractResolver interface. func (b *boltArbitratorLog) CommitState(s ArbitratorState) error { - fmt.Printf("yeee: %T\n", b.db) return kvdb.Batch(b.db, func(tx kvdb.RwTx) error { scopeBucket, err := tx.CreateTopLevelBucket(b.scopeKey[:]) if err != nil { @@ -727,7 +726,7 @@ func (b *boltArbitratorLog) FetchContractResolutions() (*ContractResolutions, er numOutgoing uint32 ) - // Next, we'll read out he incoming and outgoing HTLC + // Next, we'll read out the incoming and outgoing HTLC // resolutions. 
err = binary.Read(resReader, endian, &numIncoming) if err != nil { diff --git a/contractcourt/chain_arbitrator.go b/contractcourt/chain_arbitrator.go index dacbc1f59..79c722cc3 100644 --- a/contractcourt/chain_arbitrator.go +++ b/contractcourt/chain_arbitrator.go @@ -15,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" @@ -59,7 +60,7 @@ type ChainArbitratorConfig struct { // broadcast our commitment transaction if we have incoming htlcs. This // value should be set based on our current fee estimation of the // commitment transaction. We use this to determine when we should - // broadcast instead of the just the HTLC timeout, as we want to ensure + // broadcast instead of just the HTLC timeout, as we want to ensure // that the commitment transaction is already confirmed, by the time the // HTLC expires. Otherwise we may end up not settling the htlc on-chain // because the other party managed to time it out. @@ -89,7 +90,7 @@ type ChainArbitratorConfig struct { DeliverResolutionMsg func(...ResolutionMsg) error // MarkLinkInactive is a function closure that the ChainArbitrator will - // use to mark that active HTLC's shouldn't be attempt ted to be routed + // use to mark that active HTLC's shouldn't be attempted to be routed // over a particular channel. This function will be called in that a // ChannelArbitrator decides that it needs to go to chain in order to // resolve contracts. @@ -157,7 +158,7 @@ type ChainArbitratorConfig struct { // resolution. 
OnionProcessor OnionProcessor - // PaymentsExpirationGracePeriod indicates is a time window we let the + // PaymentsExpirationGracePeriod indicates a time window we let the // other node to cancel an outgoing htlc that our node has initiated and // has timed out. PaymentsExpirationGracePeriod time.Duration @@ -715,7 +716,10 @@ func (c *ChainArbitrator) rebroadcast(channel *channeldb.OpenChannel, log.Infof("Re-publishing %s close tx(%v) for channel %v", kind, closeTx.TxHash(), chanPoint) - err = c.cfg.PublishTx(closeTx, "") + label := labels.MakeLabel( + labels.LabelTypeChannelClose, &channel.ShortChannelID, + ) + err = c.cfg.PublishTx(closeTx, label) if err != nil && err != lnwallet.ErrDoubleSpend { log.Warnf("Unable to broadcast %s close tx(%v): %v", kind, closeTx.TxHash(), err) diff --git a/contractcourt/chain_arbitrator_test.go b/contractcourt/chain_arbitrator_test.go index 60c7eebe7..e197c0b09 100644 --- a/contractcourt/chain_arbitrator_test.go +++ b/contractcourt/chain_arbitrator_test.go @@ -8,8 +8,10 @@ import ( "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" + "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/clock" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" ) @@ -80,8 +82,12 @@ func TestChainArbitratorRepublishCloses(t *testing.T) { published := make(map[chainhash.Hash]int) chainArbCfg := ChainArbitratorConfig{ - ChainIO: &mockChainIO{}, - Notifier: &mockNotifier{}, + ChainIO: &mock.ChainIO{}, + Notifier: &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), + }, PublishTx: func(tx *wire.MsgTx, _ string) error { published[tx.TxHash()]++ return nil @@ -172,8 +178,12 @@ func TestResolveContract(t *testing.T) { // chain arbitrator that should pick up these new channels and launch // resolver for 
them. chainArbCfg := ChainArbitratorConfig{ - ChainIO: &mockChainIO{}, - Notifier: &mockNotifier{}, + ChainIO: &mock.ChainIO{}, + Notifier: &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), + }, PublishTx: func(tx *wire.MsgTx, _ string) error { return nil }, diff --git a/contractcourt/chain_watcher.go b/contractcourt/chain_watcher.go index aab395c6e..ee62c0ceb 100644 --- a/contractcourt/chain_watcher.go +++ b/contractcourt/chain_watcher.go @@ -500,7 +500,7 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { select { // We've detected a spend of the channel onchain! Depending on the type - // of spend, we'll act accordingly , so we'll examine the spending + // of spend, we'll act accordingly, so we'll examine the spending // transaction to determine what we should do. // // TODO(Roasbeef): need to be able to ensure this only triggers @@ -568,7 +568,7 @@ func (c *chainWatcher) closeObserver(spendNtfn *chainntnfs.SpendEvent) { // Next, we'll check to see if this is a cooperative channel // closure or not. This is characterized by having an input - // sequence number that's finalized. This won't happen with + // sequence number that's finalized. This won't happen with // regular commitment transactions due to the state hint // encoding scheme. 
if commitTxBroadcast.TxIn[0].Sequence == wire.MaxTxInSequenceNum { diff --git a/contractcourt/chain_watcher_test.go b/contractcourt/chain_watcher_test.go index 62c768717..50e73ec52 100644 --- a/contractcourt/chain_watcher_test.go +++ b/contractcourt/chain_watcher_test.go @@ -7,58 +7,15 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" ) -type mockNotifier struct { - spendChan chan *chainntnfs.SpendDetail - epochChan chan *chainntnfs.BlockEpoch - confChan chan *chainntnfs.TxConfirmation -} - -func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, numConfs, - heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { - return &chainntnfs.ConfirmationEvent{ - Confirmed: m.confChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) { - - return &chainntnfs.BlockEpochEvent{ - Epochs: m.epochChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) Start() error { - return nil -} - -func (m *mockNotifier) Started() bool { - return true -} - -func (m *mockNotifier) Stop() error { - return nil -} -func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, - heightHint uint32) (*chainntnfs.SpendEvent, error) { - - return &chainntnfs.SpendEvent{ - Spend: m.spendChan, - Cancel: func() {}, - }, nil -} - // TestChainWatcherRemoteUnilateralClose tests that the chain watcher is able // to properly detect a normal unilateral close by the remote node using their // lowest commitment. 
@@ -77,8 +34,10 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { // With the channels created, we'll now create a chain watcher instance // which will be watching for any closes of Alice's channel. - aliceNotifier := &mockNotifier{ - spendChan: make(chan *chainntnfs.SpendDetail), + aliceNotifier := &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), @@ -107,7 +66,7 @@ func TestChainWatcherRemoteUnilateralClose(t *testing.T) { SpenderTxHash: &bobTxHash, SpendingTx: bobCommit, } - aliceNotifier.spendChan <- bobSpend + aliceNotifier.SpendChan <- bobSpend // We should get a new spend event over the remote unilateral close // event channel. @@ -166,8 +125,10 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { // With the channels created, we'll now create a chain watcher instance // which will be watching for any closes of Alice's channel. - aliceNotifier := &mockNotifier{ - spendChan: make(chan *chainntnfs.SpendDetail), + aliceNotifier := &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), @@ -216,7 +177,7 @@ func TestChainWatcherRemoteUnilateralClosePendingCommit(t *testing.T) { SpenderTxHash: &bobTxHash, SpendingTx: bobCommit, } - aliceNotifier.spendChan <- bobSpend + aliceNotifier.SpendChan <- bobSpend // We should get a new spend event over the remote unilateral close // event channel. @@ -292,8 +253,10 @@ func TestChainWatcherDataLossProtect(t *testing.T) { // With the channels created, we'll now create a chain watcher // instance which will be watching for any closes of Alice's // channel. 
- aliceNotifier := &mockNotifier{ - spendChan: make(chan *chainntnfs.SpendDetail), + aliceNotifier := &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), @@ -350,7 +313,7 @@ func TestChainWatcherDataLossProtect(t *testing.T) { SpenderTxHash: &bobTxHash, SpendingTx: bobCommit, } - aliceNotifier.spendChan <- bobSpend + aliceNotifier.SpendChan <- bobSpend // We should get a new uni close resolution that indicates we // processed the DLP scenario. @@ -461,8 +424,10 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) { // With the channels created, we'll now create a chain watcher // instance which will be watching for any closes of Alice's // channel. - aliceNotifier := &mockNotifier{ - spendChan: make(chan *chainntnfs.SpendDetail), + aliceNotifier := &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: make(chan *chainntnfs.TxConfirmation), } aliceChainWatcher, err := newChainWatcher(chainWatcherConfig{ chanState: aliceChannel.State(), @@ -517,7 +482,7 @@ func TestChainWatcherLocalForceCloseDetect(t *testing.T) { SpenderTxHash: &aliceTxHash, SpendingTx: aliceCommit, } - aliceNotifier.spendChan <- aliceSpend + aliceNotifier.SpendChan <- aliceSpend // We should get a local force close event from Alice as she // should be able to detect the close based on the commitment diff --git a/contractcourt/channel_arbitrator.go b/contractcourt/channel_arbitrator.go index b67d2c7cd..2d50ca1de 100644 --- a/contractcourt/channel_arbitrator.go +++ b/contractcourt/channel_arbitrator.go @@ -16,6 +16,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/labels" 
"github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" @@ -29,6 +30,12 @@ var ( "process of being force closed") ) +const ( + // anchorSweepConfTarget is the conf target used when sweeping + // commitment anchors. + anchorSweepConfTarget = 6 +) + // WitnessSubscription represents an intent to be notified once new witnesses // are discovered by various active contract resolvers. A contract resolver may // use this to be notified of when it can satisfy an incoming contract after we @@ -874,7 +881,11 @@ func (c *ChannelArbitrator) stateStep( // At this point, we'll now broadcast the commitment // transaction itself. - if err := c.cfg.PublishTx(closeTx, ""); err != nil { + label := labels.MakeLabel( + labels.LabelTypeChannelClose, &c.cfg.ShortChanID, + ) + + if err := c.cfg.PublishTx(closeTx, label); err != nil { log.Errorf("ChannelArbitrator(%v): unable to broadcast "+ "close tx: %v", c.cfg.ChanPoint, err) if err != lnwallet.ErrDoubleSpend { @@ -1055,9 +1066,6 @@ func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution, // anchors from being batched together. exclusiveGroup := c.cfg.ShortChanID.ToUint64() - // Retrieve the current minimum fee rate from the sweeper. - minFeeRate := c.cfg.Sweeper.RelayFeePerKW() - for _, anchor := range anchors { log.Debugf("ChannelArbitrator(%v): pre-confirmation sweep of "+ "anchor of tx %v", c.cfg.ChanPoint, anchor.CommitAnchor) @@ -1068,18 +1076,25 @@ func (c *ChannelArbitrator) sweepAnchors(anchors []*lnwallet.AnchorResolution, input.CommitmentAnchor, &anchor.AnchorSignDescriptor, heightHint, + &input.TxInfo{ + Fee: anchor.CommitFee, + Weight: anchor.CommitWeight, + }, ) - // Sweep anchor output with the minimum fee rate. This usually - // (up to a min relay fee of 3 sat/b) means that the anchor - // sweep will be economical. Also signal that this is a force - // sweep. 
If the user decides to bump the fee on the anchor - // sweep, it will be swept even if it isn't economical. + // Sweep anchor output with a confirmation target fee + // preference. Because this is a cpfp-operation, the anchor will + // only be attempted to sweep when the current fee estimate for + // the confirmation target exceeds the commit fee rate. + // + // Also signal that this is a force sweep, so that the anchor + // will be swept even if it isn't economical purely based on the + // anchor value. _, err := c.cfg.Sweeper.SweepInput( &anchorInput, sweep.Params{ Fee: sweep.FeePreference{ - FeeRate: minFeeRate, + ConfTarget: anchorSweepConfTarget, }, Force: true, ExclusiveGroup: &exclusiveGroup, @@ -1987,7 +2002,7 @@ func (c *ChannelArbitrator) resolveContract(currentContract ContractResolver) { switch { // If this contract produced another, then this means // the current contract was only able to be partially - // resolved in this step. So we'll not a contract swap + // resolved in this step. So we'll do a contract swap // within our logs: the new contract will take the // place of the old one. case nextContract != nil: @@ -2082,7 +2097,7 @@ func (c *ChannelArbitrator) UpdateContractSignals(newSignals *ContractSignals) { // channelAttendant is the primary goroutine that acts at the judicial // arbitrator between our channel state, the remote channel peer, and the -// blockchain Our judge). This goroutine will ensure that we faithfully execute +// blockchain (Our judge). This goroutine will ensure that we faithfully execute // all clauses of our contract in the case that we need to go on-chain for a // dispute. 
Currently, two such conditions warrant our intervention: when an // outgoing HTLC is about to timeout, and when we know the pre-image for an diff --git a/contractcourt/channel_arbitrator_test.go b/contractcourt/channel_arbitrator_test.go index ce0963ece..d3c85f263 100644 --- a/contractcourt/channel_arbitrator_test.go +++ b/contractcourt/channel_arbitrator_test.go @@ -19,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" ) @@ -334,10 +335,10 @@ func createTestChannelArbitrator(t *testing.T, log ArbitratorLog, }, OutgoingBroadcastDelta: 5, IncomingBroadcastDelta: 5, - Notifier: &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + Notifier: &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), }, IncubateOutputs: func(wire.OutPoint, *lnwallet.OutgoingHtlcResolution, @@ -878,7 +879,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { // We'll grab the old notifier here as our resolvers are still holding // a reference to this instance, and a new one will be created when we // restart the channel arb below. - oldNotifier := chanArb.cfg.Notifier.(*mockNotifier) + oldNotifier := chanArb.cfg.Notifier.(*mock.ChainNotifier) // At this point, in order to simulate a restart, we'll re-create the // channel arbitrator. We do this to ensure that all information @@ -927,7 +928,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { } // Send a notification that the expiry height has been reached. 
- oldNotifier.epochChan <- &chainntnfs.BlockEpoch{Height: 10} + oldNotifier.EpochChan <- &chainntnfs.BlockEpoch{Height: 10} // htlcOutgoingContestResolver is now transforming into a // htlcTimeoutResolver and should send the contract off for incubation. @@ -939,7 +940,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { // Notify resolver that the HTLC output of the commitment has been // spent. - oldNotifier.spendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} + oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} // Finally, we should also receive a resolution message instructing the // switch to cancel back the HTLC. @@ -967,7 +968,7 @@ func TestChannelArbitratorLocalForceClosePendingHtlc(t *testing.T) { } // Notify resolver that the second level transaction is spent. - oldNotifier.spendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} + oldNotifier.SpendChan <- &chainntnfs.SpendDetail{SpendingTx: closeTx} // At this point channel should be marked as resolved. chanArbCtxNew.AssertStateTransitions(StateFullyResolved) @@ -2236,9 +2237,9 @@ func TestChannelArbitratorAnchors(t *testing.T) { t.Fatalf("expected anchor resolver, got %T", resolver) } - // The anchor resolver is expected to offer the anchor input to the + // The anchor resolver is expected to re-offer the anchor input to the // sweeper. - <-chanArbCtx.sweeper.updatedInputs + <-chanArbCtx.sweeper.sweptInputs // The mock sweeper immediately signals success for that input. This // should transition the channel to the resolved state. 
diff --git a/contractcourt/commit_sweep_resolver_test.go b/contractcourt/commit_sweep_resolver_test.go index 578b5c45d..eb54dbf32 100644 --- a/contractcourt/commit_sweep_resolver_test.go +++ b/contractcourt/commit_sweep_resolver_test.go @@ -10,6 +10,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/sweep" @@ -17,7 +18,7 @@ import ( type commitSweepResolverTestContext struct { resolver *commitSweepResolver - notifier *mockNotifier + notifier *mock.ChainNotifier sweeper *mockSweeper resolverResultChan chan resolveResult t *testing.T @@ -26,10 +27,10 @@ type commitSweepResolverTestContext struct { func newCommitSweepResolverTestContext(t *testing.T, resolution *lnwallet.CommitOutputResolution) *commitSweepResolverTestContext { - notifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + notifier := &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), } sweeper := newMockSweeper() @@ -83,7 +84,7 @@ func (i *commitSweepResolverTestContext) resolve() { } func (i *commitSweepResolverTestContext) notifyEpoch(height int32) { - i.notifier.epochChan <- &chainntnfs.BlockEpoch{ + i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ Height: height, } } @@ -189,7 +190,7 @@ func TestCommitSweepResolverNoDelay(t *testing.T) { spendTx := &wire.MsgTx{} spendHash := spendTx.TxHash() - ctx.notifier.confChan <- &chainntnfs.TxConfirmation{ + ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ Tx: spendTx, } @@ -267,7 +268,7 @@ func testCommitSweepResolverDelay(t *testing.T, sweepErr error) { 
ctx.resolve() - ctx.notifier.confChan <- &chainntnfs.TxConfirmation{ + ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ BlockHeight: testInitialBlockHeight - 1, } diff --git a/contractcourt/htlc_incoming_resolver_test.go b/contractcourt/htlc_incoming_resolver_test.go index 55bc33b5e..4ada27ffb 100644 --- a/contractcourt/htlc_incoming_resolver_test.go +++ b/contractcourt/htlc_incoming_resolver_test.go @@ -12,6 +12,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/htlcswitch/hop" "github.com/lightningnetwork/lnd/invoices" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" @@ -295,7 +296,7 @@ type incomingResolverTestContext struct { registry *mockRegistry witnessBeacon *mockWitnessBeacon resolver *htlcIncomingContestResolver - notifier *mockNotifier + notifier *mock.ChainNotifier onionProcessor *mockOnionProcessor resolveErr chan error nextResolver ContractResolver @@ -303,10 +304,10 @@ type incomingResolverTestContext struct { } func newIncomingResolverTestContext(t *testing.T, isExit bool) *incomingResolverTestContext { - notifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + notifier := &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), } witnessBeacon := newMockWitnessBeacon() registry := &mockRegistry{ @@ -377,7 +378,7 @@ func (i *incomingResolverTestContext) resolve() { } func (i *incomingResolverTestContext) notifyEpoch(height int32) { - i.notifier.epochChan <- &chainntnfs.BlockEpoch{ + i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ Height: height, } } diff --git a/contractcourt/htlc_outgoing_contest_resolver_test.go 
b/contractcourt/htlc_outgoing_contest_resolver_test.go index 10d8fbc1c..987c1a7a8 100644 --- a/contractcourt/htlc_outgoing_contest_resolver_test.go +++ b/contractcourt/htlc_outgoing_contest_resolver_test.go @@ -9,6 +9,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" @@ -80,7 +81,7 @@ func TestHtlcOutgoingResolverRemoteClaim(t *testing.T) { spendHash := spendTx.TxHash() - ctx.notifier.spendChan <- &chainntnfs.SpendDetail{ + ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{ SpendingTx: spendTx, SpenderTxHash: &spendHash, } @@ -114,7 +115,7 @@ type resolveResult struct { type outgoingResolverTestContext struct { resolver *htlcOutgoingContestResolver - notifier *mockNotifier + notifier *mock.ChainNotifier preimageDB *mockWitnessBeacon resolverResultChan chan resolveResult resolutionChan chan ResolutionMsg @@ -122,10 +123,10 @@ type outgoingResolverTestContext struct { } func newOutgoingResolverTestContext(t *testing.T) *outgoingResolverTestContext { - notifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + notifier := &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), } checkPointChan := make(chan struct{}, 1) @@ -212,7 +213,7 @@ func (i *outgoingResolverTestContext) resolve() { } func (i *outgoingResolverTestContext) notifyEpoch(height int32) { - i.notifier.epochChan <- &chainntnfs.BlockEpoch{ + i.notifier.EpochChan <- &chainntnfs.BlockEpoch{ Height: height, } } diff --git a/contractcourt/htlc_success_resolver.go b/contractcourt/htlc_success_resolver.go index 
7b2620915..1a99cc3b6 100644 --- a/contractcourt/htlc_success_resolver.go +++ b/contractcourt/htlc_success_resolver.go @@ -10,6 +10,7 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/sweep" ) @@ -157,7 +158,10 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { // Regardless of whether an existing transaction was found or newly // constructed, we'll broadcast the sweep transaction to the // network. - err := h.PublishTx(h.sweepTx, "") + label := labels.MakeLabel( + labels.LabelTypeChannelClose, &h.ShortChanID, + ) + err := h.PublishTx(h.sweepTx, label) if err != nil { log.Infof("%T(%x): unable to publish tx: %v", h, h.htlc.RHash[:], err) @@ -206,7 +210,10 @@ func (h *htlcSuccessResolver) Resolve() (ContractResolver, error) { // the claiming process. // // TODO(roasbeef): after changing sighashes send to tx bundler - err := h.PublishTx(h.htlcResolution.SignedSuccessTx, "") + label := labels.MakeLabel( + labels.LabelTypeChannelClose, &h.ShortChanID, + ) + err := h.PublishTx(h.htlcResolution.SignedSuccessTx, label) if err != nil { return nil, err } diff --git a/contractcourt/htlc_success_resolver_test.go b/contractcourt/htlc_success_resolver_test.go index 0c700969f..6e44c22c7 100644 --- a/contractcourt/htlc_success_resolver_test.go +++ b/contractcourt/htlc_success_resolver_test.go @@ -8,6 +8,7 @@ import ( "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/kvdb" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwire" ) @@ -16,16 +17,16 @@ var testHtlcAmt = lnwire.MilliSatoshi(200000) type htlcSuccessResolverTestContext struct { resolver *htlcSuccessResolver - notifier *mockNotifier + notifier 
*mock.ChainNotifier resolverResultChan chan resolveResult t *testing.T } func newHtlcSuccessResolverTextContext(t *testing.T) *htlcSuccessResolverTestContext { - notifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + notifier := &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), } checkPointChan := make(chan struct{}, 1) @@ -116,7 +117,7 @@ func TestSingleStageSuccess(t *testing.T) { // We send a confirmation for our sweep tx to indicate that our sweep // succeeded. resolve := func(ctx *htlcSuccessResolverTestContext) { - ctx.notifier.confChan <- &chainntnfs.TxConfirmation{ + ctx.notifier.ConfChan <- &chainntnfs.TxConfirmation{ Tx: ctx.resolver.sweepTx, BlockHeight: testInitialBlockHeight - 1, } @@ -165,7 +166,7 @@ func TestSecondStageResolution(t *testing.T) { // We send a spend notification for our output to resolve our htlc. 
resolve := func(ctx *htlcSuccessResolverTestContext) { - ctx.notifier.spendChan <- &chainntnfs.SpendDetail{ + ctx.notifier.SpendChan <- &chainntnfs.SpendDetail{ SpendingTx: sweepTx, SpenderTxHash: &sweepHash, } diff --git a/contractcourt/htlc_timeout_resolver_test.go b/contractcourt/htlc_timeout_resolver_test.go index fdf0bba53..6e41ad791 100644 --- a/contractcourt/htlc_timeout_resolver_test.go +++ b/contractcourt/htlc_timeout_resolver_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" @@ -15,33 +14,11 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/channeldb/kvdb" "github.com/lightningnetwork/lnd/input" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" ) -type dummySignature struct{} - -func (s *dummySignature) Serialize() []byte { - return []byte{} -} - -func (s *dummySignature) Verify(_ []byte, _ *btcec.PublicKey) bool { - return true -} - -type mockSigner struct { -} - -func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) (input.Signature, error) { - return &dummySignature{}, nil -} - -func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, - signDesc *input.SignDescriptor) (*input.Script, error) { - return nil, nil -} - type mockWitnessBeacon struct { preImageUpdates chan lntypes.Preimage newPreimages chan []lntypes.Preimage @@ -93,7 +70,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { copy(fakePreimage[:], fakePreimageBytes) - signer := &mockSigner{} + signer := &mock.DummySigner{} sweepTx := &wire.MsgTx{ TxIn: []*wire.TxIn{ { @@ -164,7 +141,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { timeout: true, txToBroadcast: func() (*wire.MsgTx, error) { witness, err := input.SenderHtlcSpendTimeout( - &dummySignature{}, txscript.SigHashAll, + &mock.DummySignature{}, 
txscript.SigHashAll, signer, fakeSignDesc, sweepTx, ) if err != nil { @@ -189,7 +166,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { timeout: false, txToBroadcast: func() (*wire.MsgTx, error) { witness, err := input.ReceiverHtlcSpendRedeem( - &dummySignature{}, txscript.SigHashAll, + &mock.DummySignature{}, txscript.SigHashAll, fakePreimageBytes, signer, fakeSignDesc, sweepTx, ) @@ -226,10 +203,10 @@ func TestHtlcTimeoutResolver(t *testing.T) { }, } - notifier := &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch), - spendChan: make(chan *chainntnfs.SpendDetail), - confChan: make(chan *chainntnfs.TxConfirmation), + notifier := &mock.ChainNotifier{ + EpochChan: make(chan *chainntnfs.BlockEpoch), + SpendChan: make(chan *chainntnfs.SpendDetail), + ConfChan: make(chan *chainntnfs.TxConfirmation), } witnessBeacon := newMockWitnessBeacon() @@ -354,7 +331,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { spendTxHash := spendingTx.TxHash() select { - case notifier.spendChan <- &chainntnfs.SpendDetail{ + case notifier.SpendChan <- &chainntnfs.SpendDetail{ SpendingTx: spendingTx, SpenderTxHash: &spendTxHash, }: @@ -411,7 +388,7 @@ func TestHtlcTimeoutResolver(t *testing.T) { // only if this is a local commitment transaction. if !testCase.remoteCommit { select { - case notifier.spendChan <- &chainntnfs.SpendDetail{ + case notifier.SpendChan <- &chainntnfs.SpendDetail{ SpendingTx: spendingTx, SpenderTxHash: &spendTxHash, }: diff --git a/discovery/bootstrapper.go b/discovery/bootstrapper.go index 8291ff22d..6aced5987 100644 --- a/discovery/bootstrapper.go +++ b/discovery/bootstrapper.go @@ -287,6 +287,10 @@ type DNSSeedBootstrapper struct { // the network seed. dnsSeeds [][2]string net tor.Net + + // timeout is the maximum amount of time a dial will wait for a connect to + // complete. 
+ timeout time.Duration } // A compile time assertion to ensure that DNSSeedBootstrapper meets the @@ -300,8 +304,10 @@ var _ NetworkPeerBootstrapper = (*ChannelGraphBootstrapper)(nil) // used as a fallback for manual TCP resolution in the case of an error // receiving the UDP response. The second host should return a single A record // with the IP address of the authoritative name server. -func NewDNSSeedBootstrapper(seeds [][2]string, net tor.Net) NetworkPeerBootstrapper { - return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net} +func NewDNSSeedBootstrapper( + seeds [][2]string, net tor.Net, + timeout time.Duration) NetworkPeerBootstrapper { + return &DNSSeedBootstrapper{dnsSeeds: seeds, net: net, timeout: timeout} } // fallBackSRVLookup attempts to manually query for SRV records we need to @@ -327,7 +333,7 @@ func (d *DNSSeedBootstrapper) fallBackSRVLookup(soaShim string, // Once we have the IP address, we'll establish a TCP connection using // port 53. dnsServer := net.JoinHostPort(addrs[0], "53") - conn, err := d.net.Dial("tcp", dnsServer) + conn, err := d.net.Dial("tcp", dnsServer, d.timeout) if err != nil { return nil, err } @@ -389,7 +395,9 @@ search: // obtain a random sample of the encoded public keys of nodes. // We use the lndLookupSRV function for this task. 
primarySeed := dnsSeedTuple[0] - _, addrs, err := d.net.LookupSRV("nodes", "tcp", primarySeed) + _, addrs, err := d.net.LookupSRV( + "nodes", "tcp", primarySeed, d.timeout, + ) if err != nil { log.Tracef("Unable to lookup SRV records via "+ "primary seed (%v): %v", primarySeed, err) diff --git a/discovery/gossiper_test.go b/discovery/gossiper_test.go index 5281d4aa0..9dfad6bad 100644 --- a/discovery/gossiper_test.go +++ b/discovery/gossiper_test.go @@ -23,8 +23,8 @@ import ( "github.com/go-errors/errors" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/channeldb" - "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/lnpeer" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lnwire" "github.com/lightningnetwork/lnd/netann" @@ -92,26 +92,6 @@ func makeTestDB() (*channeldb.DB, func(), error) { return cdb, cleanUp, nil } -type mockSigner struct { - privKey *btcec.PrivateKey -} - -func (n *mockSigner) SignMessage(pubKey *btcec.PublicKey, - msg []byte) (input.Signature, error) { - - if !pubKey.IsEqual(n.privKey.PubKey()) { - return nil, fmt.Errorf("unknown public key") - } - - digest := chainhash.DoubleHashB(msg) - sign, err := n.privKey.Sign(digest) - if err != nil { - return nil, fmt.Errorf("can't sign the message: %v", err) - } - - return sign, nil -} - type mockGraphSource struct { bestHeight uint32 @@ -555,7 +535,7 @@ func createNodeAnnouncement(priv *btcec.PrivateKey, a.ExtraOpaqueData = extraBytes[0] } - signer := mockSigner{priv} + signer := mock.SingleSigner{Privkey: priv} sig, err := netann.SignAnnouncement(&signer, priv.PubKey(), a) if err != nil { return nil, err @@ -607,7 +587,7 @@ func createUpdateAnnouncement(blockHeight uint32, func signUpdate(nodeKey *btcec.PrivateKey, a *lnwire.ChannelUpdate) error { pub := nodeKey.PubKey() - signer := mockSigner{nodeKey} + signer := mock.SingleSigner{Privkey: nodeKey} sig, err := 
netann.SignAnnouncement(&signer, pub, a) if err != nil { return err @@ -649,7 +629,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, a := createAnnouncementWithoutProof(blockHeight, extraBytes...) pub := nodeKeyPriv1.PubKey() - signer := mockSigner{nodeKeyPriv1} + signer := mock.SingleSigner{Privkey: nodeKeyPriv1} sig, err := netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err @@ -660,7 +640,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, } pub = nodeKeyPriv2.PubKey() - signer = mockSigner{nodeKeyPriv2} + signer = mock.SingleSigner{Privkey: nodeKeyPriv2} sig, err = netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err @@ -671,7 +651,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, } pub = bitcoinKeyPriv1.PubKey() - signer = mockSigner{bitcoinKeyPriv1} + signer = mock.SingleSigner{Privkey: bitcoinKeyPriv1} sig, err = netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err @@ -682,7 +662,7 @@ func createRemoteChannelAnnouncement(blockHeight uint32, } pub = bitcoinKeyPriv2.PubKey() - signer = mockSigner{bitcoinKeyPriv2} + signer = mock.SingleSigner{Privkey: bitcoinKeyPriv2} sig, err = netann.SignAnnouncement(&signer, pub, a) if err != nil { return nil, err @@ -761,7 +741,7 @@ func createTestCtx(startHeight uint32) (*testCtx, func(), error) { RotateTicker: ticker.NewForce(DefaultSyncerRotationInterval), HistoricalSyncTicker: ticker.NewForce(DefaultHistoricalSyncInterval), NumActiveSyncers: 3, - AnnSigner: &mockSigner{nodeKeyPriv1}, + AnnSigner: &mock.SingleSigner{Privkey: nodeKeyPriv1}, SubBatchDelay: time.Second * 5, MinimumBatchSize: 10, }, nodeKeyPub1) diff --git a/docs/INSTALL.md b/docs/INSTALL.md index 8214bc5f7..21c8bb9aa 100644 --- a/docs/INSTALL.md +++ b/docs/INSTALL.md @@ -29,7 +29,7 @@ **Note**: The minimum version of Go supported is Go 1.13. 
We recommend that users use the latest version of Go, which at the time of writing is - [`1.13`](https://blog.golang.org/go1.13). + [`1.15`](https://blog.golang.org/go1.15). On Linux: @@ -95,7 +95,7 @@ * **Go modules:** This project uses [Go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies as well as to provide *reproducible builds*. - Usage of Go modules (with Go 1.12) means that you no longer need to clone + Usage of Go modules (with Go 1.13) means that you no longer need to clone `lnd` into your `$GOPATH` for development purposes. Instead, your `lnd` repo can now live anywhere! @@ -124,7 +124,7 @@ make install **NOTE**: Our instructions still use the `$GOPATH` directory from prior -versions of Go, but with Go 1.12, it's now possible for `lnd` to live +versions of Go, but with Go 1.13, it's now possible for `lnd` to live _anywhere_ on your file system. For Windows WSL users, make will need to be referenced directly via diff --git a/docs/macaroons.md b/docs/macaroons.md index aae12d1d9..b1ed988cd 100644 --- a/docs/macaroons.md +++ b/docs/macaroons.md @@ -81,7 +81,14 @@ methods. This means a few important things: You can also run `lnd` with the `--no-macaroons` option, which skips the creation of the macaroon files and all macaroon checks within the RPC server. This means you can still pass a macaroon to the RPC server with a client, but -it won't be checked for validity. +it won't be checked for validity. Note that disabling authentication of a server +that's listening on a public interface is not allowed. This means the +`--no-macaroons` option is only permitted when the RPC server is in a private +network. In CIDR notation, the following IPs are considered private, +- [`169.254.0.0/16` and `fe80::/10`](https://en.wikipedia.org/wiki/Link-local_address). +- [`224.0.0.0/4` and `ff00::/8`](https://en.wikipedia.org/wiki/Multicast_address). +- [`10.0.0.0/8`, `172.16.0.0/12` and `192.168.0.0/16`](https://tools.ietf.org/html/rfc1918). 
+- [`fc00::/7`](https://tools.ietf.org/html/rfc4193). Since `lnd` requires macaroons by default in order to call RPC methods, `lncli` now reads a macaroon and provides it in the RPC call. Unless the path is diff --git a/docs/rest/websockets.md b/docs/rest/websockets.md new file mode 100644 index 000000000..705a4c731 --- /dev/null +++ b/docs/rest/websockets.md @@ -0,0 +1,99 @@ +# WebSockets with `lnd`'s REST API + +This document describes how streaming response REST calls can be used correctly +by making use of the WebSocket API. + +As an example, we are going to write a simple JavaScript program that subscribes +to `lnd`'s +[block notification RPC](https://api.lightning.community/#v2-chainnotifier-register-blocks). + +The WebSocket will be kept open as long as `lnd` runs and JavaScript program +isn't stopped. + +## Browser environment + +When using WebSockets in a browser, there are certain security limitations of +what header fields are allowed to be sent. Therefore, the macaroon cannot just +be added as a `Grpc-Metadata-Macaroon` header field as it would work with normal +REST calls. The browser will just ignore that header field and not send it. + +Instead we have added a workaround in `lnd`'s WebSocket proxy that allows +sending the macaroon as a WebSocket "protocol": + +```javascript +const host = 'localhost:8080'; // The default REST port of lnd, can be overwritten with --restlisten=ip:port +const macaroon = '0201036c6e6402eb01030a10625e7e60fd00f5a6f9cd53f33fc82a...'; // The hex encoded macaroon to send +const initialRequest = { // The initial request to send (see API docs for each RPC). + hash: "xlkMdV382uNPskw6eEjDGFMQHxHNnZZgL47aVDSwiRQ=", // Just some example to show that all `byte` fields always have to be base64 encoded in the REST API. + height: 144, +} + +// The protocol is our workaround for sending the macaroon because custom header +// fields aren't allowed to be sent by the browser when opening a WebSocket. 
+const protocolString = 'Grpc-Metadata-Macaroon+' + macaroon; + +// Let's now connect the web socket. Notice that all WebSocket open calls are +// always GET requests. If the RPC expects a call to be POST or DELETE (see API +// docs to find out), the query parameter "method" can be set to overwrite. +const wsUrl = 'wss://' + host + '/v2/chainnotifier/register/blocks?method=POST'; +let ws = new WebSocket(wsUrl, protocolString); +ws.onopen = function (event) { + // After the WS connection is establishes, lnd expects the client to send the + // initial message. If an RPC doesn't have any request parameters, an empty + // JSON object has to be sent as a string, for example: ws.send('{}') + ws.send(JSON.stringify(initialRequest)); +} +ws.onmessage = function (event) { + // We received a new message. + console.log(event); + + // The data we're really interested in is in data and is always a string + // that needs to be parsed as JSON and always contains a "result" field: + console.log("Payload: "); + console.log(JSON.parse(event.data).result); +} +ws.onerror = function (event) { + // An error occured, let's log it to the console. + console.log(event); +} +``` + +## Node.js environment + +With Node.js it is a bit easier to use the streaming response APIs because we +can set the macaroon header field directly. This is the example from the API +docs: + +```javascript +// -------------------------- +// Example with websockets: +// -------------------------- +const WebSocket = require('ws'); +const fs = require('fs'); +const macaroon = fs.readFileSync('LND_DIR/data/chain/bitcoin/simnet/admin.macaroon').toString('hex'); +let ws = new WebSocket('wss://localhost:8080/v2/chainnotifier/register/blocks?method=POST', { + // Work-around for self-signed certificates. 
+ rejectUnauthorized: false, + headers: { + 'Grpc-Metadata-Macaroon': macaroon, + }, +}); +let requestBody = { + hash: "", + height: "", +} +ws.on('open', function() { + ws.send(JSON.stringify(requestBody)); +}); +ws.on('error', function(err) { + console.log('Error: ' + err); +}); +ws.on('message', function(body) { + console.log(body); +}); +// Console output (repeated for every message in the stream): +// { +// "hash": , +// "height": , +// } +``` diff --git a/fundingmanager.go b/fundingmanager.go index aa3887ed1..a9276f1c5 100644 --- a/fundingmanager.go +++ b/fundingmanager.go @@ -22,6 +22,7 @@ import ( "github.com/lightningnetwork/lnd/htlcswitch" "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwallet" @@ -66,6 +67,11 @@ const ( // in the real world. MaxBtcFundingAmount = btcutil.Amount(1<<24) - 1 + // MaxBtcFundingAmountWumbo is a soft-limit on the maximum size of wumbo + // channels. This limit is 10 BTC and is the only thing standing between + // you and limitless channel size (apart from 21 million cap) + MaxBtcFundingAmountWumbo = btcutil.Amount(1000000000) + // maxLtcFundingAmount is a soft-limit of the maximum channel size // currently accepted on the Litecoin chain within the Lightning // Protocol. @@ -123,6 +129,7 @@ type reservationWithCtx struct { remoteCsvDelay uint16 remoteMinHtlc lnwire.MilliSatoshi remoteMaxValue lnwire.MilliSatoshi + remoteMaxHtlcs uint16 updateMtx sync.RWMutex lastUpdated time.Time @@ -243,6 +250,10 @@ type fundingConfig struct { // transaction to the network. PublishTransaction func(*wire.MsgTx, string) error + // UpdateLabel updates the label that a transaction has in our wallet, + // overwriting any existing labels. 
+ UpdateLabel func(chainhash.Hash, string) error + // FeeEstimator calculates appropriate fee rates based on historical // transaction information. FeeEstimator chainfee.Estimator @@ -354,6 +365,11 @@ type fundingConfig struct { // due to fees. MinChanSize btcutil.Amount + // MaxChanSize is the largest channel size that we'll accept as an + // inbound channel. We have such a parameter, so that you may decide how + // WUMBO you would like your channel. + MaxChanSize btcutil.Amount + // MaxPendingChannels is the maximum number of pending channels we // allow for each peer. MaxPendingChannels int @@ -576,8 +592,15 @@ func (f *fundingManager) start() error { channel.FundingOutpoint, fundingTxBuf.Bytes()) + // Set a nil short channel ID at this stage + // because we do not know it until our funding + // tx confirms. + label := labels.MakeLabel( + labels.LabelTypeChannelOpen, nil, + ) + err = f.cfg.PublishTransaction( - channel.FundingTxn, "", + channel.FundingTxn, label, ) if err != nil { fndgLog.Errorf("Unable to rebroadcast "+ @@ -1193,13 +1216,24 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { msg := fmsg.msg amt := msg.FundingAmount - // We count the number of pending channels for this peer. This is the - // sum of the active reservations and the channels pending open in the - // database. + // We get all pending channels for this peer. This is the list of the + // active reservations and the channels pending open in the database. f.resMtx.RLock() - numPending := len(f.activeReservations[peerIDKey]) + reservations := f.activeReservations[peerIDKey] + + // We don't count reservations that were created from a canned funding + // shim. The user has registered the shim and therefore expects this + // channel to arrive. + numPending := 0 + for _, res := range reservations { + if !res.reservation.IsCannedShim() { + numPending++ + } + } f.resMtx.RUnlock() + // Also count the channels that are already pending. 
There we don't know + // the underlying intent anymore, unfortunately. channels, err := f.cfg.Wallet.Cfg.Database.FetchOpenChannels(peerPubKey) if err != nil { f.failFundingFlow( @@ -1209,7 +1243,13 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { } for _, c := range channels { - if c.IsPending { + // Pending channels that have a non-zero thaw height were also + // created through a canned funding shim. Those also don't + // count towards the DoS protection limit. + // + // TODO(guggero): Properly store the funding type (wallet, shim, + // PSBT) on the channel so we don't need to use the thaw height. + if c.IsPending && c.ThawHeight == 0 { numPending++ } } @@ -1239,13 +1279,11 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { return } - // We'll reject any request to create a channel that's above the - // current soft-limit for channel size, but only if we're rejecting all - // wumbo channel initiations. - if f.cfg.NoWumboChans && msg.FundingAmount > MaxFundingAmount { + // Ensure that the remote party respects our maximum channel size. + if amt > f.cfg.MaxChanSize { f.failFundingFlow( fmsg.peer, fmsg.msg.PendingChannelID, - lnwire.ErrChanTooLarge, + lnwallet.ErrChanTooLarge(amt, f.cfg.MaxChanSize), ) return } @@ -1399,6 +1437,7 @@ func (f *fundingManager) handleFundingOpen(fmsg *fundingOpenMsg) { remoteCsvDelay: remoteCsvDelay, remoteMinHtlc: minHtlc, remoteMaxValue: remoteMaxValue, + remoteMaxHtlcs: maxHtlcs, err: make(chan error, 1), peer: fmsg.peer, } @@ -1548,7 +1587,6 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { // here so we can properly commit their accepted constraints to the // reservation. chanReserve := f.cfg.RequiredRemoteChanReserve(resCtx.chanAmt, msg.DustLimit) - maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(resCtx.chanAmt) // The remote node has responded with their portion of the channel // contribution. 
At this point, we can process their contribution which @@ -1562,7 +1600,7 @@ func (f *fundingManager) handleFundingAccept(fmsg *fundingAcceptMsg) { MaxPendingAmount: resCtx.remoteMaxValue, ChanReserve: chanReserve, MinHTLC: resCtx.remoteMinHtlc, - MaxAcceptedHtlcs: maxHtlcs, + MaxAcceptedHtlcs: resCtx.remoteMaxHtlcs, CsvDelay: resCtx.remoteCsvDelay, }, MultiSigKey: keychain.KeyDescriptor{ @@ -2032,7 +2070,13 @@ func (f *fundingManager) handleFundingSigned(fmsg *fundingSignedMsg) { fndgLog.Infof("Broadcasting funding tx for ChannelPoint(%v): %x", completeChan.FundingOutpoint, fundingTxBuf.Bytes()) - err = f.cfg.PublishTransaction(fundingTx, "") + // Set a nil short channel ID at this stage because we do not + // know it until our funding tx confirms. + label := labels.MakeLabel( + labels.LabelTypeChannelOpen, nil, + ) + + err = f.cfg.PublishTransaction(fundingTx, label) if err != nil { fndgLog.Errorf("Unable to broadcast funding tx %x for "+ "ChannelPoint(%v): %v", fundingTxBuf.Bytes(), @@ -2372,6 +2416,25 @@ func (f *fundingManager) handleFundingConfirmation( fndgLog.Errorf("unable to report short chan id: %v", err) } + // If we opened the channel, and lnd's wallet published our funding tx + // (which is not the case for some channels) then we update our + // transaction label with our short channel ID, which is known now that + // our funding transaction has confirmed. We do not label transactions + // we did not publish, because our wallet has no knowledge of them. 
+ if completeChan.IsInitiator && completeChan.ChanType.HasFundingTx() { + shortChanID := completeChan.ShortChanID() + label := labels.MakeLabel( + labels.LabelTypeChannelOpen, &shortChanID, + ) + + err = f.cfg.UpdateLabel( + completeChan.FundingOutpoint.Hash, label, + ) + if err != nil { + fndgLog.Errorf("unable to update label: %v", err) + } + } + // Close the discoverySignal channel, indicating to a separate // goroutine that the channel now is marked as open in the database // and that it is acceptable to process funding locked messages @@ -3073,6 +3136,7 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { minHtlcIn = msg.minHtlcIn remoteCsvDelay = msg.remoteCsvDelay maxValue = msg.maxValueInFlight + maxHtlcs = msg.maxHtlcs ) // We'll determine our dust limit depending on which chain is active. @@ -3211,6 +3275,10 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { maxValue = f.cfg.RequiredRemoteMaxValue(capacity) } + if maxHtlcs == 0 { + maxHtlcs = f.cfg.RequiredRemoteMaxHTLCs(capacity) + } + // If a pending channel map for this peer isn't already created, then // we create one, ultimately allowing us to track this pending // reservation within the target peer. @@ -3225,6 +3293,7 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { remoteCsvDelay: remoteCsvDelay, remoteMinHtlc: minHtlcIn, remoteMaxValue: maxValue, + remoteMaxHtlcs: maxHtlcs, reservation: reservation, peer: msg.peer, updates: msg.updates, @@ -3244,7 +3313,6 @@ func (f *fundingManager) handleInitFundingMsg(msg *initFundingMsg) { // policy to determine of required commitment constraints for the // remote party. 
chanReserve := f.cfg.RequiredRemoteChanReserve(capacity, ourDustLimit) - maxHtlcs := f.cfg.RequiredRemoteMaxHTLCs(capacity) fndgLog.Infof("Starting funding workflow with %v for pending_id(%x), "+ "committype=%v", msg.peer.Address(), chanID, commitType) diff --git a/fundingmanager_test.go b/fundingmanager_test.go index 9896563dc..411732037 100644 --- a/fundingmanager_test.go +++ b/fundingmanager_test.go @@ -33,6 +33,7 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnpeer" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" @@ -54,6 +55,9 @@ const ( // maxPending is the maximum number of channels we allow opening to the // same peer in the max pending channels test. maxPending = 4 + + // A dummy value to use for the funding broadcast height. + fundingBroadcastHeight = 123 ) var ( @@ -99,6 +103,8 @@ var ( } _, _ = testSig.R.SetString("63724406601629180062774974542967536251589935445068131219452686511677818569431", 10) _, _ = testSig.S.SetString("18801056069249825825291287104931333862866033135609736119018462340006816851118", 10) + + fundingNetParams = bitcoinTestNetParams ) type mockNotifier struct { @@ -282,7 +288,7 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, addr *lnwire.NetAddress, tempTestDir string, options ...cfgOption) (*testNode, error) { - netParams := activeNetParams.Params + netParams := fundingNetParams.Params estimator := chainfee.NewStaticEstimator(62500, 0) chainNotifier := &mockNotifier{ @@ -296,14 +302,14 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, publTxChan := make(chan *wire.MsgTx, 1) shutdownChan := make(chan struct{}) - wc := &mockWalletController{ - rootKey: alicePrivKey, + wc := &mock.WalletController{ + RootKey: alicePrivKey, } - signer := &mockSigner{ - key: alicePrivKey, + 
signer := &mock.SingleSigner{ + Privkey: alicePrivKey, } - bio := &mockChainIO{ - bestHeight: fundingBroadcastHeight, + bio := &mock.ChainIO{ + BestHeight: fundingBroadcastHeight, } // The mock channel event notifier will receive events for each pending @@ -323,8 +329,8 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, return nil, err } - keyRing := &mockSecretKeyRing{ - rootKey: alicePrivKey, + keyRing := &mock.SecretKeyRing{ + RootKey: alicePrivKey, } lnw, err := createTestWallet( @@ -421,8 +427,12 @@ func createTestFundingManager(t *testing.T, privKey *btcec.PrivateKey, publTxChan <- txn return nil }, + UpdateLabel: func(chainhash.Hash, string) error { + return nil + }, ZombieSweeperInterval: 1 * time.Hour, ReservationTimeout: 1 * time.Nanosecond, + MaxChanSize: MaxFundingAmount, MaxPendingChannels: lncfg.DefaultMaxPendingChannels, NotifyOpenChannelEvent: evt.NotifyOpenChannelEvent, OpenChannelPredicate: chainedAcceptor, @@ -524,6 +534,9 @@ func recreateAliceFundingManager(t *testing.T, alice *testNode) { publishChan <- txn return nil }, + UpdateLabel: func(chainhash.Hash, string) error { + return nil + }, ZombieSweeperInterval: oldCfg.ZombieSweeperInterval, ReservationTimeout: oldCfg.ReservationTimeout, OpenChannelPredicate: chainedAcceptor, @@ -643,7 +656,7 @@ func fundChannel(t *testing.T, alice, bob *testNode, localFundingAmt, errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, subtractFees: subtractFees, localFundingAmt: localFundingAmt, pushAmt: lnwire.NewMSatFromSatoshis(pushAmt), @@ -1567,7 +1580,7 @@ func TestFundingManagerPeerTimeoutAfterInitFunding(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: 500000, pushAmt: lnwire.NewMSatFromSatoshis(0), 
private: false, @@ -1629,7 +1642,7 @@ func TestFundingManagerPeerTimeoutAfterFundingOpen(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: 500000, pushAmt: lnwire.NewMSatFromSatoshis(0), private: false, @@ -1700,7 +1713,7 @@ func TestFundingManagerPeerTimeoutAfterFundingAccept(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: 500000, pushAmt: lnwire.NewMSatFromSatoshis(0), private: false, @@ -2424,7 +2437,7 @@ func TestFundingManagerCustomChannelParameters(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: localAmt, pushAmt: lnwire.NewMSatFromSatoshis(pushAmt), private: false, @@ -2709,7 +2722,7 @@ func TestFundingManagerMaxPendingChannels(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: 5000000, pushAmt: lnwire.NewMSatFromSatoshis(0), private: false, @@ -2879,7 +2892,7 @@ func TestFundingManagerRejectPush(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: 500000, pushAmt: lnwire.NewMSatFromSatoshis(10), private: true, @@ -2936,7 +2949,7 @@ func TestFundingManagerMaxConfs(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, 
localFundingAmt: 500000, pushAmt: lnwire.NewMSatFromSatoshis(10), private: false, @@ -3005,7 +3018,7 @@ func TestFundingManagerFundAll(t *testing.T) { Value: btcutil.Amount( 0.05 * btcutil.SatoshiPerBitcoin, ), - PkScript: coinPkScript, + PkScript: mock.CoinPkScript, OutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 0, @@ -3016,7 +3029,7 @@ func TestFundingManagerFundAll(t *testing.T) { Value: btcutil.Amount( 0.06 * btcutil.SatoshiPerBitcoin, ), - PkScript: coinPkScript, + PkScript: mock.CoinPkScript, OutPoint: wire.OutPoint{ Hash: chainhash.Hash{}, Index: 1, @@ -3050,7 +3063,7 @@ func TestFundingManagerFundAll(t *testing.T) { alice, bob := setupFundingManagers(t) defer tearDownFundingManagers(t, alice, bob) - alice.fundingMgr.cfg.Wallet.WalletController.(*mockWalletController).utxos = allCoins + alice.fundingMgr.cfg.Wallet.WalletController.(*mock.WalletController).Utxos = allCoins // We will consume the channel updates as we go, so no // buffering is needed. @@ -3200,6 +3213,75 @@ func expectOpenChannelMsg(t *testing.T, msgChan chan lnwire.Message) *lnwire.Ope return openChannelReq } +func TestMaxChannelSizeConfig(t *testing.T) { + t.Parallel() + + // Create a set of funding managers that will reject wumbo + // channels but set --maxchansize explicitly lower than soft-limit. + // Verify that wumbo rejecting funding managers will respect --maxchansize + // below 16777215 satoshi (MaxFundingAmount) limit. + alice, bob := setupFundingManagers(t, func(cfg *fundingConfig) { + cfg.NoWumboChans = true + cfg.MaxChanSize = MaxFundingAmount - 1 + }) + + // Attempt to create a channel above the limit + // imposed by --maxchansize, which should be rejected. 
+ updateChan := make(chan *lnrpc.OpenStatusUpdate) + errChan := make(chan error, 1) + initReq := &openChanReq{ + targetPubkey: bob.privKey.PubKey(), + chainHash: *fundingNetParams.GenesisHash, + localFundingAmt: MaxFundingAmount, + pushAmt: lnwire.NewMSatFromSatoshis(0), + private: false, + updates: updateChan, + err: errChan, + } + + // After processing the funding open message, bob should respond with + // an error rejecting the channel that exceeds size limit. + alice.fundingMgr.initFundingWorkflow(bob, initReq) + openChanMsg := expectOpenChannelMsg(t, alice.msgChan) + bob.fundingMgr.processFundingOpen(openChanMsg, alice) + assertErrorSent(t, bob.msgChan) + + // Create a set of funding managers that will reject wumbo + // channels but set --maxchansize explicitly higher than soft-limit + // A --maxchansize greater than this limit should have no effect. + tearDownFundingManagers(t, alice, bob) + alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { + cfg.NoWumboChans = true + cfg.MaxChanSize = MaxFundingAmount + 1 + }) + + // We expect Bob to respond with an Accept channel message. + alice.fundingMgr.initFundingWorkflow(bob, initReq) + openChanMsg = expectOpenChannelMsg(t, alice.msgChan) + bob.fundingMgr.processFundingOpen(openChanMsg, alice) + assertFundingMsgSent(t, bob.msgChan, "AcceptChannel") + + // Verify that wumbo accepting funding managers will respect --maxchansize + // Create the funding managers, this time allowing + // wumbo channels but setting --maxchansize explicitly. + tearDownFundingManagers(t, alice, bob) + alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { + cfg.NoWumboChans = false + cfg.MaxChanSize = btcutil.Amount(100000000) + }) + + // Attempt to create a channel above the limit + // imposed by --maxchansize, which should be rejected. 
+ initReq.localFundingAmt = btcutil.SatoshiPerBitcoin + 1 + + // After processing the funding open message, bob should respond with + // an error rejecting the channel that exceeds size limit. + alice.fundingMgr.initFundingWorkflow(bob, initReq) + openChanMsg = expectOpenChannelMsg(t, alice.msgChan) + bob.fundingMgr.processFundingOpen(openChanMsg, alice) + assertErrorSent(t, bob.msgChan) +} + // TestWumboChannelConfig tests that the funding manager will respect the wumbo // channel config param when creating or accepting new channels. func TestWumboChannelConfig(t *testing.T) { @@ -3218,7 +3300,7 @@ func TestWumboChannelConfig(t *testing.T) { errChan := make(chan error, 1) initReq := &openChanReq{ targetPubkey: bob.privKey.PubKey(), - chainHash: *activeNetParams.GenesisHash, + chainHash: *fundingNetParams.GenesisHash, localFundingAmt: MaxFundingAmount, pushAmt: lnwire.NewMSatFromSatoshis(0), private: false, @@ -3248,6 +3330,7 @@ func TestWumboChannelConfig(t *testing.T) { tearDownFundingManagers(t, alice, bob) alice, bob = setupFundingManagers(t, func(cfg *fundingConfig) { cfg.NoWumboChans = false + cfg.MaxChanSize = MaxBtcFundingAmountWumbo }) // We should now be able to initiate a wumbo channel funding w/o any diff --git a/go.mod b/go.mod index ab4d59dd1..0edb26dd2 100644 --- a/go.mod +++ b/go.mod @@ -5,11 +5,11 @@ require ( github.com/NebulousLabs/fastrand v0.0.0-20181203155948-6fb6489aac4e // indirect github.com/NebulousLabs/go-upnp v0.0.0-20180202185039-29b680b06c82 github.com/Yawning/aez v0.0.0-20180114000226-4dad034d9db2 - github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46 + github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f github.com/btcsuite/btcutil v1.0.2 - github.com/btcsuite/btcutil/psbt v1.0.2 - github.com/btcsuite/btcwallet v0.11.1-0.20200612012534-48addcd5591a + github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0 + 
github.com/btcsuite/btcwallet v0.11.1-0.20200904022754-2c5947a45222 github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 github.com/btcsuite/btcwallet/walletdb v1.3.3 @@ -48,7 +48,7 @@ require ( github.com/lightninglabs/neutrino v0.11.1-0.20200316235139-bffc52e8f200 github.com/lightninglabs/protobuf-hex-display v1.3.3-0.20191212020323-b444784ce75d github.com/lightningnetwork/lightning-onion v1.0.2-0.20200501022730-3c8c8d0b89ea - github.com/lightningnetwork/lnd/cert v1.0.2 + github.com/lightningnetwork/lnd/cert v1.0.3 github.com/lightningnetwork/lnd/clock v1.0.1 github.com/lightningnetwork/lnd/queue v1.0.4 github.com/lightningnetwork/lnd/ticker v1.0.0 @@ -66,6 +66,7 @@ require ( go.uber.org/zap v1.14.1 // indirect golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899 golang.org/x/net v0.0.0-20191002035440-2ec189313ef0 + golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 google.golang.org/grpc v1.24.0 gopkg.in/errgo.v1 v1.0.1 // indirect @@ -85,4 +86,4 @@ replace github.com/lightningnetwork/lnd/clock => ./clock replace git.schwanenlied.me/yawning/bsaes.git => github.com/Yawning/bsaes v0.0.0-20180720073208-c0276d75487e -go 1.12 +go 1.13 diff --git a/go.sum b/go.sum index f75d6f0da..c97fc7910 100644 --- a/go.sum +++ b/go.sum @@ -27,18 +27,18 @@ github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcug github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btcd v0.20.1-beta.0.20200513120220-b470eee47728/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46 h1:QyTpiR5nQe94vza2qkvf7Ns8XX2Rjh/vdIhO3RzGj4o= -github.com/btcsuite/btcd v0.20.1-beta.0.20200515232429-9f0179fd2c46/go.mod h1:Yktc19YNjh/Iz2//CX0vfRTS4IJKM/RKO5YZ9Fn+Pgo= 
+github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f h1:m/GhMTvDQLbID616c4TYdHyt0MZ9lH5B/nf9Lu3okCY= +github.com/btcsuite/btcd v0.20.1-beta.0.20200730232343-1db1b6f8217f/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng= github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/btcutil/psbt v1.0.2 h1:gCVY3KxdoEVU7Q6TjusPO+GANIwVgr9yTLqM+a6CZr8= -github.com/btcsuite/btcutil/psbt v1.0.2/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ= -github.com/btcsuite/btcwallet v0.11.1-0.20200612012534-48addcd5591a h1:AZ1Mf0gd9mgJqrTTIFUc17ep9EKUbQusVAIzJ6X+x3Q= -github.com/btcsuite/btcwallet v0.11.1-0.20200612012534-48addcd5591a/go.mod h1:9+AH3V5mcTtNXTKe+fe63fDLKGOwQbZqmvOVUef+JFE= +github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0 h1:3Zumkyl6PWyHuVJ04me0xeD9CnPOhNgeGpapFbzy7O4= +github.com/btcsuite/btcutil/psbt v1.0.3-0.20200826194809-5f93e33af2b0/go.mod h1:LVveMu4VaNSkIRTZu2+ut0HDBRuYjqGocxDMNS1KuGQ= +github.com/btcsuite/btcwallet v0.11.1-0.20200904022754-2c5947a45222 h1:rh1FQAhh+BeR29twIFDM0RLOFpDK62tsABtUkWctTXw= +github.com/btcsuite/btcwallet v0.11.1-0.20200904022754-2c5947a45222/go.mod h1:owv9oZqM0HnUW+ByF7VqOgfs2eb0ooiePW/+Tl/i/Nk= github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0 h1:KGHMW5sd7yDdDMkCZ/JpP0KltolFsQcB973brBnfj4c= github.com/btcsuite/btcwallet/wallet/txauthor v1.0.0/go.mod h1:VufDts7bd/zs3GV13f/lXc/0lXrPnvxD/NvmpG/FEKU= 
github.com/btcsuite/btcwallet/wallet/txrules v1.0.0 h1:2VsfS0sBedcM5KmDzRMT3+b6xobqWveZGvjb+jFez5w= @@ -47,8 +47,6 @@ github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0 h1:6DxkcoMnCPY4E9cUDPB5tbuuf github.com/btcsuite/btcwallet/wallet/txsizes v1.0.0/go.mod h1:pauEU8UuMFiThe5PB3EO+gO5kx87Me5NvdQDsTuq6cs= github.com/btcsuite/btcwallet/walletdb v1.0.0/go.mod h1:bZTy9RyYZh9fLnSua+/CD48TJtYJSHjjYcSaszuxCCk= github.com/btcsuite/btcwallet/walletdb v1.2.0/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc= -github.com/btcsuite/btcwallet/walletdb v1.3.1 h1:lW1Ac3F1jJY4K11P+YQtRNcP5jFk27ASfrV7C6mvRU0= -github.com/btcsuite/btcwallet/walletdb v1.3.1/go.mod h1:9cwc1Yyg4uvd4ZdfdoMnALji+V9gfWSMfxEdLdR5Vwc= github.com/btcsuite/btcwallet/walletdb v1.3.2 h1:nFnMBVgkqoVOx08Z756oDwNc9sdVgYR52T1ONSXs90w= github.com/btcsuite/btcwallet/walletdb v1.3.2/go.mod h1:GZCMPNpUu5KE3ASoVd+k06p/1OW8OwNGCCaNWRto2cQ= github.com/btcsuite/btcwallet/walletdb v1.3.3 h1:u6e7vRIKBF++cJy+hOHaMGg+88ZTwvpaY27AFvtB668= @@ -86,6 +84,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/decred/dcrd/lru v1.0.0 h1:Kbsb1SFDsIlaupWPwsPp+dkxiBY1frcS07PCPgotKz8= +github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= @@ -332,6 +332,7 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd h1:DBH9mDw0zluJT/R+nGuV3jWFW golang.org/x/sys 
v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200724161237-0e2f3a69832c h1:UIcGWL6/wpCfyGuJnRFJRurA+yj8RrW7Q6x2YMCXt6c= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/healthcheck/diskcheck.go b/healthcheck/diskcheck.go new file mode 100644 index 000000000..ed230d275 --- /dev/null +++ b/healthcheck/diskcheck.go @@ -0,0 +1,18 @@ +// +build !windows,!solaris,!netbsd,!openbsd + +package healthcheck + +import "syscall" + +// AvailableDiskSpace returns ratio of available disk space to total capacity. +func AvailableDiskSpace(path string) (float64, error) { + s := syscall.Statfs_t{} + err := syscall.Statfs(path, &s) + if err != nil { + return 0, err + } + + // Calculate our free blocks/total blocks to get our total ratio of + // free blocks. + return float64(s.Bfree) / float64(s.Blocks), nil +} diff --git a/healthcheck/diskcheck_netbsd.go b/healthcheck/diskcheck_netbsd.go new file mode 100644 index 000000000..d44330b79 --- /dev/null +++ b/healthcheck/diskcheck_netbsd.go @@ -0,0 +1,17 @@ +package healthcheck + +import "golang.org/x/sys/unix" + +// AvailableDiskSpace returns ratio of available disk space to total capacity +// for netbsd. +func AvailableDiskSpace(path string) (float64, error) { + s := unix.Statvfs_t{} + err := unix.Statvfs(path, &s) + if err != nil { + return 0, err + } + + // Calculate our free blocks/total blocks to get our total ratio of + // free blocks. 
+ return float64(s.Bfree) / float64(s.Blocks), nil +} diff --git a/healthcheck/diskcheck_openbsd.go b/healthcheck/diskcheck_openbsd.go new file mode 100644 index 000000000..4738db9ac --- /dev/null +++ b/healthcheck/diskcheck_openbsd.go @@ -0,0 +1,17 @@ +package healthcheck + +import "golang.org/x/sys/unix" + +// AvailableDiskSpace returns ratio of available disk space to total capacity +// for openbsd. +func AvailableDiskSpace(path string) (float64, error) { + s := unix.Statfs_t{} + err := unix.Statfs(path, &s) + if err != nil { + return 0, err + } + + // Calculate our free blocks/total blocks to get our total ratio of + // free blocks. + return float64(s.F_bfree) / float64(s.F_blocks), nil +} diff --git a/healthcheck/diskcheck_solaris.go b/healthcheck/diskcheck_solaris.go new file mode 100644 index 000000000..d44330b79 --- /dev/null +++ b/healthcheck/diskcheck_solaris.go @@ -0,0 +1,17 @@ +package healthcheck + +import "golang.org/x/sys/unix" + +// AvailableDiskSpace returns ratio of available disk space to total capacity +// for solaris. +func AvailableDiskSpace(path string) (float64, error) { + s := unix.Statvfs_t{} + err := unix.Statvfs(path, &s) + if err != nil { + return 0, err + } + + // Calculate our free blocks/total blocks to get our total ratio of + // free blocks. + return float64(s.Bfree) / float64(s.Blocks), nil +} diff --git a/healthcheck/diskcheck_windows.go b/healthcheck/diskcheck_windows.go new file mode 100644 index 000000000..7fed088b9 --- /dev/null +++ b/healthcheck/diskcheck_windows.go @@ -0,0 +1,17 @@ +package healthcheck + +import "golang.org/x/sys/windows" + +// AvailableDiskSpace returns ratio of available disk space to total capacity +// for windows. 
+func AvailableDiskSpace(path string) (float64, error) { + var free, total, avail uint64 + + pathPtr, err := windows.UTF16PtrFromString(path) + if err != nil { + panic(err) + } + err = windows.GetDiskFreeSpaceEx(pathPtr, &free, &total, &avail) + + return float64(avail) / float64(total), nil +} diff --git a/healthcheck/healthcheck.go b/healthcheck/healthcheck.go new file mode 100644 index 000000000..a1f7864ac --- /dev/null +++ b/healthcheck/healthcheck.go @@ -0,0 +1,231 @@ +// Package healthcheck contains a monitor which takes a set of liveliness checks +// which it periodically checks. If a check fails after its configured number +// of allowed call attempts, the monitor will send a request to shutdown using +// the function it is provided in its config. Checks are dispatched in their own +// goroutines so that they do not block each other. +package healthcheck + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" + + "github.com/lightningnetwork/lnd/ticker" +) + +// Config contains configuration settings for our monitor. +type Config struct { + // Checks is a set of health checks that assert that lnd has access to + // critical resources. + Checks []*Observation + + // Shutdown should be called to request safe shutdown on failure of a + // health check. + Shutdown shutdownFunc +} + +// shutdownFunc is the signature we use for a shutdown function which allows us +// to print our reason for shutdown. +type shutdownFunc func(format string, params ...interface{}) + +// Monitor periodically checks a series of configured liveliness checks to +// ensure that lnd has access to all critical resources. +type Monitor struct { + started int32 // To be used atomically. + stopped int32 // To be used atomically. + + cfg *Config + + quit chan struct{} + wg sync.WaitGroup +} + +// NewMonitor returns a monitor with the provided config. 
+func NewMonitor(cfg *Config) *Monitor { + return &Monitor{ + cfg: cfg, + quit: make(chan struct{}), + } +} + +// Start launches the goroutines required to run our monitor. +func (m *Monitor) Start() error { + if !atomic.CompareAndSwapInt32(&m.started, 0, 1) { + return errors.New("monitor already started") + } + + // Run through all of the health checks that we have configured and + // start a goroutine for each check. + for _, check := range m.cfg.Checks { + check := check + + // Skip over health checks that are disabled by setting zero + // attempts. + if check.Attempts == 0 { + log.Warnf("check: %v configured with 0 attempts, "+ + "skipping it", check.Name) + + continue + } + + m.wg.Add(1) + go func() { + defer m.wg.Done() + check.monitor(m.cfg.Shutdown, m.quit) + }() + } + + return nil +} + +// Stop sends all goroutines the signal to exit and waits for them to exit. +func (m *Monitor) Stop() error { + if !atomic.CompareAndSwapInt32(&m.stopped, 0, 1) { + return fmt.Errorf("monitor already stopped") + } + + close(m.quit) + m.wg.Wait() + + return nil +} + +// CreateCheck is a helper function that takes a function that produces an error +// and wraps it in a function that returns its result on an error channel. +// We do not wait group the goroutine running our checkFunc because we expect +// to be dealing with health checks that may block; if we wait group them, we +// may wait forever. Ideally future health checks will allow callers to cancel +// them early, and we can wait group this. +func CreateCheck(checkFunc func() error) func() chan error { + return func() chan error { + errChan := make(chan error, 1) + go func() { + errChan <- checkFunc() + }() + + return errChan + } +} + +// Observation represents a liveliness check that we periodically check. +type Observation struct { + // Name describes the health check. + Name string + + // Check runs the health check itself, returning an error channel that + // is expected to receive nil or an error. 
+ Check func() chan error + + // Interval is a ticker which triggers running our check function. This + // ticker must be started and stopped by the observation. + Interval ticker.Ticker + + // Attempts is the number of calls we make for a single check before + // failing. + Attempts int + + // Timeout is the amount of time we allow our check function to take + // before we time it out. + Timeout time.Duration + + // Backoff is the amount of time we back off between retries for failed + // checks. + Backoff time.Duration +} + +// NewObservation creates an observation. +func NewObservation(name string, check func() error, interval, + timeout, backoff time.Duration, attempts int) *Observation { + + return &Observation{ + Name: name, + Check: CreateCheck(check), + Interval: ticker.New(interval), + Attempts: attempts, + Timeout: timeout, + Backoff: backoff, + } +} + +// String returns a string representation of an observation. +func (o *Observation) String() string { + return o.Name +} + +// monitor executes a health check every time its interval ticks until the quit +// channel signals that we should shutdown. This function is also responsible +// for starting and stopping our ticker. +func (o *Observation) monitor(shutdown shutdownFunc, quit chan struct{}) { + log.Debugf("Monitoring: %v", o) + + o.Interval.Resume() + defer o.Interval.Stop() + + for { + select { + case <-o.Interval.Ticks(): + o.retryCheck(quit, shutdown) + + // Exit if we receive the instruction to shutdown. + case <-quit: + return + } + } +} + +// retryCheck calls a check function until it succeeds, or we reach our +// configured number of attempts, waiting for our back off period between failed +// calls. If we fail to obtain a passing health check after the allowed number +// of calls, we will request shutdown. +func (o *Observation) retryCheck(quit chan struct{}, shutdown shutdownFunc) { + var count int + + for count < o.Attempts { + // Increment our call count and call the health check endpoint. 
+ count++ + + // Wait for our check to return, timeout to elapse, or quit + // signal to be received. + var err error + select { + case err = <-o.Check(): + + case <-time.After(o.Timeout): + err = fmt.Errorf("health check: %v timed out after: "+ + "%v", o, o.Timeout) + + case <-quit: + return + } + + // If our error is nil, we have passed our health check, so we + // can exit. + if err == nil { + return + } + + // If we have reached our allowed number of attempts, this + // check has failed so we request shutdown. + if count == o.Attempts { + shutdown("Health check: %v failed after %v "+ + "calls", o, o.Attempts) + + return + } + + log.Debugf("Health check: %v, call: %v failed with: %v, "+ + "backing off for: %v", o, count, err, o.Backoff) + + // If we are still within the number of calls allowed for this + // check, we wait for our back off period to elapse, or exit if + // we get the signal to shutdown. + select { + case <-time.After(o.Backoff): + + case <-quit: + return + } + } +} diff --git a/healthcheck/healthcheck_test.go b/healthcheck/healthcheck_test.go new file mode 100644 index 000000000..ed810ed73 --- /dev/null +++ b/healthcheck/healthcheck_test.go @@ -0,0 +1,225 @@ +package healthcheck + +import ( + "errors" + "testing" + "time" + + "github.com/lightningnetwork/lnd/ticker" + "github.com/stretchr/testify/require" +) + +var ( + errNonNil = errors.New("non-nil test error") + timeout = time.Second + testTime = time.Unix(1, 2) +) + +type mockedCheck struct { + t *testing.T + errChan chan error +} + +// newMockCheck creates a new mock. +func newMockCheck(t *testing.T) *mockedCheck { + return &mockedCheck{ + t: t, + errChan: make(chan error), + } +} + +// call returns our mock's error channel, which we can send responses on. +func (m *mockedCheck) call() chan error { + return m.errChan +} + +// sendError sends an error into our mock's error channel, mocking the sending +// of a response from our check function. 
+func (m *mockedCheck) sendError(err error) { + select { + case m.errChan <- err: + case <-time.After(timeout): + m.t.Fatalf("could not send error: %v", err) + } +} + +// TestMonitor tests creation and triggering of a monitor with a health check. +func TestMonitor(t *testing.T) { + intervalTicker := ticker.NewForce(time.Hour) + + mock := newMockCheck(t) + shutdown := make(chan struct{}) + + // Create our config for monitoring. We will use a 0 back off so that + // our test does not need to wait. + cfg := &Config{ + Checks: []*Observation{ + { + Check: mock.call, + Interval: intervalTicker, + Attempts: 2, + Backoff: 0, + Timeout: time.Hour, + }, + }, + Shutdown: func(string, ...interface{}) { + shutdown <- struct{}{} + }, + } + monitor := NewMonitor(cfg) + + require.NoError(t, monitor.Start(), "could not start monitor") + + // Tick is a helper we will use to tick our interval. + tick := func() { + select { + case intervalTicker.Force <- testTime: + case <-time.After(timeout): + t.Fatal("could not tick timer") + } + } + + // Tick our timer and provide our error channel with a nil error. This + // mocks our check function succeeding on the first call. + tick() + mock.sendError(nil) + + // Now we tick our timer again. This time send a non-nil error, followed + // by a nil error. This tests our retry logic, because we allow 2 + // retries, so should recover without needing to shutdown. + tick() + mock.sendError(errNonNil) + mock.sendError(nil) + + // Finally, we tick our timer once more, and send two non-nil errors + // into our error channel. This mocks our check function failing twice. + tick() + mock.sendError(errNonNil) + mock.sendError(errNonNil) + + // Since we have failed within our allowed number of retries, we now + // expect a call to our shutdown function. + select { + case <-shutdown: + case <-time.After(timeout): + t.Fatal("expected shutdown") + } + + require.NoError(t, monitor.Stop(), "could not stop monitor") +} + +// TestRetryCheck tests our retry logic. 
It does not include a test for exiting +// during the back off period. +func TestRetryCheck(t *testing.T) { + tests := []struct { + name string + + // errors provides an in-order list of errors that we expect our + // health check to respond with. The number of errors in this + // list indicates the number of times we expect our check to + // be called, because our test will fail if we do not consume + // every error. + errors []error + + // attempts is the number of times we call a check before + // failing. + attempts int + + // timeout is the time we allow our check to take before we + // fail them. + timeout time.Duration + + // expectedShutdown is true if we expect a shutdown to be + // triggered because all of our calls failed. + expectedShutdown bool + }{ + { + name: "first call succeeds", + errors: []error{nil}, + attempts: 2, + timeout: time.Hour, + expectedShutdown: false, + }, + { + name: "first call fails", + errors: []error{errNonNil}, + attempts: 1, + timeout: time.Hour, + expectedShutdown: true, + }, + { + name: "fail then recover", + errors: []error{errNonNil, nil}, + attempts: 2, + timeout: time.Hour, + expectedShutdown: false, + }, + { + name: "always fail", + errors: []error{errNonNil, errNonNil}, + attempts: 2, + timeout: time.Hour, + expectedShutdown: true, + }, + { + name: "no calls", + errors: nil, + attempts: 0, + timeout: time.Hour, + expectedShutdown: false, + }, + { + name: "call times out", + errors: nil, + attempts: 1, + timeout: 1, + expectedShutdown: true, + }, + } + + for _, test := range tests { + test := test + + t.Run(test.name, func(t *testing.T) { + var shutdown bool + shutdownFunc := func(string, ...interface{}) { + shutdown = true + } + + mock := newMockCheck(t) + + // Create an observation that calls our call counting + // function. We set a zero back off so that the test + // will not wait. 
+ observation := &Observation{ + Check: mock.call, + Attempts: test.attempts, + Timeout: test.timeout, + Backoff: 0, + } + quit := make(chan struct{}) + + // Run our retry check in a goroutine because it blocks + // on us sending errors into the mocked caller's error + // channel. + done := make(chan struct{}) + go func() { + observation.retryCheck(quit, shutdownFunc) + close(done) + }() + + // Prompt our mock caller to send responses for calls + // to our call function. + for _, err := range test.errors { + mock.sendError(err) + } + + // Make sure that we have finished running our retry + // check function before we start checking results. + <-done + + require.Equal(t, test.expectedShutdown, shutdown, + "unexpected shutdown state") + }) + } +} diff --git a/healthcheck/log.go b/healthcheck/log.go new file mode 100644 index 000000000..159aa1025 --- /dev/null +++ b/healthcheck/log.go @@ -0,0 +1,32 @@ +package healthcheck + +import ( + "github.com/btcsuite/btclog" + "github.com/lightningnetwork/lnd/build" +) + +// Subsystem defines the logging code for this subsystem. +const Subsystem = "HLCK" + +// log is a logger that is initialized with no output filters. This +// means the package will not perform any logging by default until the caller +// requests it. +var log btclog.Logger + +// The default amount of logging is none. +func init() { + UseLogger(build.NewSubLogger(Subsystem, nil)) +} + +// DisableLog disables all library log output. Logging output is disabled +// by default until UseLogger is called. +func DisableLog() { + UseLogger(btclog.Disabled) +} + +// UseLogger uses a specified Logger to output package logging info. +// This should be used in preference to SetLogWriter if the caller is also +// using btclog. 
+func UseLogger(logger btclog.Logger) { + log = logger +} diff --git a/htlcswitch/decayedlog_test.go b/htlcswitch/decayedlog_test.go index 7ebe1f10b..961eade48 100644 --- a/htlcswitch/decayedlog_test.go +++ b/htlcswitch/decayedlog_test.go @@ -10,6 +10,7 @@ import ( sphinx "github.com/lightningnetwork/lightning-onion" "github.com/lightningnetwork/lnd/chainntnfs" + "github.com/lightningnetwork/lnd/lntest/mock" ) const ( @@ -28,16 +29,18 @@ func tempDecayedLogPath(t *testing.T) string { } // startup sets up the DecayedLog and possibly the garbage collector. -func startup(dbPath string, notifier bool) (sphinx.ReplayLog, *mockNotifier, - *sphinx.HashPrefix, error) { +func startup(dbPath string, notifier bool) (sphinx.ReplayLog, + *mock.ChainNotifier, *sphinx.HashPrefix, error) { var log sphinx.ReplayLog - var chainNotifier *mockNotifier + var chainNotifier *mock.ChainNotifier if notifier { // Create the MockNotifier which triggers the garbage collector - chainNotifier = &mockNotifier{ - epochChan: make(chan *chainntnfs.BlockEpoch, 1), + chainNotifier = &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch, 1), + ConfChan: make(chan *chainntnfs.TxConfirmation), } // Initialize the DecayedLog object @@ -99,7 +102,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) { // should remove the entry by block 100001. // Send block 100000 - notifier.epochChan <- &chainntnfs.BlockEpoch{ + notifier.EpochChan <- &chainntnfs.BlockEpoch{ Height: 100000, } @@ -114,7 +117,7 @@ func TestDecayedLogGarbageCollector(t *testing.T) { } // Send block 100001 (expiry block) - notifier.epochChan <- &chainntnfs.BlockEpoch{ + notifier.EpochChan <- &chainntnfs.BlockEpoch{ Height: 100001, } @@ -175,7 +178,7 @@ func TestDecayedLogPersistentGarbageCollector(t *testing.T) { // Send a block notification to the garbage collector that expires // the stored CLTV. 
- notifier2.epochChan <- &chainntnfs.BlockEpoch{ + notifier2.EpochChan <- &chainntnfs.BlockEpoch{ Height: int32(100001), } diff --git a/htlcswitch/link.go b/htlcswitch/link.go index 3931af34d..974e3cc19 100644 --- a/htlcswitch/link.go +++ b/htlcswitch/link.go @@ -776,7 +776,7 @@ func (l *channelLink) resolveFwdPkg(fwdPkg *channeldb.FwdPkg) error { l.log.Debugf("removing completed fwd pkg for height=%d", fwdPkg.Height) - err := l.channel.RemoveFwdPkg(fwdPkg.Height) + err := l.channel.RemoveFwdPkgs(fwdPkg.Height) if err != nil { l.log.Errorf("unable to remove fwd pkg for height=%d: "+ "%v", fwdPkg.Height, err) @@ -843,35 +843,51 @@ func (l *channelLink) fwdPkgGarbager() { l.cfg.FwdPkgGCTicker.Resume() defer l.cfg.FwdPkgGCTicker.Stop() + if err := l.loadAndRemove(); err != nil { + l.log.Warnf("unable to run initial fwd pkgs gc: %v", err) + } + for { select { case <-l.cfg.FwdPkgGCTicker.Ticks(): - fwdPkgs, err := l.channel.LoadFwdPkgs() - if err != nil { - l.log.Warnf("unable to load fwdpkgs for gc: %v", + if err := l.loadAndRemove(); err != nil { + l.log.Warnf("unable to remove fwd pkgs: %v", err) continue } - - // TODO(conner): batch removal of forward packages. - for _, fwdPkg := range fwdPkgs { - if fwdPkg.State != channeldb.FwdStateCompleted { - continue - } - - err = l.channel.RemoveFwdPkg(fwdPkg.Height) - if err != nil { - l.log.Warnf("unable to remove fwd pkg "+ - "for height=%d: %v", - fwdPkg.Height, err) - } - } case <-l.quit: return } } } +// loadAndRemove loads all the channels forwarding packages and determines if +// they can be removed. It is called once before the FwdPkgGCTicker ticks so that +// a longer tick interval can be used. 
+func (l *channelLink) loadAndRemove() error { + fwdPkgs, err := l.channel.LoadFwdPkgs() + if err != nil { + return err + } + + var removeHeights []uint64 + for _, fwdPkg := range fwdPkgs { + if fwdPkg.State != channeldb.FwdStateCompleted { + continue + } + + removeHeights = append(removeHeights, fwdPkg.Height) + } + + // If removeHeights is empty, return early so we don't use a db + // transaction. + if len(removeHeights) == 0 { + return nil + } + + return l.channel.RemoveFwdPkgs(removeHeights...) +} + // htlcManager is the primary goroutine which drives a channel's commitment // update state-machine in response to messages received via several channels. // This goroutine reads messages from the upstream (remote) peer, and also from diff --git a/htlcswitch/mock.go b/htlcswitch/mock.go index 7c169ac76..69277bc31 100644 --- a/htlcswitch/mock.go +++ b/htlcswitch/mock.go @@ -15,8 +15,6 @@ import ( "time" "github.com/btcsuite/btcd/btcec" - "github.com/btcsuite/btcd/chaincfg/chainhash" - "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" "github.com/go-errors/errors" sphinx "github.com/lightningnetwork/lightning-onion" @@ -25,9 +23,9 @@ import ( "github.com/lightningnetwork/lnd/clock" "github.com/lightningnetwork/lnd/contractcourt" "github.com/lightningnetwork/lnd/htlcswitch/hop" - "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/invoices" "github.com/lightningnetwork/lnd/lnpeer" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" @@ -172,7 +170,11 @@ func initSwitchWithDB(startingHeight uint32, db *channeldb.DB) (*Switch, error) FetchLastChannelUpdate: func(lnwire.ShortChannelID) (*lnwire.ChannelUpdate, error) { return nil, nil }, - Notifier: &mockNotifier{}, + Notifier: &mock.ChainNotifier{ + SpendChan: make(chan *chainntnfs.SpendDetail), + EpochChan: make(chan *chainntnfs.BlockEpoch), + ConfChan: 
make(chan *chainntnfs.TxConfirmation), + }, FwdEventTicker: ticker.NewForce(DefaultFwdEventInterval), LogEventTicker: ticker.NewForce(DefaultLogInterval), AckEventTicker: ticker.NewForce(DefaultAckInterval), @@ -855,103 +857,6 @@ func (i *mockInvoiceRegistry) HodlUnsubscribeAll(subscriber chan<- interface{}) var _ InvoiceDatabase = (*mockInvoiceRegistry)(nil) -type mockSigner struct { - key *btcec.PrivateKey -} - -func (m *mockSigner) SignOutputRaw(tx *wire.MsgTx, - signDesc *input.SignDescriptor) (input.Signature, error) { - - amt := signDesc.Output.Value - witnessScript := signDesc.WitnessScript - privKey := m.key - - if !privKey.PubKey().IsEqual(signDesc.KeyDesc.PubKey) { - return nil, fmt.Errorf("incorrect key passed") - } - - switch { - case signDesc.SingleTweak != nil: - privKey = input.TweakPrivKey(privKey, - signDesc.SingleTweak) - case signDesc.DoubleTweak != nil: - privKey = input.DeriveRevocationPrivKey(privKey, - signDesc.DoubleTweak) - } - - sig, err := txscript.RawTxInWitnessSignature(tx, signDesc.SigHashes, - signDesc.InputIndex, amt, witnessScript, signDesc.HashType, - privKey) - if err != nil { - return nil, err - } - - return btcec.ParseDERSignature(sig[:len(sig)-1], btcec.S256()) -} -func (m *mockSigner) ComputeInputScript(tx *wire.MsgTx, signDesc *input.SignDescriptor) (*input.Script, error) { - - // TODO(roasbeef): expose tweaked signer from lnwallet so don't need to - // duplicate this code? 
- - privKey := m.key - - switch { - case signDesc.SingleTweak != nil: - privKey = input.TweakPrivKey(privKey, - signDesc.SingleTweak) - case signDesc.DoubleTweak != nil: - privKey = input.DeriveRevocationPrivKey(privKey, - signDesc.DoubleTweak) - } - - witnessScript, err := txscript.WitnessSignature(tx, signDesc.SigHashes, - signDesc.InputIndex, signDesc.Output.Value, signDesc.Output.PkScript, - signDesc.HashType, privKey, true) - if err != nil { - return nil, err - } - - return &input.Script{ - Witness: witnessScript, - }, nil -} - -type mockNotifier struct { - epochChan chan *chainntnfs.BlockEpoch -} - -func (m *mockNotifier) RegisterConfirmationsNtfn(txid *chainhash.Hash, _ []byte, - numConfs uint32, heightHint uint32) (*chainntnfs.ConfirmationEvent, error) { - return nil, nil -} -func (m *mockNotifier) RegisterBlockEpochNtfn( - bestBlock *chainntnfs.BlockEpoch) (*chainntnfs.BlockEpochEvent, error) { - return &chainntnfs.BlockEpochEvent{ - Epochs: m.epochChan, - Cancel: func() {}, - }, nil -} - -func (m *mockNotifier) Start() error { - return nil -} - -func (m *mockNotifier) Started() bool { - return true -} - -func (m *mockNotifier) Stop() error { - return nil -} - -func (m *mockNotifier) RegisterSpendNtfn(outpoint *wire.OutPoint, _ []byte, - heightHint uint32) (*chainntnfs.SpendEvent, error) { - - return &chainntnfs.SpendEvent{ - Spend: make(chan *chainntnfs.SpendDetail), - }, nil -} - type mockCircuitMap struct { lookup chan *PaymentCircuit } diff --git a/htlcswitch/switch.go b/htlcswitch/switch.go index 0eb7ffe8a..28a0bb047 100644 --- a/htlcswitch/switch.go +++ b/htlcswitch/switch.go @@ -1811,7 +1811,7 @@ func (s *Switch) reforwardResponses() error { func (s *Switch) loadChannelFwdPkgs(source lnwire.ShortChannelID) ([]*channeldb.FwdPkg, error) { var fwdPkgs []*channeldb.FwdPkg - if err := kvdb.Update(s.cfg.DB, func(tx kvdb.RwTx) error { + if err := kvdb.View(s.cfg.DB, func(tx kvdb.RTx) error { var err error fwdPkgs, err = 
s.cfg.SwitchPackager.LoadChannelFwdPkgs( tx, source, diff --git a/htlcswitch/test_utils.go b/htlcswitch/test_utils.go index ae64cf1bd..37925ef9f 100644 --- a/htlcswitch/test_utils.go +++ b/htlcswitch/test_utils.go @@ -29,6 +29,7 @@ import ( "github.com/lightningnetwork/lnd/input" "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnpeer" + "github.com/lightningnetwork/lnd/lntest/mock" "github.com/lightningnetwork/lnd/lntest/wait" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwallet" @@ -376,8 +377,8 @@ func createTestChannel(alicePrivKey, bobPrivKey []byte, os.RemoveAll(alicePath) } - aliceSigner := &mockSigner{aliceKeyPriv} - bobSigner := &mockSigner{bobKeyPriv} + aliceSigner := &mock.SingleSigner{Privkey: aliceKeyPriv} + bobSigner := &mock.SingleSigner{Privkey: bobKeyPriv} alicePool := lnwallet.NewSigPool(runtime.NumCPU(), aliceSigner) channelAlice, err := lnwallet.NewLightningChannel( diff --git a/input/input.go b/input/input.go index 38a1651e5..2e3a71c0b 100644 --- a/input/input.go +++ b/input/input.go @@ -3,6 +3,7 @@ package input import ( "github.com/btcsuite/btcd/txscript" "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" ) // Input represents an abstract UTXO which is to be spent using a sweeping @@ -41,6 +42,19 @@ type Input interface { // HeightHint returns the minimum height at which a confirmed spending // tx can occur. HeightHint() uint32 + + // UnconfParent returns information about a possibly unconfirmed parent + // tx. + UnconfParent() *TxInfo +} + +// TxInfo describes properties of a parent tx that are relevant for CPFP. +type TxInfo struct { + // Fee is the fee of the tx. + Fee btcutil.Amount + + // Weight is the weight of the tx. + Weight int64 } type inputKit struct { @@ -49,6 +63,10 @@ type inputKit struct { signDesc SignDescriptor heightHint uint32 blockToMaturity uint32 + + // unconfParent contains information about a potential unconfirmed + // parent transaction. 
+ unconfParent *TxInfo } // OutPoint returns the breached output's identifier that is to be included as @@ -82,6 +100,11 @@ func (i *inputKit) BlocksToMaturity() uint32 { return i.blockToMaturity } +// Cpfp returns information about a possibly unconfirmed parent tx. +func (i *inputKit) UnconfParent() *TxInfo { + return i.unconfParent +} + // BaseInput contains all the information needed to sweep a basic output // (CSV/CLTV/no time lock) type BaseInput struct { @@ -91,14 +114,16 @@ type BaseInput struct { // MakeBaseInput assembles a new BaseInput that can be used to construct a // sweep transaction. func MakeBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, - signDescriptor *SignDescriptor, heightHint uint32) BaseInput { + signDescriptor *SignDescriptor, heightHint uint32, + unconfParent *TxInfo) BaseInput { return BaseInput{ inputKit{ - outpoint: *outpoint, - witnessType: witnessType, - signDesc: *signDescriptor, - heightHint: heightHint, + outpoint: *outpoint, + witnessType: witnessType, + signDesc: *signDescriptor, + heightHint: heightHint, + unconfParent: unconfParent, }, } } @@ -109,7 +134,7 @@ func NewBaseInput(outpoint *wire.OutPoint, witnessType WitnessType, signDescriptor *SignDescriptor, heightHint uint32) *BaseInput { input := MakeBaseInput( - outpoint, witnessType, signDescriptor, heightHint, + outpoint, witnessType, signDescriptor, heightHint, nil, ) return &input diff --git a/invoices/invoice_expiry_watcher.go b/invoices/invoice_expiry_watcher.go index f0db08d11..a46f27f5a 100644 --- a/invoices/invoice_expiry_watcher.go +++ b/invoices/invoice_expiry_watcher.go @@ -48,8 +48,8 @@ type InvoiceExpiryWatcher struct { // invoice to expire. expiryQueue queue.PriorityQueue - // newInvoices channel is used to wake up the main loop when a new invoices - // is added. + // newInvoices channel is used to wake up the main loop when a new + // invoices is added. 
newInvoices chan []*invoiceExpiry wg sync.WaitGroup @@ -109,7 +109,8 @@ func (ew *InvoiceExpiryWatcher) prepareInvoice( paymentHash lntypes.Hash, invoice *channeldb.Invoice) *invoiceExpiry { if invoice.State != channeldb.ContractOpen { - log.Debugf("Invoice not added to expiry watcher: %v", paymentHash) + log.Debugf("Invoice not added to expiry watcher: %v", + paymentHash) return nil } @@ -128,15 +129,15 @@ func (ew *InvoiceExpiryWatcher) prepareInvoice( // AddInvoices adds multiple invoices to the InvoiceExpiryWatcher. func (ew *InvoiceExpiryWatcher) AddInvoices( - invoices []channeldb.InvoiceWithPaymentHash) { + invoices map[lntypes.Hash]*channeldb.Invoice) { invoicesWithExpiry := make([]*invoiceExpiry, 0, len(invoices)) - for _, invoiceWithPaymentHash := range invoices { - newInvoiceExpiry := ew.prepareInvoice( - invoiceWithPaymentHash.PaymentHash, &invoiceWithPaymentHash.Invoice, - ) + for paymentHash, invoice := range invoices { + newInvoiceExpiry := ew.prepareInvoice(paymentHash, invoice) if newInvoiceExpiry != nil { - invoicesWithExpiry = append(invoicesWithExpiry, newInvoiceExpiry) + invoicesWithExpiry = append( + invoicesWithExpiry, newInvoiceExpiry, + ) } } @@ -160,8 +161,8 @@ func (ew *InvoiceExpiryWatcher) AddInvoice( newInvoiceExpiry := ew.prepareInvoice(paymentHash, invoice) if newInvoiceExpiry != nil { - log.Debugf("Adding invoice '%v' to expiry watcher, expiration: %v", - paymentHash, newInvoiceExpiry.Expiry) + log.Debugf("Adding invoice '%v' to expiry watcher,"+ + "expiration: %v", paymentHash, newInvoiceExpiry.Expiry) select { case ew.newInvoices <- []*invoiceExpiry{newInvoiceExpiry}: @@ -202,7 +203,8 @@ func (ew *InvoiceExpiryWatcher) cancelNextExpiredInvoice() { if err != nil && err != channeldb.ErrInvoiceAlreadySettled && err != channeldb.ErrInvoiceAlreadyCanceled { - log.Errorf("Unable to cancel invoice: %v", top.PaymentHash) + log.Errorf("Unable to cancel invoice: %v", + top.PaymentHash) } ew.expiryQueue.Pop() @@ -236,8 +238,8 @@ func (ew 
*InvoiceExpiryWatcher) mainLoop() { continue case invoicesWithExpiry := <-ew.newInvoices: - for _, invoiceWithExpiry := range invoicesWithExpiry { - ew.expiryQueue.Push(invoiceWithExpiry) + for _, invoice := range invoicesWithExpiry { + ew.expiryQueue.Push(invoice) } case <-ew.quit: diff --git a/invoices/invoice_expiry_watcher_test.go b/invoices/invoice_expiry_watcher_test.go index 2aa0f87ba..67ea25256 100644 --- a/invoices/invoice_expiry_watcher_test.go +++ b/invoices/invoice_expiry_watcher_test.go @@ -37,7 +37,9 @@ func newInvoiceExpiryWatcherTest(t *testing.T, now time.Time, err := test.watcher.Start(func(paymentHash lntypes.Hash, force bool) error { - test.canceledInvoices = append(test.canceledInvoices, paymentHash) + test.canceledInvoices = append( + test.canceledInvoices, paymentHash, + ) test.wg.Done() return nil }) @@ -70,7 +72,8 @@ func (t *invoiceExpiryWatcherTest) checkExpectations() { // that expired. if len(t.canceledInvoices) != len(t.testData.expiredInvoices) { t.t.Fatalf("expected %v cancellations, got %v", - len(t.testData.expiredInvoices), len(t.canceledInvoices)) + len(t.testData.expiredInvoices), + len(t.canceledInvoices)) } for i := range t.canceledInvoices { @@ -155,24 +158,14 @@ func TestInvoiceExpiryWhenAddingMultipleInvoices(t *testing.T) { t.Parallel() test := newInvoiceExpiryWatcherTest(t, testTime, 5, 5) - var invoices []channeldb.InvoiceWithPaymentHash + invoices := make(map[lntypes.Hash]*channeldb.Invoice) for hash, invoice := range test.testData.expiredInvoices { - invoices = append(invoices, - channeldb.InvoiceWithPaymentHash{ - Invoice: *invoice, - PaymentHash: hash, - }, - ) + invoices[hash] = invoice } for hash, invoice := range test.testData.pendingInvoices { - invoices = append(invoices, - channeldb.InvoiceWithPaymentHash{ - Invoice: *invoice, - PaymentHash: hash, - }, - ) + invoices[hash] = invoice } test.watcher.AddInvoices(invoices) diff --git a/invoices/invoiceregistry.go b/invoices/invoiceregistry.go index 
84d646178..c827f1445 100644 --- a/invoices/invoiceregistry.go +++ b/invoices/invoiceregistry.go @@ -57,6 +57,14 @@ type RegistryConfig struct { // send payments. AcceptKeySend bool + // GcCanceledInvoicesOnStartup if set, we'll attempt to garbage collect + // all canceled invoices upon start. + GcCanceledInvoicesOnStartup bool + + // GcCanceledInvoicesOnTheFly if set, we'll garbage collect all newly + // canceled invoices on the fly. + GcCanceledInvoicesOnTheFly bool + // KeysendHoldTime indicates for how long we want to accept and hold // spontaneous keysend payments. KeysendHoldTime time.Duration @@ -147,21 +155,65 @@ func NewRegistry(cdb *channeldb.DB, expiryWatcher *InvoiceExpiryWatcher, } } -// populateExpiryWatcher fetches all active invoices and their corresponding -// payment hashes from ChannelDB and adds them to the expiry watcher. -func (i *InvoiceRegistry) populateExpiryWatcher() error { - pendingOnly := true - pendingInvoices, err := i.cdb.FetchAllInvoicesWithPaymentHash(pendingOnly) - if err != nil && err != channeldb.ErrNoInvoicesCreated { - log.Errorf( - "Error while prefetching active invoices from the database: %v", err, - ) +// scanInvoicesOnStart will scan all invoices on start and add active invoices +// to the invoice expirt watcher while also attempting to delete all canceled +// invoices. +func (i *InvoiceRegistry) scanInvoicesOnStart() error { + var ( + pending map[lntypes.Hash]*channeldb.Invoice + removable []channeldb.InvoiceDeleteRef + ) + + reset := func() { + // Zero out our results on start and if the scan is ever run + // more than once. This latter case can happen if the kvdb + // layer needs to retry the View transaction underneath (eg. + // using the etcd driver, where all transactions are allowed + // to retry for serializability). 
+ pending = make(map[lntypes.Hash]*channeldb.Invoice) + removable = make([]channeldb.InvoiceDeleteRef, 0) + } + + scanFunc := func( + paymentHash lntypes.Hash, invoice *channeldb.Invoice) error { + + if invoice.IsPending() { + pending[paymentHash] = invoice + } else if i.cfg.GcCanceledInvoicesOnStartup && + invoice.State == channeldb.ContractCanceled { + + // Consider invoice for removal if it is already + // canceled. Invoices that are expired but not yet + // canceled, will be queued up for cancellation after + // startup and will be deleted afterwards. + ref := channeldb.InvoiceDeleteRef{ + PayHash: paymentHash, + AddIndex: invoice.AddIndex, + SettleIndex: invoice.SettleIndex, + } + + if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr { + ref.PayAddr = &invoice.Terms.PaymentAddr + } + + removable = append(removable, ref) + } + return nil + } + + err := i.cdb.ScanInvoices(scanFunc, reset) + if err != nil { return err } log.Debugf("Adding %d pending invoices to the expiry watcher", - len(pendingInvoices)) - i.expiryWatcher.AddInvoices(pendingInvoices) + len(pending)) + i.expiryWatcher.AddInvoices(pending) + + if err := i.cdb.DeleteInvoice(removable); err != nil { + log.Warnf("Deleting old invoices failed: %v", err) + } + return nil } @@ -178,8 +230,9 @@ func (i *InvoiceRegistry) Start() error { i.wg.Add(1) go i.invoiceEventLoop() - // Now prefetch all pending invoices to the expiry watcher. - err = i.populateExpiryWatcher() + // Now scan all pending and removable invoices to the expiry watcher or + // delete them. + err = i.scanInvoicesOnStart() if err != nil { i.Stop() return err @@ -1075,6 +1128,32 @@ func (i *InvoiceRegistry) cancelInvoiceImpl(payHash lntypes.Hash, } i.notifyClients(payHash, invoice, channeldb.ContractCanceled) + // Attempt to also delete the invoice if requested through the registry + // config. + if i.cfg.GcCanceledInvoicesOnTheFly { + // Assemble the delete reference and attempt to delete through + // the invocice from the DB. 
+ deleteRef := channeldb.InvoiceDeleteRef{ + PayHash: payHash, + AddIndex: invoice.AddIndex, + SettleIndex: invoice.SettleIndex, + } + if invoice.Terms.PaymentAddr != channeldb.BlankPayAddr { + deleteRef.PayAddr = &invoice.Terms.PaymentAddr + } + + err = i.cdb.DeleteInvoice( + []channeldb.InvoiceDeleteRef{deleteRef}, + ) + // If by any chance deletion failed, then log it instead of + // returning the error, as the invoice itsels has already been + // canceled. + if err != nil { + log.Warnf("Invoice%v could not be deleted: %v", + ref, err) + } + } + return nil } diff --git a/invoices/invoiceregistry_test.go b/invoices/invoiceregistry_test.go index c77b38ed5..cb916aeab 100644 --- a/invoices/invoiceregistry_test.go +++ b/invoices/invoiceregistry_test.go @@ -1,6 +1,7 @@ package invoices import ( + "math" "testing" "time" @@ -219,11 +220,14 @@ func TestSettleInvoice(t *testing.T) { } } -// TestCancelInvoice tests cancelation of an invoice and related notifications. -func TestCancelInvoice(t *testing.T) { +func testCancelInvoice(t *testing.T, gc bool) { ctx := newTestContext(t) defer ctx.cleanup() + // If set to true, then also delete the invoice from the DB after + // cancellation. + ctx.registry.cfg.GcCanceledInvoicesOnTheFly = gc + allSubscriptions, err := ctx.registry.SubscribeNotifications(0, 0) assert.Nil(t, err) defer allSubscriptions.Cancel() @@ -298,13 +302,26 @@ func TestCancelInvoice(t *testing.T) { t.Fatal("no update received") } + if gc { + // Check that the invoice has been deleted from the db. + _, err = ctx.cdb.LookupInvoice( + channeldb.InvoiceRefByHash(testInvoicePaymentHash), + ) + require.Error(t, err) + } + // We expect no cancel notification to be sent to all invoice // subscribers (backwards compatibility). - // Try to cancel again. + // Try to cancel again. Expect that we report ErrInvoiceNotFound if the + // invoice has been garbage collected (since the invoice has been + // deleted when it was canceled), and no error otherwise. 
err = ctx.registry.CancelInvoice(testInvoicePaymentHash) - if err != nil { - t.Fatal("expected cancelation of a canceled invoice to succeed") + + if gc { + require.Error(t, err, channeldb.ErrInvoiceNotFound) + } else { + require.NoError(t, err) } // Notify arrival of a new htlc paying to this invoice. This should @@ -326,12 +343,33 @@ func TestCancelInvoice(t *testing.T) { t.Fatalf("expected acceptHeight %v, but got %v", testCurrentHeight, failResolution.AcceptHeight) } - if failResolution.Outcome != ResultInvoiceAlreadyCanceled { - t.Fatalf("expected expiry too soon, got: %v", - failResolution.Outcome) + + // If the invoice has been deleted (or not present) then we expect the + // outcome to be ResultInvoiceNotFound instead of when the invoice is + // in our database in which case we expect ResultInvoiceAlreadyCanceled. + if gc { + require.Equal(t, failResolution.Outcome, ResultInvoiceNotFound) + } else { + require.Equal(t, + failResolution.Outcome, + ResultInvoiceAlreadyCanceled, + ) } } +// TestCancelInvoice tests cancelation of an invoice and related notifications. +func TestCancelInvoice(t *testing.T) { + // Test cancellation both with garbage collection (meaning that canceled + // invoice will be deleted) and without (meain it'll be kept). + t.Run("garbage collect", func(t *testing.T) { + testCancelInvoice(t, true) + }) + + t.Run("no garbage collect", func(t *testing.T) { + testCancelInvoice(t, false) + }) +} + // TestSettleHoldInvoice tests settling of a hold invoice and related // notifications. func TestSettleHoldInvoice(t *testing.T) { @@ -1077,3 +1115,78 @@ func TestInvoiceExpiryWithRegistry(t *testing.T) { } } } + +// TestOldInvoiceRemovalOnStart tests that we'll attempt to remove old canceled +// invoices upon start while keeping all settled ones. 
+func TestOldInvoiceRemovalOnStart(t *testing.T) { + t.Parallel() + + testClock := clock.NewTestClock(testTime) + cdb, cleanup, err := newTestChannelDB(testClock) + defer cleanup() + + require.NoError(t, err) + + cfg := RegistryConfig{ + FinalCltvRejectDelta: testFinalCltvRejectDelta, + Clock: testClock, + GcCanceledInvoicesOnStartup: true, + } + + expiryWatcher := NewInvoiceExpiryWatcher(cfg.Clock) + registry := NewRegistry(cdb, expiryWatcher, &cfg) + + // First prefill the Channel DB with some pre-existing expired invoices. + const numExpired = 5 + const numPending = 0 + existingInvoices := generateInvoiceExpiryTestData( + t, testTime, 0, numExpired, numPending, + ) + + i := 0 + for paymentHash, invoice := range existingInvoices.expiredInvoices { + // Mark half of the invoices as settled, the other hald as + // canceled. + if i%2 == 0 { + invoice.State = channeldb.ContractSettled + } else { + invoice.State = channeldb.ContractCanceled + } + + _, err := cdb.AddInvoice(invoice, paymentHash) + require.NoError(t, err) + i++ + } + + // Collect all settled invoices for our expectation set. + var expected []channeldb.Invoice + + // Perform a scan query to collect all invoices. + query := channeldb.InvoiceQuery{ + IndexOffset: 0, + NumMaxInvoices: math.MaxUint64, + } + + response, err := cdb.QueryInvoices(query) + require.NoError(t, err) + + // Save all settled invoices for our expectation set. + for _, invoice := range response.Invoices { + if invoice.State == channeldb.ContractSettled { + expected = append(expected, invoice) + } + } + + // Start the registry which should collect and delete all canceled + // invoices upon start. + err = registry.Start() + require.NoError(t, err, "cannot start the registry") + + // Perform a scan query to collect all invoices. + response, err = cdb.QueryInvoices(query) + require.NoError(t, err) + + // Check that we really only kept the settled invoices after the + // registry start. 
+ require.Equal(t, expected, response.Invoices) +} diff --git a/keychain/derivation.go b/keychain/derivation.go index 4e416c2dd..b22d8f70f 100644 --- a/keychain/derivation.go +++ b/keychain/derivation.go @@ -55,7 +55,7 @@ const ( // KeyFamilyRevocationBase are keys that are used within channels to // create revocation basepoints that the remote party will use to // create revocation keys for us. - KeyFamilyRevocationBase = 1 + KeyFamilyRevocationBase KeyFamily = 1 // KeyFamilyHtlcBase are keys used within channels that will be // combined with per-state randomness to produce public keys that will diff --git a/keychain/interface_test.go b/keychain/interface_test.go index 345754db5..2f3945779 100644 --- a/keychain/interface_test.go +++ b/keychain/interface_test.go @@ -30,6 +30,9 @@ var versionZeroKeyFamilies = []KeyFamily{ KeyFamilyDelayBase, KeyFamilyRevocationRoot, KeyFamilyNodeKey, + KeyFamilyStaticBackup, + KeyFamilyTowerSession, + KeyFamilyTowerID, } var ( diff --git a/labels/labels.go b/labels/labels.go index 3bbe5feaa..c35faaf76 100644 --- a/labels/labels.go +++ b/labels/labels.go @@ -1,12 +1,24 @@ // Package labels contains labels used to label transactions broadcast by lnd. // These labels are used across packages, so they are declared in a separate // package to avoid dependency issues. +// +// Labels for transactions broadcast by lnd have two set fields followed by an +// optional set labelled data values, all separated by colons. +// - Label version: an integer that indicates the version lnd used +// - Label type: the type of transaction we are labelling +// - {field name}-{value}: a named field followed by its value, these items are +// optional, and there may be more than field present. +// +// For version 0 we have the following optional data fields defined: +// - shortchanid: the short channel ID that a transaction is associated with, +// with its value set to the uint64 short channel id. 
package labels import ( "fmt" "github.com/btcsuite/btcwallet/wtxmgr" + "github.com/lightningnetwork/lnd/lnwire" ) // External labels a transaction as user initiated via the api. This @@ -31,3 +43,50 @@ func ValidateAPI(label string) (string, error) { return label, nil } + +// LabelVersion versions our labels so they can be easily updated to contain +// new data while still easily string matched. +type LabelVersion uint8 + +// LabelVersionZero is the label version for labels that contain label type and +// channel ID (where available). +const LabelVersionZero LabelVersion = iota + +// LabelType indicates the type of label we are creating. It is a string rather +// than an int for easy string matching and human-readability. +type LabelType string + +const ( + // LabelTypeChannelOpen is used to label channel opens. + LabelTypeChannelOpen LabelType = "openchannel" + + // LabelTypeChannelClose is used to label channel closes. + LabelTypeChannelClose LabelType = "closechannel" + + // LabelTypeJusticeTransaction is used to label justice transactions. + LabelTypeJusticeTransaction LabelType = "justicetx" + + // LabelTypeSweepTransaction is used to label sweeps. + LabelTypeSweepTransaction LabelType = "sweep" +) + +// LabelField is used to tag a value within a label. +type LabelField string + +const ( + // ShortChanID is used to tag short channel id values in our labels. + ShortChanID LabelField = "shortchanid" +) + +// MakeLabel creates a label with the provided type and short channel id. If +// our short channel ID is not known, we simply return version:label_type. If +// we do have a short channel ID set, the label will also contain its value: +// shortchanid-{int64 chan ID}. 
+func MakeLabel(labelType LabelType, channelID *lnwire.ShortChannelID) string { + if channelID == nil { + return fmt.Sprintf("%v:%v", LabelVersionZero, labelType) + } + + return fmt.Sprintf("%v:%v:%v-%v", LabelVersionZero, labelType, + ShortChanID, channelID.ToUint64()) +} diff --git a/lncfg/address.go b/lncfg/address.go index 7fe2d71d5..2c1770e4f 100644 --- a/lncfg/address.go +++ b/lncfg/address.go @@ -54,10 +54,10 @@ func NormalizeAddresses(addrs []string, defaultPort string, // interface. func EnforceSafeAuthentication(addrs []net.Addr, macaroonsActive bool) error { // We'll now examine all addresses that this RPC server is listening - // on. If it's a localhost address, we'll skip it, otherwise, we'll - // return an error if macaroons are inactive. + // on. If it's a localhost address or a private address, we'll skip it, + // otherwise, we'll return an error if macaroons are inactive. for _, addr := range addrs { - if IsLoopback(addr.String()) || IsUnix(addr) { + if IsLoopback(addr.String()) || IsUnix(addr) || IsPrivate(addr) { continue } @@ -117,6 +117,39 @@ func IsUnix(addr net.Addr) bool { return strings.HasPrefix(addr.Network(), "unix") } +// IsPrivate returns true if the address is private. The definitions are, +// https://en.wikipedia.org/wiki/Link-local_address +// https://en.wikipedia.org/wiki/Multicast_address +// Local IPv4 addresses, https://tools.ietf.org/html/rfc1918 +// Local IPv6 addresses, https://tools.ietf.org/html/rfc4193 +func IsPrivate(addr net.Addr) bool { + switch addr := addr.(type) { + case *net.TCPAddr: + // Check 169.254.0.0/16 and fe80::/10. + if addr.IP.IsLinkLocalUnicast() { + return true + } + + // Check 224.0.0.0/4 and ff00::/8. + if addr.IP.IsLinkLocalMulticast() { + return true + } + + // Check 10.0.0.0/8, 172.16.0.0/12 and 192.168.0.0/16. + if ip4 := addr.IP.To4(); ip4 != nil { + return ip4[0] == 10 || + (ip4[0] == 172 && ip4[1]&0xf0 == 16) || + (ip4[0] == 192 && ip4[1] == 168) + } + + // Check fc00::/7. 
+ return len(addr.IP) == net.IPv6len && addr.IP[0]&0xfe == 0xfc + + default: + return false + } +} + // ParseAddressString converts an address in string format to a net.Addr that is // compatible with lnd. UDP is not supported because lnd needs reliable // connections. We accept a custom function to resolve any TCP addresses so diff --git a/lncfg/address_test.go b/lncfg/address_test.go index c35d7199d..208b0407e 100644 --- a/lncfg/address_test.go +++ b/lncfg/address_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/btcsuite/btcd/btcec" + "github.com/stretchr/testify/require" ) // addressTest defines a test vector for an address that contains the non- @@ -265,3 +266,57 @@ func validateAddr(t *testing.T, addr net.Addr, test addressTest) { ) } } + +func TestIsPrivate(t *testing.T) { + nonPrivateIPList := []net.IP{ + net.IPv4(169, 255, 0, 0), + {0xfe, 0x79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + net.IPv4(225, 0, 0, 0), + {0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + net.IPv4(11, 0, 0, 0), + net.IPv4(172, 15, 0, 0), + net.IPv4(192, 169, 0, 0), + {0xfe, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + net.IPv4(8, 8, 8, 8), + {2, 0, 0, 1, 4, 8, 6, 0, 4, 8, 6, 0, 8, 8, 8, 8}, + } + privateIPList := []net.IP{ + net.IPv4(169, 254, 0, 0), + {0xfe, 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + net.IPv4(224, 0, 0, 0), + {0xff, 0x02, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + net.IPv4(10, 0, 0, 1), + net.IPv4(172, 16, 0, 1), + net.IPv4(172, 31, 255, 255), + net.IPv4(192, 168, 0, 1), + {0xfc, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + } + + testParams := []struct { + name string + ipList []net.IP + private bool + }{ + { + "Non-private addresses should return false", + nonPrivateIPList, false, + }, + { + "Private addresses should return true", + privateIPList, true, + }, + } + + for _, tt := range testParams { + test := tt + t.Run(test.name, func(t *testing.T) { + for _, ip := range test.ipList { + addr := &net.TCPAddr{IP: ip} + require.Equal( + 
t, test.private, IsPrivate(addr), + "expected IP: %s to be %v", ip, test.private, + ) + } + }) + } +} diff --git a/lncfg/db.go b/lncfg/db.go index d63da8caf..cb6bcb246 100644 --- a/lncfg/db.go +++ b/lncfg/db.go @@ -9,8 +9,8 @@ import ( const ( dbName = "channel.db" - boltBackend = "bolt" - etcdBackend = "etcd" + BoltBackend = "bolt" + EtcdBackend = "etcd" ) // DB holds database configuration for LND. @@ -25,41 +25,73 @@ type DB struct { // NewDB creates and returns a new default DB config. func DefaultDB() *DB { return &DB{ - Backend: boltBackend, - Bolt: &kvdb.BoltConfig{ - NoFreeListSync: true, - }, + Backend: BoltBackend, + Bolt: &kvdb.BoltConfig{}, } } // Validate validates the DB config. func (db *DB) Validate() error { switch db.Backend { - case boltBackend: + case BoltBackend: - case etcdBackend: + case EtcdBackend: if db.Etcd.Host == "" { return fmt.Errorf("etcd host must be set") } default: return fmt.Errorf("unknown backend, must be either \"%v\" or \"%v\"", - boltBackend, etcdBackend) + BoltBackend, EtcdBackend) } return nil } -// GetBackend returns a kvdb.Backend as set in the DB config. -func (db *DB) GetBackend(ctx context.Context, dbPath string, - networkName string) (kvdb.Backend, error) { +// DatabaseBackends is a two-tuple that holds the set of active database +// backends for the daemon. The two backends we expose are the local database +// backend, and the remote backend. The LocalDB attribute will always be +// populated. However, the remote DB will only be set if a replicated database +// is active. +type DatabaseBackends struct { + // LocalDB points to the local non-replicated backend. + LocalDB kvdb.Backend - if db.Backend == etcdBackend { + // RemoteDB points to a possibly networked replicated backend. If no + // replicated backend is active, then this pointer will be nil. + RemoteDB kvdb.Backend +} + +// GetBackends returns a set of kvdb.Backends as set in the DB config. 
The +// local database will ALWAYS be non-nil, while the remote database will only +// be populated if etcd is specified. +func (db *DB) GetBackends(ctx context.Context, dbPath string, + networkName string) (*DatabaseBackends, error) { + + var ( + localDB, remoteDB kvdb.Backend + err error + ) + + if db.Backend == EtcdBackend { // Prefix will separate key/values in the db. - return kvdb.GetEtcdBackend(ctx, networkName, db.Etcd) + remoteDB, err = kvdb.GetEtcdBackend(ctx, networkName, db.Etcd) + if err != nil { + return nil, err + } } - return kvdb.GetBoltBackend(dbPath, dbName, db.Bolt.NoFreeListSync) + localDB, err = kvdb.GetBoltBackend( + dbPath, dbName, !db.Bolt.SyncFreelist, + ) + if err != nil { + return nil, err + } + + return &DatabaseBackends{ + LocalDB: localDB, + RemoteDB: remoteDB, + }, nil } // Compile-time constraint to ensure Workers implements the Validator interface. diff --git a/lncfg/healthcheck.go b/lncfg/healthcheck.go new file mode 100644 index 000000000..a43505ca9 --- /dev/null +++ b/lncfg/healthcheck.go @@ -0,0 +1,90 @@ +package lncfg + +import ( + "errors" + "fmt" + "time" +) + +var ( + // MinHealthCheckInterval is the minimum interval we allow between + // health checks. + MinHealthCheckInterval = time.Minute + + // MinHealthCheckTimeout is the minimum timeout we allow for health + // check calls. + MinHealthCheckTimeout = time.Second + + // MinHealthCheckBackoff is the minimum back off we allow between health + // check retries. + MinHealthCheckBackoff = time.Second +) + +// HealthCheckConfig contains the configuration for the different health checks +// the lnd runs. +type HealthCheckConfig struct { + ChainCheck *CheckConfig `group:"chainbackend" namespace:"chainbackend"` + + DiskCheck *DiskCheckConfig `group:"diskspace" namespace:"diskspace"` +} + +// Validate checks the values configured for our health checks. 
+func (h *HealthCheckConfig) Validate() error { + if err := h.ChainCheck.validate("chain backend"); err != nil { + return err + } + + if err := h.DiskCheck.validate("disk space"); err != nil { + return err + } + + if h.DiskCheck.RequiredRemaining < 0 || + h.DiskCheck.RequiredRemaining >= 1 { + + return errors.New("disk required ratio must be in [0:1)") + } + + return nil +} + +type CheckConfig struct { + Interval time.Duration `long:"interval" description:"How often to run a health check."` + + Attempts int `long:"attempts" description:"The number of calls we will make for the check before failing. Set this value to 0 to disable a check."` + + Timeout time.Duration `long:"timeout" description:"The amount of time we allow the health check to take before failing due to timeout."` + + Backoff time.Duration `long:"backoff" description:"The amount of time to back-off between failed health checks."` +} + +// validate checks the values in a health check config entry if it is enabled. +func (c *CheckConfig) validate(name string) error { + if c.Attempts == 0 { + return nil + } + + if c.Backoff < MinHealthCheckBackoff { + return fmt.Errorf("%v backoff: %v below minimum: %v", name, + c.Backoff, MinHealthCheckBackoff) + } + + if c.Timeout < MinHealthCheckTimeout { + return fmt.Errorf("%v timeout: %v below minimum: %v", name, + c.Timeout, MinHealthCheckTimeout) + } + + if c.Interval < MinHealthCheckInterval { + return fmt.Errorf("%v interval: %v below minimum: %v", name, + c.Interval, MinHealthCheckInterval) + } + + return nil +} + +// DiskCheckConfig contains configuration for ensuring that our node has +// sufficient disk space. 
+type DiskCheckConfig struct { + RequiredRemaining float64 `long:"diskrequired" description:"The minimum ratio of free disk space to total capacity that we allow before shutting lnd down safely."` + + *CheckConfig +} diff --git a/lncfg/neutrino.go b/lncfg/neutrino.go index b6f892bf0..db4c3d4dd 100644 --- a/lncfg/neutrino.go +++ b/lncfg/neutrino.go @@ -10,6 +10,6 @@ type Neutrino struct { MaxPeers int `long:"maxpeers" description:"Max number of inbound and outbound peers"` BanDuration time.Duration `long:"banduration" description:"How long to ban misbehaving peers. Valid time units are {s, m, h}. Minimum 1 second"` BanThreshold uint32 `long:"banthreshold" description:"Maximum allowed ban score before disconnecting and banning misbehaving peers."` - FeeURL string `long:"feeurl" description:"Optional URL for fee estimation. If a URL is not specified, static fees will be used for estimation."` + FeeURL string `long:"feeurl" description:"DEPRECATED: Optional URL for fee estimation. If a URL is not specified, static fees will be used for estimation."` AssertFilterHeader string `long:"assertfilterheader" description:"Optional filter header in height:hash format to assert the state of neutrino's filter header chain on startup. If the assertion does not hold, then the filter header chain will be re-synced from the genesis block."` } diff --git a/lnd.go b/lnd.go index cccf30e61..e9387626f 100644 --- a/lnd.go +++ b/lnd.go @@ -11,6 +11,7 @@ import ( "io/ioutil" "net" "net/http" + _ "net/http/pprof" // Blank import to set up profiling HTTP handlers. "os" "path/filepath" "runtime/pprof" @@ -18,19 +19,15 @@ import ( "sync" "time" - // Blank import to set up profiling HTTP handlers. 
- _ "net/http/pprof" - - "gopkg.in/macaroon-bakery.v2/bakery" - "gopkg.in/macaroon.v2" - - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - "github.com/btcsuite/btcutil" "github.com/btcsuite/btcwallet/wallet" proxy "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/lightninglabs/neutrino" + "golang.org/x/crypto/acme/autocert" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "gopkg.in/macaroon-bakery.v2/bakery" + "gopkg.in/macaroon.v2" "github.com/lightningnetwork/lnd/autopilot" "github.com/lightningnetwork/lnd/build" @@ -145,6 +142,12 @@ type RPCSubserverConfig struct { // per URI, they are all required. See rpcserver.go for a list of valid // action and entity values. Permissions map[string][]bakery.Op + + // MacaroonValidator is a custom macaroon validator that should be used + // instead of the default lnd validator. If specified, the custom + // validator is used for all URIs specified in the above Permissions + // map. + MacaroonValidator macaroons.MacaroonValidator } // ListenerWithSignal is a net.Listener that has an additional Ready channel that @@ -246,54 +249,31 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { defer pprof.StopCPUProfile() } - // Create the network-segmented directory for the channel database. - ltndLog.Infof("Opening the main database, this might take a few " + - "minutes...") - - startOpenTime := time.Now() ctx := context.Background() ctx, cancel := context.WithCancel(ctx) defer cancel() - chanDbBackend, err := cfg.DB.GetBackend(ctx, - cfg.localDatabaseDir(), cfg.networkName(), - ) - if err != nil { - ltndLog.Error(err) - return err - } - - // Open the channeldb, which is dedicated to storing channel, and - // network related metadata. 
- chanDB, err := channeldb.CreateWithBackend( - chanDbBackend, - channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), - channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), - channeldb.OptionSetSyncFreelist(cfg.SyncFreelist), - channeldb.OptionDryRunMigration(cfg.DryRunMigration), - ) + localChanDB, remoteChanDB, cleanUp, err := initializeDatabases(ctx, cfg) switch { case err == channeldb.ErrDryRunMigrationOK: ltndLog.Infof("%v, exiting", err) return nil - case err != nil: - ltndLog.Errorf("Unable to open channeldb: %v", err) - return err + return fmt.Errorf("unable to open databases: %v", err) } - defer chanDB.Close() - openTime := time.Since(startOpenTime) - ltndLog.Infof("Database now open (time_to_open=%v)!", openTime) + defer cleanUp() // Only process macaroons if --no-macaroons isn't set. - tlsCfg, restCreds, restProxyDest, err := getTLSConfig(cfg) + tlsCfg, restCreds, restProxyDest, cleanUp, err := getTLSConfig(cfg) if err != nil { err := fmt.Errorf("unable to load TLS credentials: %v", err) ltndLog.Error(err) return err } + defer cleanUp() + serverCreds := credentials.NewTLS(tlsCfg) serverOpts := []grpc.ServerOption{grpc.Creds(serverCreds)} @@ -406,6 +386,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { walletInitParams = *params privateWalletPw = walletInitParams.Password publicWalletPw = walletInitParams.Password + defer func() { + if err := walletInitParams.UnloadWallet(); err != nil { + ltndLog.Errorf("Could not unload wallet: %v", err) + } + }() if walletInitParams.RecoveryWindow > 0 { ltndLog.Infof("Wallet recovery mode enabled with "+ @@ -418,7 +403,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { if !cfg.NoMacaroons { // Create the macaroon authentication/authorization service. 
macaroonService, err = macaroons.NewService( - cfg.networkDir, macaroons.IPLockChecker, + cfg.networkDir, "lnd", macaroons.IPLockChecker, ) if err != nil { err := fmt.Errorf("unable to set up macaroon "+ @@ -456,8 +441,12 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { // With the information parsed from the configuration, create valid // instances of the pertinent interfaces required to operate the // Lightning Network Daemon. + // + // When we create the chain control, we need storage for the height + // hints and also the wallet itself, for these two we want them to be + // replicated, so we'll pass in the remote channel DB instance. activeChainControl, err := newChainControlFromConfig( - cfg, chanDB, privateWalletPw, publicWalletPw, + cfg, localChanDB, remoteChanDB, privateWalletPw, publicWalletPw, walletInitParams.Birthday, walletInitParams.RecoveryWindow, walletInitParams.Wallet, neutrinoCS, ) @@ -535,7 +524,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { towerDBDir := filepath.Join( cfg.Watchtower.TowerDir, cfg.registeredChains.PrimaryChain().String(), - normalizeNetwork(activeNetParams.Name), + normalizeNetwork(cfg.ActiveNetParams.Name), ) towerDB, err := wtdb.OpenTowerDB(towerDBDir) @@ -573,7 +562,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { towerKeyDesc, activeChainControl.keyRing, ), PublishTx: activeChainControl.wallet.PublishTransaction, - ChainHash: *activeNetParams.GenesisHash, + ChainHash: *cfg.ActiveNetParams.GenesisHash, } // If there is a tor controller (user wants auto hidden services), then @@ -612,9 +601,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { // Set up the core server which will listen for incoming peer // connections. 
server, err := newServer( - cfg, cfg.Listeners, chanDB, towerClientDB, activeChainControl, - &idKeyDesc, walletInitParams.ChansToRestore, chainedAcceptor, - torController, + cfg, cfg.Listeners, localChanDB, remoteChanDB, towerClientDB, + activeChainControl, &idKeyDesc, walletInitParams.ChansToRestore, + chainedAcceptor, torController, ) if err != nil { err := fmt.Errorf("unable to create server: %v", err) @@ -625,7 +614,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { // Set up an autopilot manager from the current config. This will be // used to manage the underlying autopilot agent, starting and stopping // it at will. - atplCfg, err := initAutoPilot(server, cfg.Autopilot, mainChain) + atplCfg, err := initAutoPilot(server, cfg.Autopilot, mainChain, cfg.ActiveNetParams) if err != nil { err := fmt.Errorf("unable to initialize autopilot: %v", err) ltndLog.Error(err) @@ -767,7 +756,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, shutdownChan <-chan struct{}) error { // getTLSConfig returns a TLS configuration for the gRPC server and credentials // and a proxy destination for the REST reverse proxy. func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, - string, error) { + string, func(), error) { // Ensure we create TLS key and certificate if they don't exist. 
if !fileExists(cfg.TLSCertPath) && !fileExists(cfg.TLSKeyPath) { @@ -775,10 +764,10 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, err := cert.GenCertPair( "lnd autogenerated cert", cfg.TLSCertPath, cfg.TLSKeyPath, cfg.TLSExtraIPs, cfg.TLSExtraDomains, - cert.DefaultAutogenValidity, + cfg.TLSDisableAutofill, cert.DefaultAutogenValidity, ) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } rpcsLog.Infof("Done generating TLS certificates") } @@ -787,7 +776,7 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, cfg.TLSCertPath, cfg.TLSKeyPath, ) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } // We check whether the certifcate we have on disk match the IPs and @@ -797,10 +786,11 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, refresh := false if cfg.TLSAutoRefresh { refresh, err = cert.IsOutdated( - parsedCert, cfg.TLSExtraIPs, cfg.TLSExtraDomains, + parsedCert, cfg.TLSExtraIPs, + cfg.TLSExtraDomains, cfg.TLSDisableAutofill, ) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } } @@ -812,22 +802,22 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, err := os.Remove(cfg.TLSCertPath) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } err = os.Remove(cfg.TLSKeyPath) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } rpcsLog.Infof("Renewing TLS certificates...") err = cert.GenCertPair( "lnd autogenerated cert", cfg.TLSCertPath, cfg.TLSKeyPath, cfg.TLSExtraIPs, cfg.TLSExtraDomains, - cert.DefaultAutogenValidity, + cfg.TLSDisableAutofill, cert.DefaultAutogenValidity, ) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } rpcsLog.Infof("Done renewing TLS certificates") @@ -836,14 +826,15 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, cfg.TLSCertPath, 
cfg.TLSKeyPath, ) if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } } tlsCfg := cert.TLSConfFromCert(certData) + restCreds, err := credentials.NewClientTLSFromFile(cfg.TLSCertPath, "") if err != nil { - return nil, nil, "", err + return nil, nil, "", nil, err } restProxyDest := cfg.RPCListeners[0].String() @@ -859,7 +850,64 @@ func getTLSConfig(cfg *Config) (*tls.Config, *credentials.TransportCredentials, ) } - return tlsCfg, &restCreds, restProxyDest, nil + // If Let's Encrypt is enabled, instantiate autocert to request/renew + // the certificates. + cleanUp := func() {} + if cfg.LetsEncryptDomain != "" { + ltndLog.Infof("Using Let's Encrypt certificate for domain %v", + cfg.LetsEncryptDomain) + + manager := autocert.Manager{ + Cache: autocert.DirCache(cfg.LetsEncryptDir), + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist(cfg.LetsEncryptDomain), + } + + srv := &http.Server{ + Addr: cfg.LetsEncryptListen, + Handler: manager.HTTPHandler(nil), + } + shutdownCompleted := make(chan struct{}) + cleanUp = func() { + err := srv.Shutdown(context.Background()) + if err != nil { + ltndLog.Errorf("Autocert listener shutdown "+ + " error: %v", err) + + return + } + <-shutdownCompleted + ltndLog.Infof("Autocert challenge listener stopped") + } + + go func() { + ltndLog.Infof("Autocert challenge listener started "+ + "at %v", cfg.LetsEncryptListen) + + err := srv.ListenAndServe() + if err != http.ErrServerClosed { + ltndLog.Errorf("autocert http: %v", err) + } + close(shutdownCompleted) + }() + + getCertificate := func(h *tls.ClientHelloInfo) ( + *tls.Certificate, error) { + + lecert, err := manager.GetCertificate(h) + if err != nil { + ltndLog.Errorf("GetCertificate: %v", err) + return &certData, nil + } + + return lecert, err + } + + // The self-signed tls.cert remains available as fallback. 
+ tlsCfg.GetCertificate = getCertificate + } + + return tlsCfg, &restCreds, restProxyDest, cleanUp, nil } // fileExists reports whether the named file or directory exists. @@ -883,8 +931,8 @@ func genMacaroons(ctx context.Context, svc *macaroons.Service, // access invoice related calls. This is useful for merchants and other // services to allow an isolated instance that can only query and // modify invoices. - invoiceMac, err := svc.Oven.NewMacaroon( - ctx, bakery.LatestVersion, nil, invoicePermissions..., + invoiceMac, err := svc.NewMacaroon( + ctx, macaroons.DefaultRootKeyID, invoicePermissions..., ) if err != nil { return err @@ -900,8 +948,8 @@ func genMacaroons(ctx context.Context, svc *macaroons.Service, } // Generate the read-only macaroon and write it to a file. - roMacaroon, err := svc.Oven.NewMacaroon( - ctx, bakery.LatestVersion, nil, readPermissions..., + roMacaroon, err := svc.NewMacaroon( + ctx, macaroons.DefaultRootKeyID, readPermissions..., ) if err != nil { return err @@ -917,8 +965,8 @@ func genMacaroons(ctx context.Context, svc *macaroons.Service, // Generate the admin macaroon and write it to a file. adminPermissions := append(readPermissions, writePermissions...) - admMacaroon, err := svc.Oven.NewMacaroon( - ctx, bakery.LatestVersion, nil, adminPermissions..., + admMacaroon, err := svc.NewMacaroon( + ctx, macaroons.DefaultRootKeyID, adminPermissions..., ) if err != nil { return err @@ -958,6 +1006,10 @@ type WalletUnlockParams struct { // ChansToRestore a set of static channel backups that should be // restored before the main server instance starts up. ChansToRestore walletunlocker.ChannelsToRecover + + // UnloadWallet is a function for unloading the wallet, which should + // be called on shutdown. 
+ UnloadWallet func() error } // waitForWalletPassword will spin up gRPC and REST endpoints for the @@ -995,7 +1047,7 @@ func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, cfg.AdminMacPath, cfg.ReadMacPath, cfg.InvoiceMacPath, } pwService := walletunlocker.New( - chainConfig.ChainDir, activeNetParams.Params, !cfg.SyncFreelist, + chainConfig.ChainDir, cfg.ActiveNetParams.Params, !cfg.SyncFreelist, macaroonFiles, ) lnrpc.RegisterWalletUnlockerServer(grpcServer, pwService) @@ -1032,7 +1084,7 @@ func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, return nil, err } - srv := &http.Server{Handler: mux} + srv := &http.Server{Handler: allowCORS(mux, cfg.RestCORS)} for _, restEndpoint := range restEndpoints { lis, err := lncfg.TLSListenOnAddress(restEndpoint, tlsConf) @@ -1090,10 +1142,10 @@ func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, } netDir := btcwallet.NetworkDir( - chainConfig.ChainDir, activeNetParams.Params, + chainConfig.ChainDir, cfg.ActiveNetParams.Params, ) loader := wallet.NewLoader( - activeNetParams.Params, netDir, !cfg.SyncFreelist, + cfg.ActiveNetParams.Params, netDir, !cfg.SyncFreelist, recoveryWindow, ) @@ -1119,6 +1171,7 @@ func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, RecoveryWindow: recoveryWindow, Wallet: newWallet, ChansToRestore: initMsg.ChanBackups, + UnloadWallet: loader.UnloadWallet, }, nil // The wallet has already been created in the past, and is simply being @@ -1129,9 +1182,132 @@ func waitForWalletPassword(cfg *Config, restEndpoints []net.Addr, RecoveryWindow: unlockMsg.RecoveryWindow, Wallet: unlockMsg.Wallet, ChansToRestore: unlockMsg.ChanBackups, + UnloadWallet: unlockMsg.UnloadWallet, }, nil case <-signal.ShutdownChannel(): return nil, fmt.Errorf("shutting down") } } + +// initializeDatabases extracts the current databases that we'll use for normal +// operation in the daemon. Two databases are returned: one remote and one +// local. 
However, only if the replicated database is active will the remote +// database point to a unique database. Otherwise, the local and remote DB will +// both point to the same local database. A function closure that closes all +// opened databases is also returned. +func initializeDatabases(ctx context.Context, + cfg *Config) (*channeldb.DB, *channeldb.DB, func(), error) { + + ltndLog.Infof("Opening the main database, this might take a few " + + "minutes...") + + if cfg.DB.Backend == lncfg.BoltBackend { + ltndLog.Infof("Opening bbolt database, sync_freelist=%v", + cfg.DB.Bolt.SyncFreelist) + } + + startOpenTime := time.Now() + + databaseBackends, err := cfg.DB.GetBackends( + ctx, cfg.localDatabaseDir(), cfg.networkName(), + ) + if err != nil { + return nil, nil, nil, fmt.Errorf("unable to obtain database "+ + "backends: %v", err) + } + + // If the remoteDB is nil, then we'll just open a local DB as normal, + // having the remote and local pointer be the exact same instance. + var ( + localChanDB, remoteChanDB *channeldb.DB + closeFuncs []func() + ) + if databaseBackends.RemoteDB == nil { + // Open the channeldb, which is dedicated to storing channel, + // and network related metadata. + localChanDB, err = channeldb.CreateWithBackend( + databaseBackends.LocalDB, + channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), + channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), + channeldb.OptionDryRunMigration(cfg.DryRunMigration), + ) + switch { + case err == channeldb.ErrDryRunMigrationOK: + return nil, nil, nil, err + + case err != nil: + err := fmt.Errorf("unable to open local channeldb: %v", err) + ltndLog.Error(err) + return nil, nil, nil, err + } + + closeFuncs = append(closeFuncs, func() { + localChanDB.Close() + }) + + remoteChanDB = localChanDB + } else { + ltndLog.Infof("Database replication is available! 
Creating " + + "local and remote channeldb instances") + + // Otherwise, we'll open two instances, one for the state we + // only need locally, and the other for things we want to + // ensure are replicated. + localChanDB, err = channeldb.CreateWithBackend( + databaseBackends.LocalDB, + channeldb.OptionSetRejectCacheSize(cfg.Caches.RejectCacheSize), + channeldb.OptionSetChannelCacheSize(cfg.Caches.ChannelCacheSize), + channeldb.OptionDryRunMigration(cfg.DryRunMigration), + ) + switch { + // As we want to allow both versions to get thru the dry run + // migration, we'll only exit the second time here once the + // remote instance has had a time to migrate as well. + case err == channeldb.ErrDryRunMigrationOK: + ltndLog.Infof("Local DB dry run migration successful") + + case err != nil: + err := fmt.Errorf("unable to open local channeldb: %v", err) + ltndLog.Error(err) + return nil, nil, nil, err + } + + closeFuncs = append(closeFuncs, func() { + localChanDB.Close() + }) + + ltndLog.Infof("Opening replicated database instance...") + + remoteChanDB, err = channeldb.CreateWithBackend( + databaseBackends.RemoteDB, + channeldb.OptionDryRunMigration(cfg.DryRunMigration), + ) + switch { + case err == channeldb.ErrDryRunMigrationOK: + return nil, nil, nil, err + + case err != nil: + localChanDB.Close() + + err := fmt.Errorf("unable to open remote channeldb: %v", err) + ltndLog.Error(err) + return nil, nil, nil, err + } + + closeFuncs = append(closeFuncs, func() { + remoteChanDB.Close() + }) + } + + openTime := time.Since(startOpenTime) + ltndLog.Infof("Database now open (time_to_open=%v)!", openTime) + + cleanUp := func() { + for _, closeFunc := range closeFuncs { + closeFunc() + } + } + + return localChanDB, remoteChanDB, cleanUp, nil +} diff --git a/lnrpc/README.md b/lnrpc/README.md index 75f9f8f71..a6cf1514d 100644 --- a/lnrpc/README.md +++ b/lnrpc/README.md @@ -129,6 +129,11 @@ description): * BakeMacaroon * Bakes a new macaroon with the provided list of permissions and 
restrictions + * ListMacaroonIDs + * List all the macaroon root key IDs that are in use. + * DeleteMacaroonID + * Remove a specific macaroon root key ID from the database and invalidates + all macaroons derived from the key with that ID. ## Service: WalletUnlocker diff --git a/lnrpc/chainrpc/chainnotifier_server.go b/lnrpc/chainrpc/chainnotifier_server.go index afa53c013..fea947ff6 100644 --- a/lnrpc/chainrpc/chainnotifier_server.go +++ b/lnrpc/chainrpc/chainnotifier_server.go @@ -16,6 +16,7 @@ import ( "github.com/grpc-ecosystem/grpc-gateway/runtime" "github.com/lightningnetwork/lnd/chainntnfs" "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" ) @@ -67,7 +68,7 @@ var ( // ErrChainNotifierServerNotActive indicates that the chain notifier hasn't // finished the startup process. - ErrChainNotifierServerNotActive = errors.New("chain notifier RPC is" + + ErrChainNotifierServerNotActive = errors.New("chain notifier RPC is " + "still in the process of starting") ) @@ -119,8 +120,9 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { // At this point, we know that the chain notifier macaroon // doesn't yet, exist, so we need to create it with the help of // the main macaroon service. 
- chainNotifierMac, err := cfg.MacService.Oven.NewMacaroon( - context.Background(), bakery.LatestVersion, nil, + chainNotifierMac, err := cfg.MacService.NewMacaroon( + context.Background(), + macaroons.DefaultRootKeyID, macaroonOps..., ) if err != nil { diff --git a/lnrpc/invoicesrpc/addinvoice.go b/lnrpc/invoicesrpc/addinvoice.go index 7daf599cf..a8bb22d57 100644 --- a/lnrpc/invoicesrpc/addinvoice.go +++ b/lnrpc/invoicesrpc/addinvoice.go @@ -11,6 +11,7 @@ import ( "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" "github.com/btcsuite/btcutil" "github.com/davecgh/go-spew/spew" @@ -255,113 +256,16 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, return nil, nil, fmt.Errorf("could not fetch all channels") } - graph := cfg.ChanDB.ChannelGraph() - - numHints := 0 - for _, channel := range openChannels { + if len(openChannels) > 0 { // We'll restrict the number of individual route hints // to 20 to avoid creating overly large invoices. - if numHints >= 20 { - break - } - - // Since we're only interested in our private channels, - // we'll skip public ones. - isPublic := channel.ChannelFlags&lnwire.FFAnnounceChannel != 0 - if isPublic { - continue - } - - // Make sure the counterparty has enough balance in the - // channel for our amount. We do this in order to reduce - // payment errors when attempting to use this channel - // as a hint. - chanPoint := lnwire.NewChanIDFromOutPoint( - &channel.FundingOutpoint, + const numMaxHophints = 20 + hopHints := selectHopHints( + amtMSat, cfg, openChannels, numMaxHophints, ) - if amtMSat >= channel.LocalCommitment.RemoteBalance { - log.Debugf("Skipping channel %v due to "+ - "not having enough remote balance", - chanPoint) - continue - } - // Make sure the channel is active. 
- if !cfg.IsChannelActive(chanPoint) { - log.Debugf("Skipping channel %v due to not "+ - "being eligible to forward payments", - chanPoint) - continue - } - - // To ensure we don't leak unadvertised nodes, we'll - // make sure our counterparty is publicly advertised - // within the network. Otherwise, we'll end up leaking - // information about nodes that intend to stay - // unadvertised, like in the case of a node only having - // private channels. - var remotePub [33]byte - copy(remotePub[:], channel.IdentityPub.SerializeCompressed()) - isRemoteNodePublic, err := graph.IsPublicNode(remotePub) - if err != nil { - log.Errorf("Unable to determine if node %x "+ - "is advertised: %v", remotePub, err) - continue - } - - if !isRemoteNodePublic { - log.Debugf("Skipping channel %v due to "+ - "counterparty %x being unadvertised", - chanPoint, remotePub) - continue - } - - // Fetch the policies for each end of the channel. - chanID := channel.ShortChanID().ToUint64() - info, p1, p2, err := graph.FetchChannelEdgesByID(chanID) - if err != nil { - log.Errorf("Unable to fetch the routing "+ - "policies for the edges of the channel "+ - "%v: %v", chanPoint, err) - continue - } - - // Now, we'll need to determine which is the correct - // policy for HTLCs being sent from the remote node. - var remotePolicy *channeldb.ChannelEdgePolicy - if bytes.Equal(remotePub[:], info.NodeKey1Bytes[:]) { - remotePolicy = p1 - } else { - remotePolicy = p2 - } - - // If for some reason we don't yet have the edge for - // the remote party, then we'll just skip adding this - // channel as a routing hint. - if remotePolicy == nil { - continue - } - - // Finally, create the routing hint for this channel and - // add it to our list of route hints. 
- hint := zpay32.HopHint{ - NodeID: channel.IdentityPub, - ChannelID: chanID, - FeeBaseMSat: uint32(remotePolicy.FeeBaseMSat), - FeeProportionalMillionths: uint32( - remotePolicy.FeeProportionalMillionths, - ), - CLTVExpiryDelta: remotePolicy.TimeLockDelta, - } - - // Include the route hint in our set of options that - // will be used when creating the invoice. - routeHint := []zpay32.HopHint{hint} - options = append(options, zpay32.RouteHint(routeHint)) - - numHints++ + options = append(options, hopHints...) } - } // Set our desired invoice features and add them to our list of options. @@ -427,3 +331,181 @@ func AddInvoice(ctx context.Context, cfg *AddInvoiceConfig, return &paymentHash, newInvoice, nil } + +// chanCanBeHopHint returns true if the target channel is eligible to be a hop +// hint. +func chanCanBeHopHint(channel *channeldb.OpenChannel, + graph *channeldb.ChannelGraph, + cfg *AddInvoiceConfig) (*channeldb.ChannelEdgePolicy, bool) { + + // Since we're only interested in our private channels, we'll skip + // public ones. + isPublic := channel.ChannelFlags&lnwire.FFAnnounceChannel != 0 + if isPublic { + return nil, false + } + + // Make sure the channel is active. + chanPoint := lnwire.NewChanIDFromOutPoint( + &channel.FundingOutpoint, + ) + if !cfg.IsChannelActive(chanPoint) { + log.Debugf("Skipping channel %v due to not "+ + "being eligible to forward payments", + chanPoint) + return nil, false + } + + // To ensure we don't leak unadvertised nodes, we'll make sure our + // counterparty is publicly advertised within the network. Otherwise, + // we'll end up leaking information about nodes that intend to stay + // unadvertised, like in the case of a node only having private + // channels. 
+ var remotePub [33]byte + copy(remotePub[:], channel.IdentityPub.SerializeCompressed()) + isRemoteNodePublic, err := graph.IsPublicNode(remotePub) + if err != nil { + log.Errorf("Unable to determine if node %x "+ + "is advertised: %v", remotePub, err) + return nil, false + } + + if !isRemoteNodePublic { + log.Debugf("Skipping channel %v due to "+ + "counterparty %x being unadvertised", + chanPoint, remotePub) + return nil, false + } + + // Fetch the policies for each end of the channel. + chanID := channel.ShortChanID().ToUint64() + info, p1, p2, err := graph.FetchChannelEdgesByID(chanID) + if err != nil { + log.Errorf("Unable to fetch the routing "+ + "policies for the edges of the channel "+ + "%v: %v", chanPoint, err) + return nil, false + } + + // Now, we'll need to determine which is the correct policy for HTLCs + // being sent from the remote node. + var remotePolicy *channeldb.ChannelEdgePolicy + if bytes.Equal(remotePub[:], info.NodeKey1Bytes[:]) { + remotePolicy = p1 + } else { + remotePolicy = p2 + } + + return remotePolicy, true +} + +// addHopHint creates a hop hint out of the passed channel and channel policy. +// The new hop hint is appended to the passed slice. +func addHopHint(hopHints *[]func(*zpay32.Invoice), + channel *channeldb.OpenChannel, chanPolicy *channeldb.ChannelEdgePolicy) { + + hopHint := zpay32.HopHint{ + NodeID: channel.IdentityPub, + ChannelID: channel.ShortChanID().ToUint64(), + FeeBaseMSat: uint32(chanPolicy.FeeBaseMSat), + FeeProportionalMillionths: uint32( + chanPolicy.FeeProportionalMillionths, + ), + CLTVExpiryDelta: chanPolicy.TimeLockDelta, + } + *hopHints = append( + *hopHints, zpay32.RouteHint([]zpay32.HopHint{hopHint}), + ) +} + +// selectHopHints will select up to numMaxHophints from the set of passed open +// channels. The set of hop hints will be returned as a slice of functional +// options that'll append the route hint to the set of all route hints. 
+// +// TODO(roasbeef): do proper sub-set sum max hints usually << numChans +func selectHopHints(amtMSat lnwire.MilliSatoshi, cfg *AddInvoiceConfig, + openChannels []*channeldb.OpenChannel, + numMaxHophints int) []func(*zpay32.Invoice) { + + graph := cfg.ChanDB.ChannelGraph() + + // We'll add our hop hints in two passes, first we'll add all channels + // that are eligible to be hop hints, and also have a local balance + // above the payment amount. + var totalHintBandwidth lnwire.MilliSatoshi + hopHintChans := make(map[wire.OutPoint]struct{}) + hopHints := make([]func(*zpay32.Invoice), 0, numMaxHophints) + for _, channel := range openChannels { + // If this channel can't be a hop hint, then skip it. + edgePolicy, canBeHopHint := chanCanBeHopHint( + channel, graph, cfg, + ) + if edgePolicy == nil || !canBeHopHint { + continue + } + + // Similarly, in this first pass, we'll ignore all channels in + // isolation can't satisfy this payment. + if channel.LocalCommitment.RemoteBalance < amtMSat { + continue + } + + // Now that we now this channel use usable, add it as a hop + // hint and the indexes we'll use later. + addHopHint(&hopHints, channel, edgePolicy) + + hopHintChans[channel.FundingOutpoint] = struct{}{} + totalHintBandwidth += channel.LocalCommitment.RemoteBalance + } + + // If we have enough hop hints at this point, then we'll exit early. + // Otherwise, we'll continue to add more that may help out mpp users. + if len(hopHints) >= numMaxHophints { + return hopHints + } + + // In this second pass we'll add channels, and we'll either stop when + // we have 20 hop hints, we've run through all the available channels, + // or if the sum of available bandwidth in the routing hints exceeds 2x + // the payment amount. We do 2x here to account for a margin of error + // if some of the selected channels no longer become operable. 
+ hopHintFactor := lnwire.MilliSatoshi(2) + for i := 0; i < len(openChannels); i++ { + // If we hit either of our early termination conditions, then + // we'll break the loop here. + if totalHintBandwidth > amtMSat*hopHintFactor || + len(hopHints) >= numMaxHophints { + + break + } + + channel := openChannels[i] + + // Skip the channel if we already selected it. + if _, ok := hopHintChans[channel.FundingOutpoint]; ok { + continue + } + + // If the channel can't be a hop hint, then we'll skip it. + // Otherwise, we'll use the policy information to populate the + // hop hint. + remotePolicy, canBeHopHint := chanCanBeHopHint( + channel, graph, cfg, + ) + if !canBeHopHint || remotePolicy == nil { + continue + } + + // Include the route hint in our set of options that will be + // used when creating the invoice. + addHopHint(&hopHints, channel, remotePolicy) + + // As we've just added a new hop hint, we'll accumulate it's + // available balance now to update our tally. + // + // TODO(roasbeef): have a cut off based on min bandwidth? + totalHintBandwidth += channel.LocalCommitment.RemoteBalance + } + + return hopHints +} diff --git a/lnrpc/invoicesrpc/invoices_server.go b/lnrpc/invoicesrpc/invoices_server.go index 75c9b7c66..6ed36b0f5 100644 --- a/lnrpc/invoicesrpc/invoices_server.go +++ b/lnrpc/invoicesrpc/invoices_server.go @@ -15,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntypes" + "github.com/lightningnetwork/lnd/macaroons" ) const ( @@ -99,8 +100,8 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { // At this point, we know that the invoices macaroon doesn't // yet, exist, so we need to create it with the help of the // main macaroon service. 
- invoicesMac, err := cfg.MacService.Oven.NewMacaroon( - context.Background(), bakery.LatestVersion, nil, + invoicesMac, err := cfg.MacService.NewMacaroon( + context.Background(), macaroons.DefaultRootKeyID, macaroonOps..., ) if err != nil { diff --git a/lnrpc/rest-annotations.yaml b/lnrpc/rest-annotations.yaml index 8c6129013..fa29afc93 100644 --- a/lnrpc/rest-annotations.yaml +++ b/lnrpc/rest-annotations.yaml @@ -133,6 +133,12 @@ http: - selector: lnrpc.Lightning.BakeMacaroon post: "/v1/macaroon" body: "*" + - selector: lnrpc.Lightning.ListMacaroonIDs + get: "/v1/macaroon/ids" + - selector: lnrpc.Lightning.DeleteMacaroonID + delete: "/v1/macaroon/{root_key_id}" + - selector: lnrpc.Lightning.ListPermissions + get: "/v1/macaroon/permissions" # walletunlocker.proto - selector: lnrpc.WalletUnlocker.GenSeed diff --git a/lnrpc/routerrpc/config.go b/lnrpc/routerrpc/config.go index 045561dbf..1d1a6f8d4 100644 --- a/lnrpc/routerrpc/config.go +++ b/lnrpc/routerrpc/config.go @@ -46,9 +46,9 @@ func DefaultConfig() *Config { AprioriWeight: routing.DefaultAprioriWeight, MinRouteProbability: routing.DefaultMinRouteProbability, PenaltyHalfLife: routing.DefaultPenaltyHalfLife, - AttemptCost: routing.DefaultPaymentAttemptPenalty. 
- ToSatoshis(), - MaxMcHistory: routing.DefaultMaxMcHistory, + AttemptCost: routing.DefaultAttemptCost.ToSatoshis(), + AttemptCostPPM: routing.DefaultAttemptCostPPM, + MaxMcHistory: routing.DefaultMaxMcHistory, } return &Config{ @@ -63,6 +63,7 @@ func GetRoutingConfig(cfg *Config) *RoutingConfig { AprioriWeight: cfg.AprioriWeight, MinRouteProbability: cfg.MinRouteProbability, AttemptCost: cfg.AttemptCost, + AttemptCostPPM: cfg.AttemptCostPPM, PenaltyHalfLife: cfg.PenaltyHalfLife, MaxMcHistory: cfg.MaxMcHistory, } diff --git a/lnrpc/routerrpc/router_server.go b/lnrpc/routerrpc/router_server.go index e166d937a..d6cd505d9 100644 --- a/lnrpc/routerrpc/router_server.go +++ b/lnrpc/routerrpc/router_server.go @@ -15,6 +15,7 @@ import ( "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntypes" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/routing" "github.com/lightningnetwork/lnd/routing/route" @@ -164,8 +165,8 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { // At this point, we know that the router macaroon doesn't yet, // exist, so we need to create it with the help of the main // macaroon service. - routerMac, err := cfg.MacService.Oven.NewMacaroon( - context.Background(), bakery.LatestVersion, nil, + routerMac, err := cfg.MacService.NewMacaroon( + context.Background(), macaroons.DefaultRootKeyID, macaroonOps..., ) if err != nil { @@ -319,11 +320,13 @@ func (s *Server) EstimateRouteFee(ctx context.Context, // that target amount, we'll only request a single route. Set a // restriction for the default CLTV limit, otherwise we can find a route // that exceeds it and is useless to us. 
+ mc := s.cfg.RouterBackend.MissionControl route, err := s.cfg.Router.FindRoute( s.cfg.RouterBackend.SelfNode, destNode, amtMsat, &routing.RestrictParams{ - FeeLimit: feeLimit, - CltvLimit: s.cfg.RouterBackend.MaxTotalTimelock, + FeeLimit: feeLimit, + CltvLimit: s.cfg.RouterBackend.MaxTotalTimelock, + ProbabilitySource: mc.GetProbability, }, nil, nil, s.cfg.RouterBackend.DefaultFinalCltvDelta, ) if err != nil { diff --git a/lnrpc/routerrpc/routing_config.go b/lnrpc/routerrpc/routing_config.go index 98b3594cf..dd0fe93d1 100644 --- a/lnrpc/routerrpc/routing_config.go +++ b/lnrpc/routerrpc/routing_config.go @@ -29,10 +29,16 @@ type RoutingConfig struct { // channel is back at 50% probability. PenaltyHalfLife time.Duration `long:"penaltyhalflife" description:"Defines the duration after which a penalized node or channel is back at 50% probability"` - // AttemptCost is the virtual cost in path finding weight units of - // executing a payment attempt that fails. It is used to trade off - // potentially better routes against their probability of succeeding. - AttemptCost btcutil.Amount `long:"attemptcost" description:"The (virtual) cost in sats of a failed payment attempt"` + // AttemptCost is the fixed virtual cost in path finding of a failed + // payment attempt. It is used to trade off potentially better routes + // against their probability of succeeding. + AttemptCost btcutil.Amount `long:"attemptcost" description:"The fixed (virtual) cost in sats of a failed payment attempt"` + + // AttemptCostPPM is the proportional virtual cost in path finding of a + // failed payment attempt. It is used to trade off potentially better + // routes against their probability of succeeding. This parameter is + // expressed in parts per million of the total payment amount. 
+ AttemptCostPPM int64 `long:"attemptcostppm" description:"The proportional (virtual) cost in sats of a failed payment attempt expressed in parts per million of the total payment amount"` // MaxMcHistory defines the maximum number of payment results that // are held on disk by mission control. diff --git a/lnrpc/rpc.pb.go b/lnrpc/rpc.pb.go index 4d404792b..3db459330 100644 --- a/lnrpc/rpc.pb.go +++ b/lnrpc/rpc.pb.go @@ -760,7 +760,7 @@ func (x Failure_FailureCode) String() string { } func (Failure_FailureCode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{150, 0} + return fileDescriptor_77a6da22d6a3feb1, []int{157, 0} } type Utxo struct { @@ -2588,7 +2588,11 @@ type ConnectPeerRequest struct { Addr *LightningAddress `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` // If set, the daemon will attempt to persistently connect to the target // peer. Otherwise, the call will be synchronous. - Perm bool `protobuf:"varint,2,opt,name=perm,proto3" json:"perm,omitempty"` + Perm bool `protobuf:"varint,2,opt,name=perm,proto3" json:"perm,omitempty"` + // + //The connection timeout value (in seconds) for this request. It won't affect + //other requests. + Timeout uint64 `protobuf:"varint,3,opt,name=timeout,proto3" json:"timeout,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2633,6 +2637,13 @@ func (m *ConnectPeerRequest) GetPerm() bool { return false } +func (m *ConnectPeerRequest) GetTimeout() uint64 { + if m != nil { + return m.Timeout + } + return 0 +} + type ConnectPeerResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -3728,10 +3739,20 @@ type Peer struct { //are not persisted across lnd restarts. Note that these errors are only //stored for peers that we have channels open with, to prevent peers from //spamming us with errors at no cost. 
- Errors []*TimestampedError `protobuf:"bytes,12,rep,name=errors,proto3" json:"errors,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Errors []*TimestampedError `protobuf:"bytes,12,rep,name=errors,proto3" json:"errors,omitempty"` + // + //The number of times we have recorded this peer going offline or coming + //online, recorded across restarts. Note that this value is decreased over + //time if the peer has not recently flapped, so that we can forgive peers + //with historically high flap counts. + FlapCount int32 `protobuf:"varint,13,opt,name=flap_count,json=flapCount,proto3" json:"flap_count,omitempty"` + // + //The timestamp of the last flap we observed for this peer. If this value is + //zero, we have not observed any flaps for this peer. + LastFlapNs int64 `protobuf:"varint,14,opt,name=last_flap_ns,json=lastFlapNs,proto3" json:"last_flap_ns,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *Peer) Reset() { *m = Peer{} } @@ -3836,6 +3857,20 @@ func (m *Peer) GetErrors() []*TimestampedError { return nil } +func (m *Peer) GetFlapCount() int32 { + if m != nil { + return m.FlapCount + } + return 0 +} + +func (m *Peer) GetLastFlapNs() int64 { + if m != nil { + return m.LastFlapNs + } + return 0 +} + type TimestampedError struct { // The unix timestamp in seconds when the error occurred. Timestamp uint64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` @@ -4889,10 +4924,14 @@ type OpenChannelRequest struct { // //The maximum amount of coins in millisatoshi that can be pending within //the channel. It only applies to the remote party. 
- RemoteMaxValueInFlightMsat uint64 `protobuf:"varint,15,opt,name=remote_max_value_in_flight_msat,json=remoteMaxValueInFlightMsat,proto3" json:"remote_max_value_in_flight_msat,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + RemoteMaxValueInFlightMsat uint64 `protobuf:"varint,15,opt,name=remote_max_value_in_flight_msat,json=remoteMaxValueInFlightMsat,proto3" json:"remote_max_value_in_flight_msat,omitempty"` + // + //The maximum number of concurrent HTLCs we will allow the remote party to add + //to the commitment transaction. + RemoteMaxHtlcs uint32 `protobuf:"varint,16,opt,name=remote_max_htlcs,json=remoteMaxHtlcs,proto3" json:"remote_max_htlcs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *OpenChannelRequest) Reset() { *m = OpenChannelRequest{} } @@ -5019,6 +5058,13 @@ func (m *OpenChannelRequest) GetRemoteMaxValueInFlightMsat() uint64 { return 0 } +func (m *OpenChannelRequest) GetRemoteMaxHtlcs() uint32 { + if m != nil { + return m.RemoteMaxHtlcs + } + return 0 +} + type OpenStatusUpdate struct { // Types that are valid to be assigned to Update: // *OpenStatusUpdate_ChanPending @@ -5566,10 +5612,15 @@ type FundingPsbtFinalize struct { // //The funded PSBT that contains all witness data to send the exact channel //capacity amount to the PK script returned in the open channel message in a - //previous step. + //previous step. Cannot be set at the same time as final_raw_tx. SignedPsbt []byte `protobuf:"bytes,1,opt,name=signed_psbt,json=signedPsbt,proto3" json:"signed_psbt,omitempty"` // The pending channel ID of the channel to get the PSBT for. 
- PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + PendingChanId []byte `protobuf:"bytes,2,opt,name=pending_chan_id,json=pendingChanId,proto3" json:"pending_chan_id,omitempty"` + // + //As an alternative to the signed PSBT with all witness data, the final raw + //wire format transaction can also be specified directly. Cannot be set at the + //same time as signed_psbt. + FinalRawTx []byte `protobuf:"bytes,3,opt,name=final_raw_tx,json=finalRawTx,proto3" json:"final_raw_tx,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -5614,6 +5665,13 @@ func (m *FundingPsbtFinalize) GetPendingChanId() []byte { return nil } +func (m *FundingPsbtFinalize) GetFinalRawTx() []byte { + if m != nil { + return m.FinalRawTx + } + return nil +} + type FundingTransitionMsg struct { // Types that are valid to be assigned to Trigger: // *FundingTransitionMsg_ShimRegister @@ -10029,10 +10087,11 @@ func (m *DeleteAllPaymentsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_DeleteAllPaymentsResponse proto.InternalMessageInfo type AbandonChannelRequest struct { - ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ChannelPoint *ChannelPoint `protobuf:"bytes,1,opt,name=channel_point,json=channelPoint,proto3" json:"channel_point,omitempty"` + PendingFundingShimOnly bool `protobuf:"varint,2,opt,name=pending_funding_shim_only,json=pendingFundingShimOnly,proto3" json:"pending_funding_shim_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *AbandonChannelRequest) Reset() { *m = AbandonChannelRequest{} } @@ -10067,6 +10126,13 @@ func (m *AbandonChannelRequest) GetChannelPoint() 
*ChannelPoint { return nil } +func (m *AbandonChannelRequest) GetPendingFundingShimOnly() bool { + if m != nil { + return m.PendingFundingShimOnly + } + return false +} + type AbandonChannelResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -11501,10 +11567,12 @@ func (m *MacaroonPermission) GetAction() string { type BakeMacaroonRequest struct { // The list of permissions the new macaroon should grant. - Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + // The root key ID used to create the macaroon, must be a positive integer. + RootKeyId uint64 `protobuf:"varint,2,opt,name=root_key_id,json=rootKeyId,proto3" json:"root_key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *BakeMacaroonRequest) Reset() { *m = BakeMacaroonRequest{} } @@ -11539,6 +11607,13 @@ func (m *BakeMacaroonRequest) GetPermissions() []*MacaroonPermission { return nil } +func (m *BakeMacaroonRequest) GetRootKeyId() uint64 { + if m != nil { + return m.RootKeyId + } + return 0 +} + type BakeMacaroonResponse struct { // The hex encoded macaroon, serialized in binary format. 
Macaroon string `protobuf:"bytes,1,opt,name=macaroon,proto3" json:"macaroon,omitempty"` @@ -11579,6 +11654,270 @@ func (m *BakeMacaroonResponse) GetMacaroon() string { return "" } +type ListMacaroonIDsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMacaroonIDsRequest) Reset() { *m = ListMacaroonIDsRequest{} } +func (m *ListMacaroonIDsRequest) String() string { return proto.CompactTextString(m) } +func (*ListMacaroonIDsRequest) ProtoMessage() {} +func (*ListMacaroonIDsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{150} +} + +func (m *ListMacaroonIDsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMacaroonIDsRequest.Unmarshal(m, b) +} +func (m *ListMacaroonIDsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMacaroonIDsRequest.Marshal(b, m, deterministic) +} +func (m *ListMacaroonIDsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMacaroonIDsRequest.Merge(m, src) +} +func (m *ListMacaroonIDsRequest) XXX_Size() int { + return xxx_messageInfo_ListMacaroonIDsRequest.Size(m) +} +func (m *ListMacaroonIDsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListMacaroonIDsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMacaroonIDsRequest proto.InternalMessageInfo + +type ListMacaroonIDsResponse struct { + // The list of root key IDs that are in use. 
+ RootKeyIds []uint64 `protobuf:"varint,1,rep,packed,name=root_key_ids,json=rootKeyIds,proto3" json:"root_key_ids,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListMacaroonIDsResponse) Reset() { *m = ListMacaroonIDsResponse{} } +func (m *ListMacaroonIDsResponse) String() string { return proto.CompactTextString(m) } +func (*ListMacaroonIDsResponse) ProtoMessage() {} +func (*ListMacaroonIDsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{151} +} + +func (m *ListMacaroonIDsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListMacaroonIDsResponse.Unmarshal(m, b) +} +func (m *ListMacaroonIDsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListMacaroonIDsResponse.Marshal(b, m, deterministic) +} +func (m *ListMacaroonIDsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMacaroonIDsResponse.Merge(m, src) +} +func (m *ListMacaroonIDsResponse) XXX_Size() int { + return xxx_messageInfo_ListMacaroonIDsResponse.Size(m) +} +func (m *ListMacaroonIDsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListMacaroonIDsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMacaroonIDsResponse proto.InternalMessageInfo + +func (m *ListMacaroonIDsResponse) GetRootKeyIds() []uint64 { + if m != nil { + return m.RootKeyIds + } + return nil +} + +type DeleteMacaroonIDRequest struct { + // The root key ID to be removed. 
+ RootKeyId uint64 `protobuf:"varint,1,opt,name=root_key_id,json=rootKeyId,proto3" json:"root_key_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMacaroonIDRequest) Reset() { *m = DeleteMacaroonIDRequest{} } +func (m *DeleteMacaroonIDRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteMacaroonIDRequest) ProtoMessage() {} +func (*DeleteMacaroonIDRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{152} +} + +func (m *DeleteMacaroonIDRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMacaroonIDRequest.Unmarshal(m, b) +} +func (m *DeleteMacaroonIDRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMacaroonIDRequest.Marshal(b, m, deterministic) +} +func (m *DeleteMacaroonIDRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMacaroonIDRequest.Merge(m, src) +} +func (m *DeleteMacaroonIDRequest) XXX_Size() int { + return xxx_messageInfo_DeleteMacaroonIDRequest.Size(m) +} +func (m *DeleteMacaroonIDRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMacaroonIDRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMacaroonIDRequest proto.InternalMessageInfo + +func (m *DeleteMacaroonIDRequest) GetRootKeyId() uint64 { + if m != nil { + return m.RootKeyId + } + return 0 +} + +type DeleteMacaroonIDResponse struct { + // A boolean indicates that the deletion is successful. 
+ Deleted bool `protobuf:"varint,1,opt,name=deleted,proto3" json:"deleted,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteMacaroonIDResponse) Reset() { *m = DeleteMacaroonIDResponse{} } +func (m *DeleteMacaroonIDResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteMacaroonIDResponse) ProtoMessage() {} +func (*DeleteMacaroonIDResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{153} +} + +func (m *DeleteMacaroonIDResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteMacaroonIDResponse.Unmarshal(m, b) +} +func (m *DeleteMacaroonIDResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteMacaroonIDResponse.Marshal(b, m, deterministic) +} +func (m *DeleteMacaroonIDResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteMacaroonIDResponse.Merge(m, src) +} +func (m *DeleteMacaroonIDResponse) XXX_Size() int { + return xxx_messageInfo_DeleteMacaroonIDResponse.Size(m) +} +func (m *DeleteMacaroonIDResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteMacaroonIDResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteMacaroonIDResponse proto.InternalMessageInfo + +func (m *DeleteMacaroonIDResponse) GetDeleted() bool { + if m != nil { + return m.Deleted + } + return false +} + +type MacaroonPermissionList struct { + // A list of macaroon permissions. 
+ Permissions []*MacaroonPermission `protobuf:"bytes,1,rep,name=permissions,proto3" json:"permissions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MacaroonPermissionList) Reset() { *m = MacaroonPermissionList{} } +func (m *MacaroonPermissionList) String() string { return proto.CompactTextString(m) } +func (*MacaroonPermissionList) ProtoMessage() {} +func (*MacaroonPermissionList) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{154} +} + +func (m *MacaroonPermissionList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MacaroonPermissionList.Unmarshal(m, b) +} +func (m *MacaroonPermissionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MacaroonPermissionList.Marshal(b, m, deterministic) +} +func (m *MacaroonPermissionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MacaroonPermissionList.Merge(m, src) +} +func (m *MacaroonPermissionList) XXX_Size() int { + return xxx_messageInfo_MacaroonPermissionList.Size(m) +} +func (m *MacaroonPermissionList) XXX_DiscardUnknown() { + xxx_messageInfo_MacaroonPermissionList.DiscardUnknown(m) +} + +var xxx_messageInfo_MacaroonPermissionList proto.InternalMessageInfo + +func (m *MacaroonPermissionList) GetPermissions() []*MacaroonPermission { + if m != nil { + return m.Permissions + } + return nil +} + +type ListPermissionsRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPermissionsRequest) Reset() { *m = ListPermissionsRequest{} } +func (m *ListPermissionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListPermissionsRequest) ProtoMessage() {} +func (*ListPermissionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{155} +} + +func (m *ListPermissionsRequest) XXX_Unmarshal(b []byte) error { + 
return xxx_messageInfo_ListPermissionsRequest.Unmarshal(m, b) +} +func (m *ListPermissionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListPermissionsRequest.Marshal(b, m, deterministic) +} +func (m *ListPermissionsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPermissionsRequest.Merge(m, src) +} +func (m *ListPermissionsRequest) XXX_Size() int { + return xxx_messageInfo_ListPermissionsRequest.Size(m) +} +func (m *ListPermissionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ListPermissionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPermissionsRequest proto.InternalMessageInfo + +type ListPermissionsResponse struct { + // + //A map between all RPC method URIs and their required macaroon permissions to + //access them. + MethodPermissions map[string]*MacaroonPermissionList `protobuf:"bytes,1,rep,name=method_permissions,json=methodPermissions,proto3" json:"method_permissions,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ListPermissionsResponse) Reset() { *m = ListPermissionsResponse{} } +func (m *ListPermissionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListPermissionsResponse) ProtoMessage() {} +func (*ListPermissionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{156} +} + +func (m *ListPermissionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ListPermissionsResponse.Unmarshal(m, b) +} +func (m *ListPermissionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ListPermissionsResponse.Marshal(b, m, deterministic) +} +func (m *ListPermissionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListPermissionsResponse.Merge(m, src) +} +func (m *ListPermissionsResponse) 
XXX_Size() int { + return xxx_messageInfo_ListPermissionsResponse.Size(m) +} +func (m *ListPermissionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ListPermissionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ListPermissionsResponse proto.InternalMessageInfo + +func (m *ListPermissionsResponse) GetMethodPermissions() map[string]*MacaroonPermissionList { + if m != nil { + return m.MethodPermissions + } + return nil +} + type Failure struct { // Failure code as defined in the Lightning spec Code Failure_FailureCode `protobuf:"varint,1,opt,name=code,proto3,enum=lnrpc.Failure_FailureCode" json:"code,omitempty"` @@ -11607,7 +11946,7 @@ func (m *Failure) Reset() { *m = Failure{} } func (m *Failure) String() string { return proto.CompactTextString(m) } func (*Failure) ProtoMessage() {} func (*Failure) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{150} + return fileDescriptor_77a6da22d6a3feb1, []int{157} } func (m *Failure) XXX_Unmarshal(b []byte) error { @@ -11751,7 +12090,7 @@ func (m *ChannelUpdate) Reset() { *m = ChannelUpdate{} } func (m *ChannelUpdate) String() string { return proto.CompactTextString(m) } func (*ChannelUpdate) ProtoMessage() {} func (*ChannelUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_77a6da22d6a3feb1, []int{151} + return fileDescriptor_77a6da22d6a3feb1, []int{158} } func (m *ChannelUpdate) XXX_Unmarshal(b []byte) error { @@ -11856,6 +12195,108 @@ func (m *ChannelUpdate) GetExtraOpaqueData() []byte { return nil } +type MacaroonId struct { + Nonce []byte `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` + StorageId []byte `protobuf:"bytes,2,opt,name=storageId,proto3" json:"storageId,omitempty"` + Ops []*Op `protobuf:"bytes,3,rep,name=ops,proto3" json:"ops,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *MacaroonId) Reset() { *m = MacaroonId{} } +func (m *MacaroonId) String() string { 
return proto.CompactTextString(m) } +func (*MacaroonId) ProtoMessage() {} +func (*MacaroonId) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{159} +} + +func (m *MacaroonId) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MacaroonId.Unmarshal(m, b) +} +func (m *MacaroonId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MacaroonId.Marshal(b, m, deterministic) +} +func (m *MacaroonId) XXX_Merge(src proto.Message) { + xxx_messageInfo_MacaroonId.Merge(m, src) +} +func (m *MacaroonId) XXX_Size() int { + return xxx_messageInfo_MacaroonId.Size(m) +} +func (m *MacaroonId) XXX_DiscardUnknown() { + xxx_messageInfo_MacaroonId.DiscardUnknown(m) +} + +var xxx_messageInfo_MacaroonId proto.InternalMessageInfo + +func (m *MacaroonId) GetNonce() []byte { + if m != nil { + return m.Nonce + } + return nil +} + +func (m *MacaroonId) GetStorageId() []byte { + if m != nil { + return m.StorageId + } + return nil +} + +func (m *MacaroonId) GetOps() []*Op { + if m != nil { + return m.Ops + } + return nil +} + +type Op struct { + Entity string `protobuf:"bytes,1,opt,name=entity,proto3" json:"entity,omitempty"` + Actions []string `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Op) Reset() { *m = Op{} } +func (m *Op) String() string { return proto.CompactTextString(m) } +func (*Op) ProtoMessage() {} +func (*Op) Descriptor() ([]byte, []int) { + return fileDescriptor_77a6da22d6a3feb1, []int{160} +} + +func (m *Op) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Op.Unmarshal(m, b) +} +func (m *Op) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Op.Marshal(b, m, deterministic) +} +func (m *Op) XXX_Merge(src proto.Message) { + xxx_messageInfo_Op.Merge(m, src) +} +func (m *Op) XXX_Size() int { + return xxx_messageInfo_Op.Size(m) 
+} +func (m *Op) XXX_DiscardUnknown() { + xxx_messageInfo_Op.DiscardUnknown(m) +} + +var xxx_messageInfo_Op proto.InternalMessageInfo + +func (m *Op) GetEntity() string { + if m != nil { + return m.Entity + } + return "" +} + +func (m *Op) GetActions() []string { + if m != nil { + return m.Actions + } + return nil +} + func init() { proto.RegisterEnum("lnrpc.AddressType", AddressType_name, AddressType_value) proto.RegisterEnum("lnrpc.CommitmentType", CommitmentType_name, CommitmentType_value) @@ -12043,742 +12484,775 @@ func init() { proto.RegisterType((*MacaroonPermission)(nil), "lnrpc.MacaroonPermission") proto.RegisterType((*BakeMacaroonRequest)(nil), "lnrpc.BakeMacaroonRequest") proto.RegisterType((*BakeMacaroonResponse)(nil), "lnrpc.BakeMacaroonResponse") + proto.RegisterType((*ListMacaroonIDsRequest)(nil), "lnrpc.ListMacaroonIDsRequest") + proto.RegisterType((*ListMacaroonIDsResponse)(nil), "lnrpc.ListMacaroonIDsResponse") + proto.RegisterType((*DeleteMacaroonIDRequest)(nil), "lnrpc.DeleteMacaroonIDRequest") + proto.RegisterType((*DeleteMacaroonIDResponse)(nil), "lnrpc.DeleteMacaroonIDResponse") + proto.RegisterType((*MacaroonPermissionList)(nil), "lnrpc.MacaroonPermissionList") + proto.RegisterType((*ListPermissionsRequest)(nil), "lnrpc.ListPermissionsRequest") + proto.RegisterType((*ListPermissionsResponse)(nil), "lnrpc.ListPermissionsResponse") + proto.RegisterMapType((map[string]*MacaroonPermissionList)(nil), "lnrpc.ListPermissionsResponse.MethodPermissionsEntry") proto.RegisterType((*Failure)(nil), "lnrpc.Failure") proto.RegisterType((*ChannelUpdate)(nil), "lnrpc.ChannelUpdate") + proto.RegisterType((*MacaroonId)(nil), "lnrpc.MacaroonId") + proto.RegisterType((*Op)(nil), "lnrpc.Op") } func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 11641 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0xbd, 0x5b, 0x6c, 0x23, 0x49, 
- 0x96, 0x18, 0x5a, 0x7c, 0x89, 0xe4, 0x21, 0x29, 0x51, 0xa1, 0x17, 0x4b, 0xd5, 0xd5, 0x55, 0x9d, - 0xdd, 0xd3, 0x5d, 0x53, 0xdd, 0xa3, 0xae, 0xae, 0x99, 0x7e, 0x4d, 0xdf, 0x9d, 0x19, 0x8a, 0xa2, - 0x4a, 0x9c, 0x92, 0x48, 0x4d, 0x92, 0xea, 0xde, 0x5e, 0xec, 0xdd, 0xdc, 0x14, 0x19, 0x92, 0xd2, - 0x45, 0x66, 0xb2, 0x33, 0x93, 0x2a, 0x69, 0x0c, 0xff, 0xad, 0x1f, 0x58, 0xd8, 0x06, 0x0c, 0x78, - 0x0d, 0xf8, 0xb1, 0xf0, 0x0b, 0xb6, 0xff, 0x16, 0x86, 0x67, 0xed, 0x2f, 0x7f, 0x7b, 0x01, 0xc3, - 0x86, 0x6d, 0x78, 0x0d, 0x3f, 0xb0, 0x58, 0xc0, 0x1f, 0x5e, 0x7f, 0x18, 0x30, 0x16, 0xf0, 0x97, - 0x3f, 0x0c, 0x18, 0x71, 0x4e, 0x44, 0x66, 0x24, 0x99, 0xaa, 0xaa, 0x9e, 0x6d, 0xcf, 0x8f, 0xc4, - 0x38, 0x71, 0xe2, 0x7d, 0xe2, 0xc4, 0x79, 0x45, 0x24, 0x94, 0xfd, 0xe9, 0x70, 0x67, 0xea, 0x7b, - 0xa1, 0xc7, 0x0a, 0x63, 0xd7, 0x9f, 0x0e, 0x8d, 0x3f, 0xca, 0x40, 0xfe, 0x24, 0xbc, 0xf2, 0xd8, - 0x87, 0x50, 0xb5, 0x47, 0x23, 0x9f, 0x07, 0x81, 0x15, 0x5e, 0x4f, 0x79, 0x23, 0x73, 0x3f, 0xf3, - 0x60, 0xf9, 0x31, 0xdb, 0x41, 0xb4, 0x9d, 0x26, 0x65, 0x0d, 0xae, 0xa7, 0xdc, 0xac, 0xd8, 0x71, - 0x82, 0x35, 0xa0, 0x28, 0x93, 0x8d, 0xec, 0xfd, 0xcc, 0x83, 0xb2, 0xa9, 0x92, 0xec, 0x2e, 0x80, - 0x3d, 0xf1, 0x66, 0x6e, 0x68, 0x05, 0x76, 0xd8, 0xc8, 0xdd, 0xcf, 0x3c, 0xc8, 0x99, 0x65, 0x82, - 0xf4, 0xed, 0x90, 0xdd, 0x81, 0xf2, 0xf4, 0x99, 0x15, 0x0c, 0x7d, 0x67, 0x1a, 0x36, 0xf2, 0x58, - 0xb4, 0x34, 0x7d, 0xd6, 0xc7, 0x34, 0x7b, 0x17, 0x4a, 0xde, 0x2c, 0x9c, 0x7a, 0x8e, 0x1b, 0x36, - 0x0a, 0xf7, 0x33, 0x0f, 0x2a, 0x8f, 0x57, 0x64, 0x47, 0x7a, 0xb3, 0xf0, 0x58, 0x80, 0xcd, 0x08, - 0x81, 0xbd, 0x05, 0xb5, 0xa1, 0xe7, 0x9e, 0x39, 0xfe, 0xc4, 0x0e, 0x1d, 0xcf, 0x0d, 0x1a, 0x4b, - 0xd8, 0x56, 0x12, 0x68, 0xfc, 0x8b, 0x2c, 0x54, 0x06, 0xbe, 0xed, 0x06, 0xf6, 0x50, 0x00, 0xd8, - 0x16, 0x14, 0xc3, 0x2b, 0xeb, 0xc2, 0x0e, 0x2e, 0x70, 0xa8, 0x65, 0x73, 0x29, 0xbc, 0x3a, 0xb0, - 0x83, 0x0b, 0xb6, 0x09, 0x4b, 0xd4, 0x4b, 0x1c, 0x50, 0xce, 0x94, 0x29, 0xf6, 0x2e, 0xac, 0xba, - 0xb3, 0x89, 0x95, 0x6c, 0x4a, 0x0c, 
0xab, 0x60, 0xd6, 0xdd, 0xd9, 0xa4, 0xa5, 0xc3, 0xc5, 0xe0, - 0x4f, 0xc7, 0xde, 0xf0, 0x19, 0x35, 0x40, 0xc3, 0x2b, 0x23, 0x04, 0xdb, 0x78, 0x03, 0xaa, 0x32, - 0x9b, 0x3b, 0xe7, 0x17, 0x34, 0xc6, 0x82, 0x59, 0x21, 0x04, 0x04, 0x89, 0x1a, 0x42, 0x67, 0xc2, - 0xad, 0x20, 0xb4, 0x27, 0x53, 0x39, 0xa4, 0xb2, 0x80, 0xf4, 0x05, 0x00, 0xb3, 0xbd, 0xd0, 0x1e, - 0x5b, 0x67, 0x9c, 0x07, 0x8d, 0xa2, 0xcc, 0x16, 0x90, 0x7d, 0xce, 0x03, 0xf6, 0x2d, 0x58, 0x1e, - 0xf1, 0x20, 0xb4, 0xe4, 0x62, 0xf0, 0xa0, 0x51, 0xba, 0x9f, 0x7b, 0x50, 0x36, 0x6b, 0x02, 0xda, - 0x54, 0x40, 0xf6, 0x1a, 0x80, 0x6f, 0x3f, 0xb7, 0xc4, 0x44, 0xf0, 0xab, 0x46, 0x99, 0x56, 0xc1, - 0xb7, 0x9f, 0x0f, 0xae, 0x0e, 0xf8, 0x15, 0x5b, 0x87, 0xc2, 0xd8, 0x3e, 0xe5, 0xe3, 0x06, 0x60, - 0x06, 0x25, 0x8c, 0x5f, 0x81, 0xcd, 0x27, 0x3c, 0xd4, 0xa6, 0x32, 0x30, 0xf9, 0x57, 0x33, 0x1e, - 0x84, 0x62, 0x54, 0x41, 0x68, 0xfb, 0xa1, 0x1a, 0x55, 0x86, 0x46, 0x85, 0xb0, 0x78, 0x54, 0xdc, - 0x1d, 0x29, 0x84, 0x2c, 0x22, 0x94, 0xb9, 0x3b, 0xa2, 0x6c, 0xe3, 0x10, 0x98, 0x56, 0xf1, 0x1e, - 0x0f, 0x6d, 0x67, 0x1c, 0xb0, 0x8f, 0xa0, 0x1a, 0x6a, 0xcd, 0x35, 0x32, 0xf7, 0x73, 0x0f, 0x2a, - 0x11, 0x69, 0x6a, 0x05, 0xcc, 0x04, 0x9e, 0x71, 0x01, 0xa5, 0x7d, 0xce, 0x0f, 0x9d, 0x89, 0x13, - 0xb2, 0x4d, 0x28, 0x9c, 0x39, 0x57, 0x7c, 0x84, 0x9d, 0xca, 0x1d, 0xdc, 0x32, 0x29, 0xc9, 0xee, - 0x01, 0xe0, 0x0f, 0x6b, 0x12, 0x51, 0xe9, 0xc1, 0x2d, 0xb3, 0x8c, 0xb0, 0xa3, 0xc0, 0x0e, 0xd9, - 0x36, 0x14, 0xa7, 0xdc, 0x1f, 0x72, 0x45, 0x0f, 0x07, 0xb7, 0x4c, 0x05, 0xd8, 0x2d, 0x42, 0x61, - 0x2c, 0x6a, 0x37, 0x7e, 0xaf, 0x00, 0x95, 0x3e, 0x77, 0x47, 0x6a, 0x26, 0x18, 0xe4, 0xc5, 0x44, - 0x63, 0x63, 0x55, 0x13, 0x7f, 0xb3, 0x37, 0xa1, 0x82, 0x4b, 0x12, 0x84, 0xbe, 0xe3, 0x9e, 0xd3, - 0x6e, 0xd9, 0xcd, 0x36, 0x32, 0x26, 0x08, 0x70, 0x1f, 0xa1, 0xac, 0x0e, 0x39, 0x7b, 0xa2, 0x76, - 0x8b, 0xf8, 0xc9, 0x6e, 0x43, 0xc9, 0x9e, 0x84, 0xd4, 0xbd, 0x2a, 0x82, 0x8b, 0xf6, 0x24, 0xc4, - 0xae, 0xbd, 0x01, 0xd5, 0xa9, 0x7d, 0x3d, 0xe1, 0x6e, 0x18, 0x93, 0x59, 
0xd5, 0xac, 0x48, 0x18, - 0x12, 0xda, 0x63, 0x58, 0xd3, 0x51, 0x54, 0xe3, 0x85, 0xa8, 0xf1, 0x55, 0x0d, 0x5b, 0xf6, 0xe1, - 0x1d, 0x58, 0x51, 0x65, 0x7c, 0x1a, 0x0f, 0x92, 0x5f, 0xd9, 0x5c, 0x96, 0x60, 0x35, 0xca, 0x07, - 0x50, 0x3f, 0x73, 0x5c, 0x7b, 0x6c, 0x0d, 0xc7, 0xe1, 0xa5, 0x35, 0xe2, 0xe3, 0xd0, 0x46, 0x4a, - 0x2c, 0x98, 0xcb, 0x08, 0x6f, 0x8d, 0xc3, 0xcb, 0x3d, 0x01, 0x65, 0xef, 0x41, 0xf9, 0x8c, 0x73, - 0x0b, 0x27, 0xab, 0x51, 0x4a, 0x6c, 0x68, 0xb5, 0x42, 0x66, 0xe9, 0x4c, 0xad, 0xd5, 0x7b, 0x50, - 0xf7, 0x66, 0xe1, 0xb9, 0xe7, 0xb8, 0xe7, 0xd6, 0xf0, 0xc2, 0x76, 0x2d, 0x67, 0x84, 0xb4, 0x99, - 0xdf, 0xcd, 0x3e, 0xca, 0x98, 0xcb, 0x2a, 0xaf, 0x75, 0x61, 0xbb, 0x9d, 0x11, 0x7b, 0x1b, 0x56, - 0xc6, 0x76, 0x10, 0x5a, 0x17, 0xde, 0xd4, 0x9a, 0xce, 0x4e, 0x9f, 0xf1, 0xeb, 0x46, 0x0d, 0x27, - 0xa2, 0x26, 0xc0, 0x07, 0xde, 0xf4, 0x18, 0x81, 0x82, 0xf4, 0xb0, 0x9f, 0xd4, 0x09, 0x41, 0xd2, - 0x35, 0xb3, 0x2c, 0x20, 0xd4, 0xe8, 0x97, 0xb0, 0x86, 0xcb, 0x33, 0x9c, 0x05, 0xa1, 0x37, 0xb1, - 0x7c, 0x3e, 0xf4, 0xfc, 0x51, 0xd0, 0xa8, 0x20, 0xad, 0x7d, 0x5b, 0x76, 0x56, 0x5b, 0xe3, 0x9d, - 0x3d, 0x1e, 0x84, 0x2d, 0x44, 0x36, 0x09, 0xb7, 0xed, 0x86, 0xfe, 0xb5, 0xb9, 0x3a, 0x9a, 0x87, - 0xb3, 0xf7, 0x80, 0xd9, 0xe3, 0xb1, 0xf7, 0xdc, 0x0a, 0xf8, 0xf8, 0xcc, 0x92, 0x93, 0xd8, 0x58, - 0xbe, 0x9f, 0x79, 0x50, 0x32, 0xeb, 0x98, 0xd3, 0xe7, 0xe3, 0xb3, 0x63, 0x82, 0xb3, 0x8f, 0x00, - 0x37, 0xa9, 0x75, 0xc6, 0xed, 0x70, 0xe6, 0xf3, 0xa0, 0xb1, 0x72, 0x3f, 0xf7, 0x60, 0xf9, 0xf1, - 0x6a, 0x34, 0x5f, 0x08, 0xde, 0x75, 0x42, 0xb3, 0x2a, 0xf0, 0x64, 0x3a, 0xd8, 0xde, 0x83, 0xcd, - 0xf4, 0x2e, 0x09, 0xa2, 0x12, 0xb3, 0x22, 0x88, 0x31, 0x6f, 0x8a, 0x9f, 0x62, 0x67, 0x5f, 0xda, - 0xe3, 0x19, 0x47, 0x2a, 0xac, 0x9a, 0x94, 0xf8, 0x7e, 0xf6, 0x93, 0x8c, 0xf1, 0xbb, 0x19, 0xa8, - 0xd2, 0x28, 0x83, 0xa9, 0xe7, 0x06, 0x9c, 0xbd, 0x09, 0x35, 0x45, 0x0d, 0xdc, 0xf7, 0x3d, 0x5f, - 0x72, 0x4b, 0x45, 0x79, 0x6d, 0x01, 0x63, 0xdf, 0x86, 0xba, 0x42, 0x9a, 0xfa, 0xdc, 0x99, 0xd8, - 0xe7, 0xaa, 
0x6a, 0x45, 0x4a, 0xc7, 0x12, 0xcc, 0x3e, 0x88, 0xeb, 0xf3, 0xbd, 0x59, 0xc8, 0x91, - 0xd6, 0x2b, 0x8f, 0xab, 0x72, 0x78, 0xa6, 0x80, 0x45, 0xb5, 0x63, 0xea, 0x15, 0xe8, 0xdc, 0xf8, - 0xad, 0x0c, 0x30, 0xd1, 0xed, 0x81, 0x47, 0x15, 0xc4, 0x1c, 0x29, 0x51, 0x32, 0xf3, 0xca, 0x3b, - 0x24, 0xfb, 0xa2, 0x1d, 0x62, 0x40, 0x81, 0xfa, 0x9e, 0x4f, 0xe9, 0x3b, 0x65, 0xfd, 0x38, 0x5f, - 0xca, 0xd5, 0xf3, 0xc6, 0x7f, 0xc9, 0xc1, 0xba, 0xa0, 0x53, 0x97, 0x8f, 0x9b, 0xc3, 0x21, 0x9f, - 0x46, 0x7b, 0xe7, 0x1e, 0x54, 0x5c, 0x6f, 0xc4, 0x15, 0xc5, 0x52, 0xc7, 0x40, 0x80, 0x34, 0x72, - 0xbd, 0xb0, 0x1d, 0x97, 0x3a, 0x4e, 0x93, 0x59, 0x46, 0x08, 0x76, 0xfb, 0x6d, 0x58, 0x99, 0x72, - 0x77, 0xa4, 0x6f, 0x91, 0x1c, 0x51, 0xbd, 0x04, 0xcb, 0xdd, 0x71, 0x0f, 0x2a, 0x67, 0x33, 0xc2, - 0x13, 0x8c, 0x25, 0x8f, 0x34, 0x00, 0x12, 0xd4, 0x24, 0xfe, 0x32, 0x9d, 0x05, 0x17, 0x98, 0x5b, - 0xc0, 0xdc, 0xa2, 0x48, 0x8b, 0xac, 0xbb, 0x00, 0xa3, 0x59, 0x10, 0xca, 0x1d, 0xb3, 0x84, 0x99, - 0x65, 0x01, 0xa1, 0x1d, 0xf3, 0x1d, 0x58, 0x9b, 0xd8, 0x57, 0x16, 0xd2, 0x8e, 0xe5, 0xb8, 0xd6, - 0xd9, 0x18, 0x99, 0x7a, 0x11, 0xf1, 0xea, 0x13, 0xfb, 0xea, 0x73, 0x91, 0xd3, 0x71, 0xf7, 0x11, - 0x2e, 0xd8, 0xca, 0x90, 0x66, 0xc2, 0xf2, 0x79, 0xc0, 0xfd, 0x4b, 0x8e, 0x9c, 0x20, 0x6f, 0x2e, - 0x4b, 0xb0, 0x49, 0x50, 0xd1, 0xa3, 0x89, 0x18, 0x77, 0x38, 0x1e, 0xd2, 0xb6, 0x37, 0x8b, 0x13, - 0xc7, 0x3d, 0x08, 0xc7, 0x43, 0x71, 0x5e, 0x09, 0x3e, 0x32, 0xe5, 0xbe, 0xf5, 0xec, 0x39, 0xee, - 0xe1, 0x3c, 0xf2, 0x8d, 0x63, 0xee, 0x3f, 0x7d, 0x2e, 0x44, 0x8a, 0x61, 0x80, 0x8c, 0xc8, 0xbe, - 0x6e, 0x54, 0x70, 0x83, 0x97, 0x86, 0x81, 0x60, 0x41, 0xf6, 0xb5, 0xd8, 0x84, 0xa2, 0xb7, 0x36, - 0xae, 0x02, 0x1f, 0x61, 0xf5, 0x01, 0x72, 0xd4, 0x1a, 0x76, 0xb6, 0x29, 0x33, 0x44, 0x3b, 0x81, - 0xa0, 0x7a, 0xd5, 0xd9, 0xb3, 0xb1, 0x7d, 0x1e, 0x20, 0x4b, 0xa9, 0x99, 0x55, 0x09, 0xdc, 0x17, - 0x30, 0xe3, 0x0b, 0xd8, 0x98, 0x5b, 0x5b, 0xb9, 0x67, 0x84, 0x08, 0x81, 0x10, 0x5c, 0xd7, 0x92, - 0x29, 0x53, 0x69, 0x8b, 0x96, 0x4d, 0x59, 0x34, 
0xe3, 0xb7, 0x33, 0x50, 0x95, 0x35, 0xa3, 0xb0, - 0xc3, 0x76, 0x80, 0xa9, 0x55, 0x0c, 0xaf, 0x9c, 0x91, 0x75, 0x7a, 0x1d, 0xf2, 0x80, 0x88, 0xe6, - 0xe0, 0x96, 0x59, 0x97, 0x79, 0x83, 0x2b, 0x67, 0xb4, 0x2b, 0x72, 0xd8, 0x43, 0xa8, 0x27, 0xf0, - 0x83, 0xd0, 0x27, 0x8a, 0x3e, 0xb8, 0x65, 0x2e, 0x6b, 0xd8, 0xfd, 0xd0, 0x17, 0x7b, 0x44, 0x88, - 0x52, 0xb3, 0xd0, 0x72, 0xdc, 0x11, 0xbf, 0x42, 0x32, 0xaa, 0x99, 0x15, 0x82, 0x75, 0x04, 0x68, - 0x77, 0x19, 0xaa, 0x7a, 0x75, 0xc6, 0x39, 0x94, 0x94, 0x1c, 0x86, 0x82, 0xc8, 0x5c, 0x97, 0xcc, - 0x72, 0x18, 0xf5, 0xe4, 0x36, 0x94, 0x92, 0x3d, 0x30, 0x8b, 0xe1, 0x2b, 0x37, 0x6c, 0xfc, 0x00, - 0xea, 0x87, 0x82, 0x78, 0x5c, 0x41, 0xac, 0x52, 0xae, 0xdc, 0x84, 0x25, 0x6d, 0xd3, 0x94, 0x4d, - 0x99, 0x12, 0x67, 0xee, 0x85, 0x17, 0x84, 0xb2, 0x15, 0xfc, 0x6d, 0xfc, 0x5e, 0x06, 0x58, 0x3b, - 0x08, 0x9d, 0x89, 0x1d, 0xf2, 0x7d, 0x1e, 0xb1, 0x85, 0x1e, 0x54, 0x45, 0x6d, 0x03, 0xaf, 0x49, - 0x82, 0x1e, 0x09, 0x14, 0xef, 0xca, 0x6d, 0xbc, 0x58, 0x60, 0x47, 0xc7, 0x26, 0x36, 0x9f, 0xa8, - 0x40, 0xec, 0xb2, 0xd0, 0xf6, 0xcf, 0x79, 0x88, 0xe2, 0xa1, 0x94, 0x6b, 0x80, 0x40, 0x42, 0x30, - 0xdc, 0xfe, 0x21, 0xac, 0x2e, 0xd4, 0xa1, 0xf3, 0xe5, 0x72, 0x0a, 0x5f, 0xce, 0xe9, 0x7c, 0xd9, - 0x82, 0xb5, 0x44, 0xbf, 0x24, 0xa5, 0x6d, 0x41, 0x51, 0x6c, 0x08, 0x21, 0x1c, 0x64, 0x48, 0x5a, - 0x3d, 0xe3, 0x5c, 0x88, 0xd7, 0xef, 0xc3, 0xfa, 0x19, 0xe7, 0xbe, 0x1d, 0x62, 0x26, 0xee, 0x18, - 0xb1, 0x42, 0xb2, 0xe2, 0x55, 0x99, 0xd7, 0xb7, 0xc3, 0x63, 0xee, 0x8b, 0x95, 0x32, 0xfe, 0x77, - 0x06, 0x56, 0x04, 0x07, 0x3d, 0xb2, 0xdd, 0x6b, 0x35, 0x4f, 0x87, 0xa9, 0xf3, 0xf4, 0x40, 0x3b, - 0x0c, 0x35, 0xec, 0xaf, 0x3b, 0x49, 0xb9, 0xf9, 0x49, 0x62, 0xf7, 0xa1, 0x9a, 0xe8, 0x6b, 0x01, - 0xfb, 0x0a, 0x41, 0xd4, 0xc9, 0x58, 0x22, 0x5d, 0xd2, 0x24, 0xd2, 0x3f, 0xf9, 0xe4, 0xbe, 0x0d, - 0xf5, 0x78, 0x30, 0x72, 0x66, 0x19, 0xe4, 0x05, 0xa1, 0xca, 0x0a, 0xf0, 0xb7, 0xf1, 0x4f, 0x32, - 0x84, 0xd8, 0xf2, 0x9c, 0x58, 0xea, 0x65, 0x90, 0x17, 0x52, 0xb6, 0x42, 0x14, 0xbf, 
0x6f, 0xd4, - 0x21, 0xbe, 0x81, 0x29, 0xb8, 0x0d, 0xa5, 0x40, 0x88, 0xd0, 0xf6, 0x98, 0x66, 0xa1, 0x64, 0x16, - 0x45, 0xba, 0x39, 0x1e, 0xc7, 0xb3, 0x53, 0xd4, 0xe5, 0xf5, 0x77, 0x60, 0x55, 0xeb, 0xf3, 0x0b, - 0x46, 0xd7, 0x05, 0x76, 0xe8, 0x04, 0xe1, 0x89, 0x1b, 0x4c, 0x35, 0x21, 0xef, 0x0e, 0x94, 0x05, - 0x37, 0x16, 0xfd, 0x0d, 0xa4, 0x44, 0x2f, 0xd8, 0xb3, 0xe8, 0x6d, 0x80, 0x99, 0xf6, 0x95, 0xcc, - 0xcc, 0xca, 0x4c, 0xfb, 0x0a, 0x33, 0x8d, 0x4f, 0x60, 0x2d, 0x51, 0x9f, 0x6c, 0xfa, 0x0d, 0x28, - 0xcc, 0xc2, 0x2b, 0x4f, 0x89, 0xf1, 0x15, 0x49, 0x4d, 0x42, 0x09, 0x35, 0x29, 0xc7, 0xf8, 0x0c, - 0x56, 0xbb, 0xfc, 0xb9, 0xdc, 0xf0, 0xaa, 0x23, 0x6f, 0x43, 0xfe, 0x25, 0x8a, 0x29, 0xe6, 0x1b, - 0x3b, 0xc0, 0xf4, 0xc2, 0xb2, 0x55, 0x4d, 0x4f, 0xcd, 0x24, 0xf4, 0x54, 0xe3, 0x6d, 0x60, 0x7d, - 0xe7, 0xdc, 0x3d, 0xe2, 0x41, 0x60, 0x9f, 0x47, 0x2c, 0xa2, 0x0e, 0xb9, 0x49, 0x70, 0x2e, 0xf9, - 0x99, 0xf8, 0x69, 0x7c, 0x17, 0xd6, 0x12, 0x78, 0xb2, 0xe2, 0xd7, 0xa0, 0x1c, 0x38, 0xe7, 0x2e, - 0x0a, 0x61, 0xb2, 0xea, 0x18, 0x60, 0xec, 0xc3, 0xfa, 0xe7, 0xdc, 0x77, 0xce, 0xae, 0x5f, 0x56, - 0x7d, 0xb2, 0x9e, 0xec, 0x7c, 0x3d, 0x6d, 0xd8, 0x98, 0xab, 0x47, 0x36, 0x4f, 0x44, 0x2d, 0x57, - 0xb2, 0x64, 0x52, 0x42, 0xe3, 0x91, 0x59, 0x9d, 0x47, 0x1a, 0x27, 0xc0, 0x5a, 0x9e, 0xeb, 0xf2, - 0x61, 0x78, 0xcc, 0xb9, 0xaf, 0x3a, 0xf3, 0xae, 0x46, 0xc1, 0x95, 0xc7, 0x5b, 0x72, 0x66, 0xe7, - 0x19, 0xaf, 0x24, 0x6d, 0x06, 0xf9, 0x29, 0xf7, 0x27, 0x58, 0x71, 0xc9, 0xc4, 0xdf, 0xc6, 0x06, - 0xac, 0x25, 0xaa, 0xa5, 0xbe, 0x19, 0x8f, 0x60, 0x63, 0xcf, 0x09, 0x86, 0x8b, 0x0d, 0x6e, 0x41, - 0x71, 0x3a, 0x3b, 0xb5, 0x92, 0x3c, 0xfc, 0x29, 0xbf, 0x36, 0x1a, 0xb0, 0x39, 0x5f, 0x42, 0xd6, - 0xf5, 0x1b, 0x19, 0xc8, 0x1f, 0x0c, 0x0e, 0x5b, 0x6c, 0x1b, 0x4a, 0x8e, 0x3b, 0xf4, 0x26, 0x42, - 0x48, 0xa3, 0x31, 0x47, 0xe9, 0x1b, 0xb7, 0xdd, 0x1d, 0x28, 0xa3, 0x6c, 0x27, 0xd4, 0x6b, 0x29, - 0x26, 0x95, 0x04, 0xe0, 0xd0, 0x1b, 0x3e, 0x13, 0x7a, 0x3d, 0xbf, 0x9a, 0x3a, 0x3e, 0x6a, 0xee, - 0x4a, 0x33, 0xcd, 0x93, 
0x5c, 0x10, 0x67, 0x48, 0x05, 0xf5, 0x37, 0xb2, 0xc0, 0xe4, 0xc9, 0xdc, - 0xf2, 0xdc, 0x20, 0xf4, 0x6d, 0xc7, 0x0d, 0x83, 0xa4, 0xe4, 0x91, 0x99, 0x93, 0x3c, 0x1e, 0x40, - 0x1d, 0x4f, 0x7b, 0x29, 0xf5, 0x20, 0xb3, 0xce, 0xc6, 0x92, 0x8f, 0x14, 0x7b, 0x04, 0xd3, 0x7e, - 0x0b, 0x96, 0x63, 0x81, 0x2b, 0x32, 0x9b, 0xe4, 0xcd, 0x6a, 0x24, 0x74, 0x49, 0xd6, 0x2e, 0x36, - 0x9d, 0x92, 0x24, 0x22, 0xed, 0x90, 0x64, 0xbb, 0xd5, 0x89, 0x7d, 0x75, 0xcc, 0x95, 0x78, 0x87, - 0x7a, 0xa2, 0x01, 0x35, 0x25, 0x50, 0x11, 0x26, 0xc9, 0x79, 0x15, 0x29, 0x55, 0x21, 0x4e, 0xba, - 0x78, 0xb4, 0x94, 0x2e, 0x1e, 0x19, 0xff, 0xb1, 0x0c, 0x45, 0x39, 0x0d, 0x24, 0xec, 0x84, 0xce, - 0x25, 0x8f, 0x85, 0x1d, 0x91, 0x12, 0x22, 0x94, 0xcf, 0x27, 0x5e, 0x18, 0xc9, 0xb8, 0x44, 0x8a, - 0x55, 0x02, 0x4a, 0x29, 0x57, 0x93, 0xb3, 0xc8, 0xda, 0x93, 0x23, 0xa4, 0xa1, 0x2e, 0xfd, 0xdc, - 0x81, 0xa2, 0x12, 0x97, 0xf2, 0x91, 0x1a, 0xb8, 0x34, 0x24, 0x01, 0x77, 0x1b, 0x4a, 0x43, 0x7b, - 0x6a, 0x0f, 0x9d, 0xf0, 0x5a, 0x72, 0xcb, 0x28, 0x2d, 0x6a, 0x1f, 0x7b, 0x43, 0x7b, 0x6c, 0x9d, - 0xda, 0x63, 0xdb, 0x1d, 0x72, 0x69, 0x46, 0xa9, 0x22, 0x70, 0x97, 0x60, 0xec, 0x5b, 0xb0, 0x2c, - 0xfb, 0xa9, 0xb0, 0xc8, 0x9a, 0x22, 0x7b, 0xaf, 0xd0, 0x84, 0x3c, 0xee, 0x4d, 0xc4, 0xba, 0x9c, - 0x71, 0x92, 0x5c, 0x73, 0x66, 0x99, 0x20, 0xfb, 0x1c, 0x47, 0x2b, 0xb3, 0x9f, 0x13, 0x05, 0x95, - 0xa9, 0x29, 0x02, 0x7e, 0x41, 0xd6, 0x8f, 0x45, 0xf1, 0x35, 0xa7, 0x89, 0xaf, 0xef, 0xc2, 0xea, - 0xcc, 0x0d, 0x78, 0x18, 0x8e, 0xf9, 0x28, 0xea, 0x4b, 0x05, 0x91, 0xea, 0x51, 0x86, 0xea, 0xce, - 0x0e, 0xac, 0x91, 0xfd, 0x27, 0xb0, 0x43, 0x2f, 0xb8, 0x70, 0x02, 0x2b, 0x10, 0x4a, 0x25, 0x59, - 0x08, 0x56, 0x31, 0xab, 0x2f, 0x73, 0xfa, 0xa4, 0x55, 0x6e, 0xcd, 0xe1, 0xfb, 0x7c, 0xc8, 0x9d, - 0x4b, 0x3e, 0x42, 0xd1, 0x36, 0x67, 0x6e, 0x24, 0xca, 0x98, 0x32, 0x13, 0xf5, 0x94, 0xd9, 0xc4, - 0x9a, 0x4d, 0x47, 0xb6, 0x90, 0xef, 0x96, 0x49, 0x7f, 0x70, 0x67, 0x93, 0x13, 0x82, 0xb0, 0x47, - 0xa0, 0x84, 0x57, 0x49, 0x33, 0x2b, 0x09, 0xb6, 0x2e, 0xf6, 
0xac, 0x59, 0x95, 0x18, 0x24, 0x5b, - 0xdf, 0xd3, 0x37, 0x4b, 0x5d, 0x50, 0x18, 0xea, 0x59, 0xf1, 0x86, 0x69, 0x40, 0x71, 0xea, 0x3b, - 0x97, 0x76, 0xc8, 0x1b, 0xab, 0x74, 0xc2, 0xc9, 0xa4, 0x60, 0x92, 0x8e, 0xeb, 0x84, 0x8e, 0x1d, - 0x7a, 0x7e, 0x83, 0x61, 0x5e, 0x0c, 0x60, 0x0f, 0x61, 0x15, 0xe9, 0x24, 0x08, 0xed, 0x70, 0x16, - 0x48, 0xc1, 0x7d, 0x0d, 0x09, 0x0a, 0x55, 0x8f, 0x3e, 0xc2, 0x51, 0x76, 0x67, 0x1f, 0xc3, 0x26, - 0x91, 0xc6, 0xc2, 0xd6, 0x5c, 0x17, 0xd3, 0x81, 0x3d, 0x5a, 0x43, 0x8c, 0x56, 0x72, 0x8f, 0x7e, - 0x0a, 0x5b, 0x92, 0x5c, 0x16, 0x4a, 0x6e, 0x44, 0x25, 0xd7, 0x09, 0x65, 0xae, 0xe8, 0x0e, 0xac, - 0x8a, 0xae, 0x39, 0x43, 0x4b, 0xd6, 0x20, 0x76, 0xc5, 0xa6, 0x18, 0x05, 0x16, 0x5a, 0xa1, 0x4c, - 0x13, 0xf3, 0x9e, 0xf2, 0x6b, 0xf6, 0x03, 0x58, 0x21, 0xf2, 0x41, 0xed, 0x14, 0x0f, 0xbf, 0x6d, - 0x3c, 0xfc, 0x36, 0xe4, 0xe4, 0xb6, 0xa2, 0x5c, 0x3c, 0xff, 0x96, 0x87, 0x89, 0xb4, 0xd8, 0x1a, - 0x63, 0xe7, 0x8c, 0x87, 0xce, 0x84, 0x37, 0xb6, 0x88, 0xd8, 0x54, 0x5a, 0xec, 0xda, 0xd9, 0x14, - 0x73, 0x1a, 0xc4, 0x2a, 0x29, 0x85, 0x74, 0x3c, 0xf6, 0x02, 0xae, 0x2c, 0x87, 0x8d, 0xdb, 0x72, - 0x43, 0x0a, 0xa0, 0x12, 0xc1, 0x85, 0x1e, 0x43, 0x3a, 0x63, 0x64, 0xdf, 0xbd, 0x83, 0x84, 0x51, - 0x23, 0xd5, 0x51, 0xd9, 0x78, 0x85, 0xb8, 0x73, 0x61, 0x3f, 0x57, 0x4c, 0xf5, 0x35, 0xe4, 0x26, - 0x20, 0x40, 0xd2, 0x1c, 0xb8, 0x0f, 0xab, 0x72, 0x15, 0x62, 0x66, 0xda, 0xb8, 0x8b, 0xc7, 0xd0, - 0x6d, 0x35, 0xc6, 0x05, 0x6e, 0x6b, 0xd6, 0x69, 0x5d, 0x34, 0xfe, 0x7b, 0x00, 0x4c, 0x2d, 0x8a, - 0x56, 0xd1, 0xeb, 0x2f, 0xab, 0x68, 0x55, 0x2e, 0x53, 0x0c, 0x32, 0x7e, 0x96, 0x21, 0xa9, 0x45, - 0x62, 0x07, 0x9a, 0xbe, 0x4e, 0x7c, 0xcd, 0xf2, 0xdc, 0xf1, 0xb5, 0x64, 0x75, 0x40, 0xa0, 0x9e, - 0x3b, 0x46, 0x5e, 0xe3, 0xb8, 0x3a, 0x0a, 0x1d, 0x90, 0x55, 0x05, 0x44, 0xa4, 0x7b, 0x50, 0x99, - 0xce, 0x4e, 0xc7, 0xce, 0x90, 0x50, 0x72, 0x54, 0x0b, 0x81, 0x10, 0xe1, 0x0d, 0xa8, 0x4a, 0x5a, - 0x27, 0x8c, 0x3c, 0x62, 0x54, 0x24, 0x0c, 0x51, 0xf0, 0x00, 0xe6, 0x3e, 0x32, 0xbb, 0xaa, 0x89, - 
0xbf, 0x8d, 0x5d, 0x58, 0x4f, 0x76, 0x5a, 0x4a, 0x07, 0x0f, 0xa1, 0x24, 0x39, 0xa9, 0xb2, 0x64, - 0x2d, 0x27, 0x67, 0xc3, 0x8c, 0xf2, 0x8d, 0xff, 0x54, 0x80, 0x35, 0x35, 0x47, 0x62, 0xb1, 0xfb, - 0xb3, 0xc9, 0xc4, 0xf6, 0x53, 0x58, 0x74, 0xe6, 0xc5, 0x2c, 0x3a, 0xbb, 0xc0, 0xa2, 0x93, 0xa6, - 0x0c, 0xe2, 0xf0, 0x49, 0x53, 0x86, 0xa0, 0x2e, 0xd2, 0x2e, 0x75, 0x83, 0x79, 0x4d, 0x82, 0x07, - 0x64, 0x98, 0x5f, 0x38, 0x50, 0x0a, 0x29, 0x07, 0x8a, 0x7e, 0x1c, 0x2c, 0xcd, 0x1d, 0x07, 0x6f, - 0x00, 0x91, 0xb1, 0xa2, 0xc7, 0x22, 0x29, 0x9c, 0x08, 0x93, 0x04, 0xf9, 0x0e, 0xac, 0xcc, 0x73, - 0x60, 0x62, 0xf5, 0xcb, 0x29, 0xfc, 0xd7, 0x99, 0x70, 0x14, 0x29, 0x34, 0xe4, 0xb2, 0xe4, 0xbf, - 0xce, 0x84, 0x1f, 0x62, 0x8e, 0xc2, 0x6f, 0x03, 0x50, 0xdb, 0xb8, 0x8d, 0x01, 0xb7, 0xf1, 0xdb, - 0x73, 0x94, 0xa9, 0xcd, 0xfa, 0x8e, 0x48, 0xcc, 0x7c, 0x8e, 0xfb, 0xba, 0x8c, 0x25, 0x71, 0x4b, - 0x7f, 0x0c, 0xcb, 0xde, 0x94, 0xbb, 0x56, 0xcc, 0x05, 0x2b, 0x58, 0x55, 0x5d, 0x56, 0xd5, 0x51, - 0x70, 0xb3, 0x26, 0xf0, 0xa2, 0x24, 0xfb, 0x94, 0x26, 0x99, 0x6b, 0x25, 0xab, 0x37, 0x94, 0x5c, - 0x46, 0xc4, 0xb8, 0xe8, 0x77, 0xa1, 0xe2, 0xf3, 0xc0, 0x1b, 0xcf, 0xc8, 0xfa, 0x5e, 0x43, 0x3a, - 0x52, 0xe6, 0x48, 0x33, 0xca, 0x31, 0x75, 0x2c, 0xe3, 0x37, 0x33, 0x50, 0xd1, 0xc6, 0xc0, 0x36, - 0x60, 0xb5, 0xd5, 0xeb, 0x1d, 0xb7, 0xcd, 0xe6, 0xa0, 0xf3, 0x79, 0xdb, 0x6a, 0x1d, 0xf6, 0xfa, - 0xed, 0xfa, 0x2d, 0x01, 0x3e, 0xec, 0xb5, 0x9a, 0x87, 0xd6, 0x7e, 0xcf, 0x6c, 0x29, 0x70, 0x86, - 0x6d, 0x02, 0x33, 0xdb, 0x47, 0xbd, 0x41, 0x3b, 0x01, 0xcf, 0xb2, 0x3a, 0x54, 0x77, 0xcd, 0x76, - 0xb3, 0x75, 0x20, 0x21, 0x39, 0xb6, 0x0e, 0xf5, 0xfd, 0x93, 0xee, 0x5e, 0xa7, 0xfb, 0xc4, 0x6a, - 0x35, 0xbb, 0xad, 0xf6, 0x61, 0x7b, 0xaf, 0x9e, 0x67, 0x35, 0x28, 0x37, 0x77, 0x9b, 0xdd, 0xbd, - 0x5e, 0xb7, 0xbd, 0x57, 0x2f, 0x18, 0xff, 0x23, 0x03, 0x10, 0x77, 0x54, 0xf0, 0xd5, 0xb8, 0xab, - 0xba, 0xb7, 0x6b, 0x63, 0x61, 0x50, 0xc4, 0x57, 0xfd, 0x44, 0x9a, 0x3d, 0x86, 0xa2, 0x37, 0x0b, - 0x87, 0xde, 0x84, 0x04, 0xf5, 0xe5, 
0xc7, 0x8d, 0x85, 0x72, 0x3d, 0xca, 0x37, 0x15, 0x62, 0xc2, - 0xa3, 0x95, 0x7b, 0x99, 0x47, 0x2b, 0xe9, 0x3a, 0x23, 0xb9, 0x4e, 0x73, 0x9d, 0xdd, 0x05, 0x08, - 0x9e, 0x73, 0x3e, 0x45, 0x63, 0x8c, 0xdc, 0x05, 0x65, 0x84, 0x0c, 0x84, 0x1e, 0xf7, 0x87, 0x19, - 0xd8, 0x40, 0x5a, 0x1a, 0xcd, 0x33, 0xb1, 0xfb, 0x50, 0x19, 0x7a, 0xde, 0x54, 0xa8, 0xfe, 0xb1, - 0xbc, 0xa6, 0x83, 0x04, 0x83, 0x22, 0x86, 0x7c, 0xe6, 0xf9, 0x43, 0x2e, 0x79, 0x18, 0x20, 0x68, - 0x5f, 0x40, 0xc4, 0x1e, 0x92, 0x9b, 0x90, 0x30, 0x88, 0x85, 0x55, 0x08, 0x46, 0x28, 0x9b, 0xb0, - 0x74, 0xea, 0x73, 0x7b, 0x78, 0x21, 0xb9, 0x97, 0x4c, 0xb1, 0x6f, 0xc7, 0x46, 0xa9, 0xa1, 0xd8, - 0x13, 0x63, 0x4e, 0x9d, 0x2f, 0x99, 0x2b, 0x12, 0xde, 0x92, 0x60, 0x71, 0xce, 0xdb, 0xa7, 0xb6, - 0x3b, 0xf2, 0x5c, 0x3e, 0x92, 0x5a, 0x6e, 0x0c, 0x30, 0x8e, 0x61, 0x73, 0x7e, 0x7c, 0x92, 0xdf, - 0x7d, 0xa4, 0xf1, 0x3b, 0x52, 0x2f, 0xb7, 0x6f, 0xde, 0x63, 0x1a, 0xef, 0xfb, 0x4b, 0x79, 0xc8, - 0x0b, 0x75, 0xe3, 0x46, 0xcd, 0x44, 0xd7, 0x1f, 0x73, 0x0b, 0x7e, 0x4e, 0xb4, 0x7d, 0x91, 0x00, - 0x26, 0x17, 0x0b, 0x21, 0x28, 0x78, 0x45, 0xd9, 0x3e, 0x1f, 0x5e, 0x4a, 0xc9, 0x9b, 0xb2, 0x4d, - 0x3e, 0xbc, 0x44, 0x75, 0xde, 0x0e, 0xa9, 0x2c, 0xf1, 0xab, 0x62, 0x60, 0x87, 0x58, 0x52, 0x66, - 0x61, 0xb9, 0x62, 0x94, 0x85, 0xa5, 0x1a, 0x50, 0x74, 0xdc, 0x53, 0x6f, 0xe6, 0x8e, 0x90, 0x3d, - 0x95, 0x4c, 0x95, 0x44, 0xb7, 0x2a, 0x72, 0x52, 0x71, 0xb4, 0x13, 0x37, 0x2a, 0x09, 0xc0, 0x40, - 0x1c, 0xee, 0x1f, 0x40, 0x39, 0xb8, 0x76, 0x87, 0x3a, 0x0f, 0x5a, 0x97, 0xf3, 0x23, 0x46, 0xbf, - 0xd3, 0xbf, 0x76, 0x87, 0x48, 0xf1, 0xa5, 0x40, 0xfe, 0x62, 0x1f, 0x42, 0x29, 0x72, 0x44, 0xd0, - 0x09, 0x72, 0x5b, 0x2f, 0xa1, 0xbc, 0x0f, 0x64, 0xef, 0x89, 0x50, 0xd9, 0xfb, 0xb0, 0x84, 0xde, - 0x82, 0xa0, 0x51, 0xc5, 0x42, 0x4a, 0xa9, 0x14, 0xdd, 0x40, 0x8f, 0x26, 0x1f, 0xa1, 0xe7, 0xc0, - 0x94, 0x68, 0xdb, 0x4f, 0xa1, 0x96, 0xa8, 0x4b, 0xb7, 0xdf, 0xd4, 0xc8, 0x7e, 0xf3, 0x96, 0x6e, - 0xbf, 0x89, 0x4f, 0x32, 0x59, 0x4c, 0xb7, 0xe7, 0xfc, 0x10, 0x4a, 0x6a, 
0x28, 0x82, 0x65, 0x9c, - 0x74, 0x9f, 0x76, 0x7b, 0x5f, 0x74, 0xad, 0xfe, 0x97, 0xdd, 0x56, 0xfd, 0x16, 0x5b, 0x81, 0x4a, - 0xb3, 0x85, 0x5c, 0x08, 0x01, 0x19, 0x81, 0x72, 0xdc, 0xec, 0xf7, 0x23, 0x48, 0xd6, 0xd8, 0x87, - 0xfa, 0x7c, 0x4f, 0x05, 0x4d, 0x86, 0x0a, 0x26, 0x7d, 0x29, 0x31, 0x40, 0xe8, 0xe1, 0xe4, 0x1e, - 0x21, 0x2d, 0x87, 0x12, 0xc6, 0x87, 0x50, 0x17, 0xe7, 0xb2, 0x98, 0x2a, 0xdd, 0x4b, 0x3a, 0x16, - 0x92, 0xb3, 0xee, 0x4f, 0x29, 0x99, 0x15, 0x82, 0x61, 0x53, 0xc6, 0x47, 0xb0, 0xaa, 0x15, 0x8b, - 0xed, 0x26, 0xe2, 0xac, 0x9f, 0xb7, 0x9b, 0xa0, 0x96, 0x4c, 0x39, 0xc6, 0x16, 0x6c, 0x88, 0x64, - 0xfb, 0x92, 0xbb, 0x61, 0x7f, 0x76, 0x4a, 0xce, 0x75, 0xc7, 0x73, 0x85, 0xf6, 0x5c, 0x8e, 0x72, - 0x6e, 0x26, 0xf2, 0x1d, 0x69, 0x62, 0x21, 0xae, 0xb6, 0xad, 0xb5, 0x80, 0x05, 0x77, 0xf0, 0x6f, - 0xc2, 0xd4, 0x52, 0x8e, 0x40, 0x62, 0x5a, 0x8f, 0xdb, 0x6d, 0xd3, 0xea, 0x75, 0x0f, 0x3b, 0x5d, - 0xc1, 0xdb, 0xc5, 0xb4, 0x22, 0x60, 0x7f, 0x1f, 0x21, 0x19, 0xa3, 0x0e, 0xcb, 0x4f, 0x78, 0xd8, - 0x71, 0xcf, 0x3c, 0x39, 0x19, 0xc6, 0x9f, 0x5f, 0x82, 0x95, 0x08, 0x14, 0x9b, 0x6a, 0x2e, 0xb9, - 0x1f, 0x38, 0x9e, 0x8b, 0xea, 0x46, 0xd9, 0x54, 0x49, 0xc1, 0x9d, 0xa4, 0x92, 0x85, 0x52, 0xc2, - 0x3a, 0xe6, 0x4a, 0xb5, 0x0c, 0x45, 0x84, 0x77, 0x60, 0xc5, 0x19, 0x71, 0x37, 0x74, 0xc2, 0x6b, - 0x2b, 0x61, 0x24, 0x5e, 0x56, 0x60, 0x29, 0x26, 0xac, 0x43, 0xc1, 0x1e, 0x3b, 0xb6, 0x0a, 0x5a, - 0xa0, 0x84, 0x80, 0x0e, 0xbd, 0xb1, 0xe7, 0xa3, 0xda, 0x51, 0x36, 0x29, 0xc1, 0x1e, 0xc1, 0xba, - 0x50, 0x81, 0x74, 0xcb, 0x3d, 0x32, 0x18, 0xb2, 0x57, 0x33, 0x77, 0x36, 0x39, 0x8e, 0xad, 0xf7, - 0x22, 0x47, 0x08, 0x07, 0xa2, 0x84, 0x94, 0x06, 0xa3, 0x02, 0x64, 0x54, 0x58, 0x75, 0x67, 0x93, - 0x26, 0xe6, 0x44, 0xf8, 0x8f, 0x61, 0x43, 0xe0, 0x47, 0xf2, 0x63, 0x54, 0x62, 0x05, 0x4b, 0x88, - 0xca, 0x3a, 0x32, 0x2f, 0x2a, 0x73, 0x07, 0xca, 0xd4, 0x2b, 0x41, 0x12, 0x05, 0x32, 0x39, 0x60, - 0x57, 0xb8, 0x1f, 0x2c, 0xc4, 0x17, 0x90, 0x1e, 0x3f, 0x1f, 0x5f, 0xa0, 0x45, 0x28, 0x94, 0xe6, - 0x23, 0x14, 
0x1e, 0xc3, 0xc6, 0xa9, 0xa0, 0xd1, 0x0b, 0x6e, 0x8f, 0xb8, 0x6f, 0xc5, 0x94, 0x4f, - 0xda, 0xe2, 0x9a, 0xc8, 0x3c, 0xc0, 0xbc, 0x68, 0xa3, 0x08, 0x41, 0x4e, 0xf0, 0x0d, 0x3e, 0xb2, - 0x42, 0xcf, 0x42, 0xf9, 0x0e, 0x39, 0x50, 0xc9, 0xac, 0x11, 0x78, 0xe0, 0xb5, 0x04, 0x30, 0x89, - 0x77, 0xee, 0xdb, 0xd3, 0x0b, 0xa9, 0xcb, 0x45, 0x78, 0x4f, 0x04, 0x90, 0xbd, 0x06, 0x45, 0xb1, - 0x27, 0x5c, 0x4e, 0xee, 0x5a, 0xd2, 0x92, 0x14, 0x88, 0xbd, 0x05, 0x4b, 0xd8, 0x46, 0xd0, 0xa8, - 0xe3, 0x86, 0xa8, 0xc6, 0x9c, 0xde, 0x71, 0x4d, 0x99, 0x27, 0xa4, 0xe5, 0x99, 0xef, 0x10, 0x1b, - 0x2a, 0x9b, 0xf8, 0x9b, 0xfd, 0x48, 0xe3, 0x69, 0x6b, 0x58, 0xf6, 0x2d, 0x59, 0x76, 0x8e, 0x14, - 0x6f, 0x62, 0x6f, 0xdf, 0x28, 0xb7, 0xfa, 0x71, 0xbe, 0x54, 0xa9, 0x57, 0x8d, 0x06, 0x86, 0x55, - 0x98, 0x7c, 0xe8, 0x5d, 0x72, 0xff, 0x3a, 0xb1, 0x47, 0x32, 0xb0, 0xb5, 0x90, 0x15, 0x7b, 0x67, - 0x7d, 0x09, 0xb7, 0x26, 0xde, 0x48, 0x9d, 0xe9, 0x55, 0x05, 0x3c, 0xf2, 0x46, 0x42, 0xf6, 0x58, - 0x8d, 0x90, 0xce, 0x1c, 0xd7, 0x09, 0x2e, 0xf8, 0x48, 0x1e, 0xed, 0x75, 0x95, 0xb1, 0x2f, 0xe1, - 0x42, 0x80, 0x9e, 0xfa, 0xde, 0x79, 0x74, 0xd2, 0x65, 0xcc, 0x28, 0x6d, 0x7c, 0x0c, 0x05, 0x5a, - 0x41, 0xb1, 0x51, 0x70, 0x7d, 0x33, 0x72, 0xa3, 0x20, 0xb4, 0x01, 0x45, 0x97, 0x87, 0xcf, 0x3d, - 0xff, 0x99, 0x72, 0xf5, 0xc8, 0xa4, 0xf1, 0x53, 0xb4, 0x3b, 0x46, 0xf1, 0x31, 0x64, 0x3b, 0x10, - 0x24, 0x4c, 0x24, 0x18, 0x5c, 0xd8, 0xd2, 0x14, 0x5a, 0x42, 0x40, 0xff, 0xc2, 0x5e, 0x20, 0xe1, - 0xec, 0x62, 0x88, 0xcc, 0x5b, 0xb0, 0xac, 0x22, 0x72, 0x02, 0x6b, 0xcc, 0xcf, 0x42, 0xb9, 0x25, - 0xab, 0x32, 0x1c, 0x27, 0x38, 0xe4, 0x67, 0xa1, 0x71, 0x04, 0xab, 0x72, 0xd3, 0xf4, 0xa6, 0x5c, - 0x35, 0xfd, 0x49, 0x9a, 0x52, 0x53, 0x79, 0xbc, 0x96, 0x94, 0x16, 0x48, 0x2e, 0x4b, 0x68, 0x3a, - 0xc6, 0x4f, 0x62, 0x03, 0xa0, 0x90, 0x25, 0x64, 0x7d, 0x52, 0xb5, 0x50, 0x1e, 0x32, 0xe5, 0x68, - 0x8e, 0x14, 0x18, 0x67, 0x24, 0x66, 0x27, 0x98, 0x0d, 0x87, 0x2a, 0x52, 0xaa, 0x64, 0xaa, 0xa4, - 0xf1, 0xef, 0x33, 0xb0, 0x86, 0x95, 0x29, 0xa5, 
0x4c, 0x9e, 0x14, 0x3f, 0x77, 0x27, 0xc5, 0xfa, - 0xe8, 0x02, 0x1c, 0x25, 0xbe, 0xbe, 0xf7, 0x21, 0xbf, 0xe0, 0x7d, 0xf8, 0x36, 0xd4, 0x47, 0x7c, - 0xec, 0x20, 0x29, 0x29, 0x79, 0x88, 0x04, 0xd0, 0x15, 0x05, 0x97, 0x46, 0x02, 0xe3, 0xaf, 0x65, - 0x60, 0x95, 0xc4, 0x2d, 0x34, 0xbb, 0xc8, 0x89, 0xfa, 0x4c, 0xd9, 0x17, 0x24, 0x3b, 0x95, 0x63, - 0x8a, 0xc5, 0x10, 0x84, 0x12, 0xf2, 0xc1, 0x2d, 0x69, 0x77, 0x90, 0x50, 0xf6, 0x7d, 0x54, 0x24, - 0x5d, 0x0b, 0x81, 0x52, 0x8c, 0xbe, 0x9d, 0x22, 0xe0, 0x45, 0xc5, 0x85, 0x96, 0xe9, 0x22, 0x68, - 0xb7, 0x04, 0x4b, 0x64, 0xc4, 0x32, 0xf6, 0xa1, 0x96, 0x68, 0x26, 0xe1, 0x0c, 0xa9, 0x92, 0x33, - 0x64, 0xc1, 0x39, 0x99, 0x5d, 0x74, 0x4e, 0x5e, 0xc3, 0x9a, 0xc9, 0xed, 0xd1, 0xf5, 0xbe, 0xe7, - 0x1f, 0x07, 0xa7, 0xe1, 0x3e, 0xc9, 0xb0, 0xe2, 0x0c, 0x8a, 0x3c, 0xee, 0x09, 0x8f, 0x83, 0x72, - 0xbc, 0x2a, 0x2b, 0xca, 0xb7, 0x60, 0x39, 0x76, 0xcd, 0x6b, 0x56, 0xeb, 0x5a, 0xe4, 0x9d, 0x47, - 0xe3, 0xb5, 0xd0, 0xf7, 0x83, 0xd3, 0x50, 0xda, 0xad, 0xf1, 0xb7, 0xf1, 0x2f, 0xf3, 0xc0, 0x04, - 0x35, 0xcf, 0x11, 0xcc, 0x5c, 0x50, 0x41, 0x76, 0x21, 0xa8, 0xe0, 0x11, 0x30, 0x0d, 0x41, 0xc5, - 0x3a, 0xe4, 0xa2, 0x58, 0x87, 0x7a, 0x8c, 0x2b, 0x43, 0x1d, 0x1e, 0xc1, 0xba, 0x54, 0x08, 0x92, - 0x5d, 0x25, 0xd2, 0x60, 0xa4, 0x19, 0x24, 0xfa, 0xab, 0x02, 0x0a, 0x94, 0xa1, 0x39, 0x47, 0x01, - 0x05, 0xca, 0x1e, 0xa4, 0x11, 0xe0, 0xd2, 0x4b, 0x09, 0xb0, 0xb8, 0x40, 0x80, 0x9a, 0x6d, 0xb0, - 0x94, 0xb4, 0x0d, 0x2e, 0x58, 0xb9, 0x49, 0xfa, 0x4d, 0x58, 0xb9, 0x1f, 0x40, 0x5d, 0xd9, 0x89, - 0x22, 0x0b, 0x24, 0x45, 0x02, 0x49, 0x1b, 0x70, 0x4b, 0xd9, 0x20, 0x13, 0x6e, 0xaf, 0xca, 0x9c, - 0xdb, 0xeb, 0x5d, 0x58, 0x0d, 0x04, 0xfd, 0x5a, 0x33, 0x57, 0x86, 0x03, 0xf2, 0x11, 0xaa, 0xd3, - 0x25, 0xb3, 0x8e, 0x19, 0x27, 0x31, 0x7c, 0xd1, 0xa2, 0x56, 0x4b, 0xb1, 0xa8, 0x7d, 0x18, 0x7b, - 0xd8, 0x83, 0x0b, 0x67, 0x82, 0x82, 0x4f, 0x1c, 0xe2, 0x26, 0x27, 0xb8, 0x7f, 0xe1, 0x4c, 0x4c, - 0x15, 0xce, 0x21, 0x12, 0xac, 0x05, 0xf7, 0xe4, 0x78, 0x52, 0x22, 0x31, 0x68, 0x16, 
0x56, 0x50, - 0x52, 0xdd, 0x26, 0xb4, 0xa3, 0xb9, 0xa0, 0x0c, 0x31, 0x29, 0xc6, 0xff, 0xca, 0x40, 0x5d, 0x10, - 0x53, 0x62, 0x9f, 0x7e, 0x0a, 0xc8, 0x51, 0x5e, 0x71, 0x9b, 0x56, 0x04, 0xae, 0xda, 0xa5, 0x1f, - 0x03, 0x6e, 0x3b, 0xcb, 0x9b, 0x72, 0x57, 0x6e, 0xd2, 0x46, 0x72, 0x93, 0xc6, 0x8c, 0xf8, 0xe0, - 0x16, 0x69, 0x61, 0x02, 0xc2, 0x3e, 0x85, 0xb2, 0xa0, 0x6e, 0x24, 0x35, 0x19, 0xf6, 0xb9, 0x1d, - 0x69, 0xd6, 0x0b, 0x1b, 0x4d, 0x14, 0x9d, 0xca, 0x64, 0x5a, 0x64, 0x45, 0x3e, 0x25, 0xb2, 0x42, - 0xe3, 0x02, 0x07, 0x00, 0x4f, 0xf9, 0xf5, 0xa1, 0x37, 0x44, 0x1b, 0xc7, 0x5d, 0x00, 0xb1, 0x21, - 0xce, 0xec, 0x89, 0x23, 0xad, 0x7b, 0x05, 0xb3, 0xfc, 0x8c, 0x5f, 0xef, 0x23, 0x40, 0x50, 0x83, - 0xc8, 0x8e, 0x59, 0x41, 0xc1, 0x2c, 0x3d, 0xe3, 0xd7, 0xc4, 0x07, 0x2c, 0xa8, 0x3d, 0xe5, 0xd7, - 0x7b, 0x9c, 0xc4, 0x6d, 0xcf, 0x17, 0x94, 0xe8, 0xdb, 0xcf, 0x85, 0x7c, 0x9d, 0x88, 0x8a, 0xa8, - 0xf8, 0xf6, 0xf3, 0xa7, 0xfc, 0x5a, 0x45, 0x68, 0x14, 0x45, 0xfe, 0xd8, 0x1b, 0x4a, 0x01, 0x41, - 0x19, 0x54, 0xe2, 0x4e, 0x99, 0x4b, 0xcf, 0xf0, 0xb7, 0xf1, 0xc7, 0x19, 0xa8, 0x89, 0xfe, 0x23, - 0x6f, 0xc7, 0x75, 0x97, 0x61, 0x82, 0x99, 0x38, 0x4c, 0xf0, 0xb1, 0x64, 0x8d, 0x74, 0x50, 0x64, - 0x6f, 0x3e, 0x28, 0x70, 0x6d, 0xe8, 0x94, 0xf8, 0x00, 0xca, 0xb4, 0xb7, 0x05, 0xb3, 0xc8, 0x25, - 0x16, 0x38, 0x31, 0x20, 0xb3, 0x84, 0x68, 0x4f, 0x29, 0x2a, 0x49, 0xb3, 0x5d, 0xd3, 0x14, 0x97, - 0xfd, 0xc8, 0x62, 0x9d, 0xb2, 0x0c, 0x85, 0x1b, 0xa2, 0x92, 0x74, 0xc3, 0xf0, 0xd2, 0xbc, 0x61, - 0xd8, 0x70, 0xa1, 0x24, 0x96, 0x1a, 0x07, 0x9b, 0x52, 0x69, 0x26, 0xad, 0x52, 0x21, 0x4e, 0xd8, - 0xe2, 0x64, 0x11, 0xdc, 0x32, 0x2b, 0xc5, 0x09, 0x3b, 0xe0, 0xa2, 0x22, 0xd1, 0x71, 0xd7, 0xb3, - 0xd0, 0xd2, 0x2a, 0x6d, 0x90, 0x25, 0xb3, 0xec, 0x7a, 0xc7, 0x04, 0x30, 0xfe, 0x6c, 0x06, 0x2a, - 0xda, 0x2e, 0x43, 0xd3, 0x7b, 0x34, 0x9d, 0xb4, 0x25, 0x93, 0x3b, 0x20, 0xb1, 0x1e, 0x07, 0xb7, - 0xcc, 0xda, 0x30, 0xb1, 0x40, 0x3b, 0x92, 0x94, 0xb1, 0x64, 0x36, 0x61, 0xef, 0x51, 0xe3, 0x52, - 0xf4, 0x2b, 0x7e, 0xef, 
0x2e, 0x41, 0x5e, 0xa0, 0x1a, 0x9f, 0xc1, 0xaa, 0xd6, 0x0d, 0xb2, 0x87, - 0xbc, 0xea, 0x04, 0x18, 0xbf, 0x1a, 0x15, 0x16, 0x6d, 0x90, 0xbf, 0x58, 0x05, 0x80, 0xf1, 0x11, - 0xcd, 0x8b, 0x0c, 0x34, 0x23, 0x10, 0xce, 0xcc, 0xab, 0x06, 0x25, 0xfd, 0x1a, 0xac, 0x69, 0xb5, - 0xef, 0x3b, 0xae, 0x3d, 0x76, 0x7e, 0x8a, 0x42, 0x45, 0xe0, 0x9c, 0xbb, 0x73, 0xf5, 0x13, 0xe8, - 0x6b, 0xd5, 0xff, 0xd7, 0xb3, 0xb0, 0x2e, 0x1b, 0xc0, 0x90, 0x5e, 0x47, 0x48, 0x8a, 0x47, 0xc1, - 0x39, 0xfb, 0x14, 0x6a, 0x62, 0x6e, 0x2c, 0x9f, 0x9f, 0x3b, 0x41, 0xc8, 0x95, 0x9f, 0x3a, 0x85, - 0x39, 0x0a, 0x81, 0x41, 0xa0, 0x9a, 0x12, 0x93, 0x7d, 0x06, 0x15, 0x2c, 0x4a, 0xf6, 0x26, 0xb9, - 0x10, 0x8d, 0xc5, 0x82, 0x34, 0xd1, 0x07, 0xb7, 0x4c, 0x08, 0xe2, 0x69, 0xff, 0x0c, 0x2a, 0xb8, - 0x86, 0x97, 0x38, 0x91, 0x73, 0x9c, 0x6c, 0x61, 0xa2, 0x45, 0xe1, 0x69, 0x3c, 0xed, 0x4d, 0xa8, - 0x11, 0x2f, 0x93, 0xf3, 0x24, 0x43, 0x05, 0xb7, 0x17, 0x8b, 0xab, 0x99, 0x14, 0x9d, 0x9f, 0x6a, - 0xe9, 0xdd, 0x32, 0x14, 0x43, 0xdf, 0x39, 0x3f, 0xe7, 0xbe, 0xb1, 0x19, 0x4d, 0x8d, 0x60, 0xd2, - 0xbc, 0x1f, 0xf2, 0xa9, 0x50, 0x01, 0x8c, 0x7f, 0x95, 0x81, 0x8a, 0x64, 0xbb, 0x3f, 0xb7, 0x73, - 0x7c, 0x7b, 0xce, 0x32, 0x59, 0xd6, 0x0c, 0x91, 0xef, 0xc0, 0xca, 0x44, 0xe8, 0x2b, 0x42, 0x9f, - 0x4e, 0x78, 0xc6, 0x97, 0x15, 0x58, 0x8a, 0xe2, 0x3b, 0xb0, 0x86, 0x92, 0x79, 0x60, 0x85, 0xce, - 0xd8, 0x52, 0x99, 0x32, 0xae, 0x7d, 0x95, 0xb2, 0x06, 0xce, 0xf8, 0x48, 0x66, 0x08, 0x01, 0x35, - 0x08, 0xed, 0x73, 0x2e, 0xb7, 0x3e, 0x25, 0x84, 0x0e, 0x34, 0xa7, 0x4a, 0x2b, 0x1d, 0xe8, 0xff, - 0xac, 0xc2, 0xd6, 0x42, 0x96, 0xd4, 0x81, 0x22, 0x57, 0xe8, 0xd8, 0x99, 0x9c, 0x7a, 0x91, 0x29, - 0x3e, 0xa3, 0xb9, 0x42, 0x0f, 0x45, 0x8e, 0x32, 0xc5, 0x73, 0xd8, 0x50, 0x04, 0x89, 0xb6, 0xf4, - 0x48, 0xdb, 0xce, 0xa2, 0x2e, 0xf8, 0x41, 0xf2, 0x8c, 0x9b, 0x6f, 0x4e, 0xc1, 0x75, 0xf1, 0x6b, - 0x6d, 0xba, 0x00, 0x0b, 0xd8, 0x9f, 0x82, 0x46, 0x44, 0xf7, 0x52, 0x35, 0xd0, 0x4c, 0x07, 0xa2, - 0xa5, 0xf7, 0x5e, 0xd2, 0x52, 0xc2, 0xc8, 0x89, 0xf2, 0xd9, 
0xa6, 0xda, 0x32, 0x54, 0x61, 0xd4, - 0xd6, 0x25, 0xbc, 0xae, 0xda, 0x42, 0x51, 0x7f, 0xb1, 0xc5, 0xfc, 0x2b, 0x8d, 0x0d, 0x0d, 0xb8, - 0x89, 0x66, 0xcd, 0x3b, 0xb2, 0xe2, 0x28, 0x4b, 0x6f, 0xf7, 0x02, 0x36, 0x9f, 0xdb, 0x4e, 0xa8, - 0xc6, 0xa8, 0x59, 0x2e, 0x0a, 0xd8, 0xde, 0xe3, 0x97, 0xb4, 0xf7, 0x05, 0x15, 0x4e, 0x28, 0x3f, - 0xeb, 0xcf, 0x17, 0x81, 0xc1, 0xf6, 0xdf, 0xcd, 0xc1, 0x72, 0xb2, 0x16, 0xc1, 0x58, 0xe4, 0x59, - 0xa4, 0x64, 0x5a, 0x29, 0x68, 0x4b, 0x37, 0x51, 0x97, 0x64, 0xd9, 0x45, 0x07, 0x56, 0x36, 0xc5, - 0x81, 0xa5, 0xfb, 0x8d, 0x72, 0x2f, 0x0b, 0x23, 0xc8, 0xbf, 0x52, 0x18, 0x41, 0x21, 0x2d, 0x8c, - 0xe0, 0xbb, 0x37, 0xfa, 0x9d, 0xc9, 0xfa, 0x9b, 0xea, 0x73, 0xfe, 0xf0, 0x66, 0x9f, 0x33, 0x49, - 0xc8, 0x37, 0xf9, 0x9b, 0x35, 0x6f, 0x79, 0xe9, 0x06, 0x6f, 0x8f, 0xe6, 0x3f, 0x4f, 0xf1, 0x37, - 0x97, 0xbf, 0x86, 0xbf, 0x79, 0xfb, 0x8f, 0x33, 0xc0, 0x16, 0x77, 0x07, 0x7b, 0x42, 0xbe, 0x41, - 0x97, 0x8f, 0x25, 0xe7, 0xfe, 0xce, 0xab, 0xed, 0x30, 0x45, 0x10, 0xaa, 0x34, 0x7b, 0x1f, 0xd6, - 0xf4, 0xdb, 0x37, 0xba, 0x65, 0xa0, 0x66, 0x32, 0x3d, 0x2b, 0xb6, 0x71, 0x69, 0x31, 0x1b, 0xf9, - 0x97, 0xc6, 0x6c, 0x14, 0x5e, 0x1a, 0xb3, 0xb1, 0x94, 0x8c, 0xd9, 0xd8, 0xfe, 0x77, 0x19, 0x58, - 0x4b, 0x21, 0xe2, 0x6f, 0x6e, 0xcc, 0x82, 0xf6, 0x12, 0x6c, 0x2d, 0x2b, 0x69, 0x4f, 0xe7, 0x68, - 0x87, 0xca, 0x2e, 0x2a, 0x96, 0x22, 0x90, 0x27, 0xd5, 0xc3, 0x97, 0x71, 0x97, 0xb8, 0x84, 0xa9, - 0x17, 0xdf, 0xfe, 0xfb, 0x59, 0xa8, 0x68, 0x99, 0x62, 0x16, 0x89, 0x64, 0xb5, 0x88, 0x41, 0x12, - 0x1c, 0xd1, 0xae, 0x71, 0x0f, 0xa4, 0xf7, 0x87, 0xf2, 0x69, 0x73, 0x49, 0x29, 0x11, 0x11, 0x76, - 0x60, 0x4d, 0xf9, 0x6d, 0x79, 0x1c, 0x44, 0x2c, 0xcf, 0x1a, 0xe9, 0x82, 0x97, 0x9d, 0x44, 0xfc, - 0xf7, 0x95, 0xca, 0x19, 0xaf, 0x9d, 0xe6, 0x07, 0x5b, 0x95, 0xce, 0x7f, 0xb9, 0x88, 0x82, 0xce, - 0x3f, 0x80, 0x8d, 0xc8, 0xfb, 0x9f, 0x28, 0x41, 0xde, 0x16, 0xa6, 0xbc, 0xfc, 0x5a, 0x91, 0x1f, - 0xc1, 0xdd, 0xb9, 0x3e, 0xcd, 0x15, 0xa5, 0x68, 0xf7, 0xdb, 0x89, 0xde, 0xe9, 0x35, 0x6c, 0xff, - 
0x69, 0xa8, 0x25, 0x18, 0xe5, 0x37, 0xb7, 0xe4, 0xf3, 0xb6, 0x24, 0x9a, 0x51, 0xdd, 0x96, 0xb4, - 0xfd, 0x3f, 0x73, 0xc0, 0x16, 0x79, 0xf5, 0x2f, 0xb2, 0x0b, 0x8b, 0x84, 0x99, 0x4b, 0x21, 0xcc, - 0xff, 0x67, 0xf2, 0x43, 0x6c, 0xd2, 0xd4, 0x9c, 0xef, 0xb4, 0x39, 0xeb, 0x51, 0x86, 0xea, 0xc5, - 0xc7, 0xf3, 0x21, 0x4a, 0xa5, 0xc4, 0x05, 0x32, 0x4d, 0x80, 0x9a, 0x8b, 0x54, 0x3a, 0x81, 0x25, - 0xdb, 0x1d, 0x5e, 0x78, 0xbe, 0xe4, 0x83, 0xbf, 0xf4, 0xb5, 0x8f, 0xcf, 0x9d, 0x26, 0x96, 0x47, - 0xa9, 0xcd, 0x94, 0x95, 0x19, 0x1f, 0x40, 0x45, 0x03, 0xb3, 0x32, 0x14, 0x0e, 0x3b, 0x47, 0xbb, - 0xbd, 0xfa, 0x2d, 0x56, 0x83, 0xb2, 0xd9, 0x6e, 0xf5, 0x3e, 0x6f, 0x9b, 0xed, 0xbd, 0x7a, 0x86, - 0x95, 0x20, 0x7f, 0xd8, 0xeb, 0x0f, 0xea, 0x59, 0x63, 0x1b, 0x1a, 0xb2, 0xc6, 0x45, 0xe7, 0xce, - 0x6f, 0xe5, 0x23, 0x93, 0x24, 0x66, 0x4a, 0x0d, 0xfe, 0xbb, 0x50, 0xd5, 0xc5, 0x1b, 0x49, 0x11, - 0x73, 0xf1, 0x1f, 0x42, 0x77, 0xf7, 0x34, 0x5e, 0xdd, 0x02, 0xf2, 0xfe, 0x8f, 0xa2, 0x62, 0xd9, - 0x84, 0xdc, 0x9a, 0xe2, 0x46, 0x45, 0xe5, 0x27, 0x41, 0x86, 0xff, 0x1f, 0x2c, 0x27, 0x1d, 0x19, - 0x92, 0x23, 0xa5, 0xe9, 0xa3, 0xa2, 0x74, 0xc2, 0xb3, 0xc1, 0x7e, 0x04, 0xf5, 0x79, 0x47, 0x88, - 0x14, 0x9e, 0x6f, 0x28, 0xbf, 0xe2, 0x24, 0x7d, 0x23, 0xec, 0x00, 0xd6, 0xd3, 0x04, 0x3c, 0xa4, - 0x8f, 0x9b, 0x6d, 0x18, 0x6c, 0x51, 0x88, 0x63, 0x9f, 0x48, 0x87, 0x58, 0x01, 0x97, 0xff, 0xad, - 0x64, 0xfb, 0xda, 0x64, 0xef, 0xd0, 0x3f, 0xcd, 0x35, 0x76, 0x09, 0x10, 0xc3, 0x58, 0x1d, 0xaa, - 0xbd, 0xe3, 0x76, 0xd7, 0x6a, 0x1d, 0x34, 0xbb, 0xdd, 0xf6, 0x61, 0xfd, 0x16, 0x63, 0xb0, 0x8c, - 0x21, 0x0c, 0x7b, 0x11, 0x2c, 0x23, 0x60, 0xd2, 0x31, 0xa9, 0x60, 0x59, 0xb6, 0x0e, 0xf5, 0x4e, - 0x77, 0x0e, 0x9a, 0x63, 0x0d, 0x58, 0x3f, 0x6e, 0x53, 0xd4, 0x43, 0xa2, 0xde, 0xbc, 0x50, 0x1a, - 0xe4, 0x70, 0x85, 0xd2, 0xf0, 0x85, 0x3d, 0x1e, 0xf3, 0x50, 0xee, 0x03, 0x25, 0x4b, 0xff, 0x8d, - 0x0c, 0x6c, 0xcc, 0x65, 0xc4, 0xde, 0x04, 0x92, 0xa4, 0x93, 0x32, 0x74, 0x15, 0x81, 0x6a, 0x37, - 0xbd, 0x0b, 0xab, 0x91, 0x71, 0x6b, 
0xee, 0x54, 0xaa, 0x47, 0x19, 0x0a, 0xf9, 0x7d, 0x58, 0xd3, - 0x6c, 0x64, 0x73, 0xbc, 0x82, 0x69, 0x59, 0xb2, 0x80, 0xb1, 0x15, 0xdd, 0xa9, 0x99, 0xeb, 0xf5, - 0x08, 0x36, 0xe7, 0x33, 0x62, 0x7f, 0x61, 0xb2, 0xbf, 0x2a, 0xc9, 0x1e, 0xcd, 0x11, 0x42, 0xb2, - 0xb7, 0xfa, 0x82, 0xab, 0xe6, 0x7f, 0x67, 0x09, 0xd8, 0x4f, 0x66, 0xdc, 0xbf, 0xc6, 0xbb, 0x5c, - 0xc1, 0xcb, 0x02, 0x96, 0x95, 0x21, 0x26, 0xfb, 0x4a, 0xf7, 0x35, 0xd3, 0xee, 0x4b, 0xe6, 0x5f, - 0x7e, 0x5f, 0xb2, 0xf0, 0xb2, 0xfb, 0x92, 0x6f, 0x42, 0xcd, 0x39, 0x77, 0x3d, 0xc1, 0x0a, 0x85, - 0x24, 0x1c, 0x34, 0x96, 0xee, 0xe7, 0x1e, 0x54, 0xcd, 0xaa, 0x04, 0x0a, 0x39, 0x38, 0x60, 0x9f, - 0xc5, 0x48, 0x7c, 0x74, 0x8e, 0x77, 0x86, 0x75, 0x26, 0xd8, 0x1e, 0x9d, 0x73, 0x69, 0x77, 0x42, - 0x4d, 0x43, 0x15, 0x16, 0xf0, 0x80, 0xbd, 0x05, 0xcb, 0x81, 0x37, 0x13, 0x8a, 0x85, 0x9a, 0x06, - 0x72, 0x18, 0x56, 0x09, 0x7a, 0xac, 0xdc, 0xc7, 0x6b, 0xb3, 0x80, 0x5b, 0x13, 0x27, 0x08, 0x84, - 0x78, 0x36, 0xf4, 0xdc, 0xd0, 0xf7, 0xc6, 0xd2, 0x07, 0xb8, 0x3a, 0x0b, 0xf8, 0x11, 0xe5, 0xb4, - 0x28, 0x83, 0x7d, 0x2f, 0xee, 0xd2, 0xd4, 0x76, 0xfc, 0xa0, 0x01, 0xd8, 0x25, 0x35, 0x52, 0x94, - 0xdf, 0x6d, 0xc7, 0x8f, 0xfa, 0x22, 0x12, 0xc1, 0xdc, 0x3d, 0xce, 0xca, 0xfc, 0x3d, 0xce, 0x5f, - 0x4f, 0xbf, 0xc7, 0x49, 0x51, 0x4b, 0x8f, 0x64, 0xd5, 0x8b, 0x4b, 0xfc, 0xb5, 0xae, 0x73, 0x2e, - 0x5e, 0x4f, 0x5d, 0xfe, 0x3a, 0xd7, 0x53, 0x57, 0xd2, 0xae, 0xa7, 0x7e, 0x00, 0x15, 0xbc, 0x38, - 0x68, 0x5d, 0x60, 0xec, 0x22, 0xf9, 0x34, 0xeb, 0xfa, 0xcd, 0xc2, 0x03, 0xc7, 0x0d, 0x4d, 0xf0, - 0xd5, 0xcf, 0x60, 0xf1, 0xa6, 0xe8, 0xea, 0x2f, 0xf0, 0xa6, 0xa8, 0xbc, 0xe0, 0xb8, 0x03, 0x25, - 0xb5, 0x4e, 0x8c, 0x41, 0xfe, 0xcc, 0xf7, 0x26, 0xca, 0x8f, 0x22, 0x7e, 0xb3, 0x65, 0xc8, 0x86, - 0x9e, 0x2c, 0x9c, 0x0d, 0x3d, 0xe3, 0xff, 0x87, 0x8a, 0x46, 0x6a, 0xec, 0x0d, 0x32, 0x5b, 0x0a, - 0xdd, 0x4c, 0xca, 0x96, 0x34, 0x8b, 0x65, 0x09, 0xed, 0x8c, 0x04, 0xbf, 0x19, 0x39, 0x3e, 0xc7, - 0x3b, 0xdd, 0x96, 0xcf, 0x2f, 0xb9, 0x1f, 0x28, 0xbf, 0x56, 0x3d, 0xca, 
0x30, 0x09, 0x6e, 0xfc, - 0x1a, 0xac, 0x25, 0xd6, 0x56, 0xb2, 0x88, 0xb7, 0x60, 0x09, 0xe7, 0x4d, 0x05, 0x4f, 0x24, 0x6f, - 0x6c, 0xca, 0x3c, 0xbc, 0xbf, 0x4e, 0x2e, 0x39, 0x6b, 0xea, 0x7b, 0xa7, 0xd8, 0x48, 0xc6, 0xac, - 0x48, 0xd8, 0xb1, 0xef, 0x9d, 0x1a, 0x7f, 0x90, 0x83, 0xdc, 0x81, 0x37, 0xd5, 0xe3, 0x1d, 0x33, - 0x0b, 0xf1, 0x8e, 0x52, 0xe1, 0xb4, 0x22, 0x85, 0x52, 0xca, 0xec, 0xe8, 0x8c, 0x52, 0x4a, 0xe5, - 0x03, 0x58, 0x16, 0x7c, 0x22, 0xf4, 0x84, 0xc6, 0xfe, 0xdc, 0xf6, 0x49, 0x20, 0xa6, 0xf0, 0xe1, - 0xaa, 0x3d, 0x09, 0x07, 0xde, 0x3e, 0xc1, 0xd9, 0x3a, 0xe4, 0x22, 0xf5, 0x05, 0xb3, 0x45, 0x92, - 0x6d, 0xc2, 0x12, 0xde, 0x4e, 0xb8, 0x96, 0xce, 0x7f, 0x99, 0x62, 0xdf, 0x81, 0xb5, 0x64, 0xbd, - 0xc4, 0x8a, 0xa4, 0x6c, 0xa4, 0x57, 0x8c, 0x3c, 0xe9, 0x36, 0x08, 0x3e, 0x42, 0x38, 0x32, 0xc8, - 0xe8, 0x8c, 0x73, 0xcc, 0xd2, 0x98, 0x5e, 0x29, 0xc1, 0xf4, 0xee, 0x41, 0x25, 0x1c, 0x5f, 0x5a, - 0x53, 0xfb, 0x7a, 0xec, 0xd9, 0x23, 0xb9, 0xbf, 0x21, 0x1c, 0x5f, 0x1e, 0x13, 0x84, 0xbd, 0x0f, - 0x30, 0x99, 0x4e, 0xe5, 0xde, 0x43, 0x07, 0x4b, 0x4c, 0xca, 0x47, 0xc7, 0xc7, 0x44, 0x72, 0x66, - 0x79, 0x32, 0x9d, 0xd2, 0x4f, 0xb6, 0x07, 0xcb, 0xa9, 0xf7, 0xae, 0xef, 0xaa, 0x28, 0x72, 0x6f, - 0xba, 0x93, 0xb2, 0x39, 0x6b, 0x43, 0x1d, 0xb6, 0xfd, 0x23, 0x60, 0x7f, 0xc2, 0xdb, 0xcf, 0x03, - 0x28, 0x47, 0xfd, 0xd3, 0x2f, 0x0f, 0xe3, 0xf5, 0x98, 0x4a, 0xe2, 0xf2, 0x70, 0x73, 0x34, 0xf2, - 0x05, 0x5f, 0xa4, 0x03, 0x33, 0x62, 0xf9, 0xa0, 0x9d, 0x98, 0xf2, 0xfe, 0x85, 0xf1, 0x5f, 0x33, - 0x50, 0xa0, 0x9b, 0xcc, 0x6f, 0xc3, 0x0a, 0xe1, 0x47, 0xb1, 0xa3, 0x32, 0x64, 0x80, 0xce, 0xdd, - 0x81, 0x0c, 0x1b, 0x15, 0xdb, 0x42, 0x7b, 0xdd, 0x21, 0x1b, 0xad, 0xbc, 0xf6, 0xc2, 0xc3, 0x3d, - 0x28, 0x47, 0x4d, 0x6b, 0xa4, 0x53, 0x52, 0x2d, 0xb3, 0xd7, 0x21, 0x7f, 0xe1, 0x4d, 0x95, 0xe5, - 0x07, 0xe2, 0x99, 0x34, 0x11, 0x1e, 0xf7, 0x45, 0xb4, 0x11, 0xdf, 0x0b, 0xc9, 0xc9, 0xbe, 0x88, - 0x46, 0x90, 0x0c, 0x16, 0xc7, 0xb8, 0x94, 0x32, 0xc6, 0x13, 0x58, 0x11, 0x7c, 0x40, 0x8b, 0x5b, - 0xb8, 0xf9, 
0xd0, 0xfc, 0xb6, 0x90, 0xf0, 0x86, 0xe3, 0xd9, 0x88, 0xeb, 0xb6, 0x37, 0x0c, 0x04, - 0x94, 0x70, 0x25, 0x59, 0x1b, 0xbf, 0x93, 0x21, 0xfe, 0x22, 0xea, 0x65, 0x0f, 0x20, 0xef, 0xaa, - 0x18, 0x87, 0x58, 0x8e, 0x8b, 0xee, 0x29, 0x09, 0x3c, 0x13, 0x31, 0xc4, 0xd2, 0x61, 0x64, 0x80, - 0x5e, 0x7b, 0xcd, 0xac, 0xb8, 0xb3, 0x49, 0x64, 0xba, 0xfa, 0x96, 0x1a, 0xd6, 0x9c, 0xd9, 0x87, - 0x46, 0x1f, 0x6d, 0xd3, 0x1d, 0x2d, 0xa2, 0x30, 0x9f, 0x38, 0x31, 0x95, 0x14, 0x38, 0x3a, 0xe7, - 0x5a, 0x24, 0xe1, 0xef, 0x66, 0xa1, 0x96, 0xe8, 0x11, 0x86, 0x54, 0x8a, 0x03, 0x80, 0xfc, 0x4e, - 0x72, 0xbd, 0x41, 0x80, 0xa4, 0xa0, 0xae, 0xcd, 0x53, 0x36, 0x31, 0x4f, 0x51, 0x90, 0x52, 0x4e, - 0x0f, 0x52, 0x7a, 0x04, 0xe5, 0xf8, 0x55, 0x8f, 0x64, 0x97, 0x44, 0x7b, 0xea, 0xb6, 0x56, 0x8c, - 0x14, 0x87, 0x35, 0x15, 0xf4, 0xb0, 0xa6, 0x1f, 0x68, 0x51, 0x30, 0x4b, 0x58, 0x8d, 0x91, 0x36, - 0xa3, 0xbf, 0x90, 0x18, 0x18, 0xe3, 0x33, 0xa8, 0x68, 0x9d, 0xd7, 0x23, 0x49, 0x32, 0x89, 0x48, - 0x92, 0xe8, 0xb6, 0x65, 0x36, 0xbe, 0x6d, 0x69, 0xfc, 0xb9, 0x2c, 0xd4, 0xc4, 0xfe, 0x72, 0xdc, - 0xf3, 0x63, 0x6f, 0xec, 0x0c, 0xd1, 0x0f, 0x15, 0xed, 0x30, 0x29, 0x68, 0xa9, 0x7d, 0x26, 0xb7, - 0x18, 0xc9, 0x59, 0xfa, 0x55, 0x73, 0x62, 0xd2, 0xd1, 0x55, 0x73, 0x03, 0x6a, 0x82, 0x31, 0xa2, - 0x47, 0x29, 0x7e, 0x1b, 0xc4, 0xac, 0x9c, 0x71, 0xbe, 0x6b, 0x07, 0xc4, 0x21, 0xbf, 0x03, 0x6b, - 0x02, 0x07, 0x6f, 0xd9, 0x4e, 0x9c, 0xf1, 0xd8, 0x89, 0x2f, 0x62, 0xe5, 0xcc, 0xfa, 0x19, 0xe7, - 0xa6, 0x1d, 0xf2, 0x23, 0x91, 0x21, 0x9f, 0x12, 0x29, 0x8d, 0x9c, 0xc0, 0x3e, 0x8d, 0x03, 0x5f, - 0xa3, 0x34, 0x7a, 0xaf, 0xed, 0x2b, 0xcd, 0x7b, 0xbd, 0x24, 0xef, 0x68, 0xd9, 0x57, 0x91, 0xf7, - 0x7a, 0x8e, 0x92, 0x8a, 0xf3, 0x94, 0x64, 0xfc, 0xf3, 0x2c, 0x54, 0x34, 0xb2, 0x7c, 0x95, 0xd3, - 0xf5, 0xee, 0x82, 0xdf, 0xb0, 0xac, 0xbb, 0x08, 0xdf, 0x4c, 0x36, 0x99, 0x8b, 0x6e, 0xeb, 0xe8, - 0x04, 0x7c, 0x07, 0xca, 0x62, 0xd7, 0x7d, 0x80, 0x26, 0x58, 0xf9, 0x94, 0x0f, 0x02, 0x8e, 0x67, - 0xa7, 0x2a, 0xf3, 0x31, 0x66, 0x16, 0xe2, 0xcc, 
0xc7, 0x22, 0xf3, 0x45, 0xd1, 0xfa, 0x1f, 0x43, - 0x55, 0xd6, 0x8a, 0x6b, 0x8a, 0xc3, 0x8d, 0x77, 0x7d, 0x62, 0xbd, 0xcd, 0x0a, 0x35, 0x47, 0x8b, - 0x2f, 0x0b, 0x3e, 0x56, 0x05, 0x4b, 0x2f, 0x2b, 0xf8, 0x98, 0x12, 0xc6, 0x7e, 0x74, 0x01, 0x02, - 0xe3, 0xcf, 0x14, 0x1f, 0x7b, 0x1f, 0xd6, 0x14, 0xbb, 0x9a, 0xb9, 0xb6, 0xeb, 0x7a, 0x33, 0x77, - 0xc8, 0xd5, 0x85, 0x4b, 0x26, 0xb3, 0x4e, 0xe2, 0x1c, 0x63, 0x14, 0xdd, 0xde, 0xa7, 0x38, 0xb6, - 0x87, 0x50, 0x20, 0xb9, 0x9c, 0x84, 0x8f, 0x74, 0xc6, 0x45, 0x28, 0xec, 0x01, 0x14, 0x48, 0x3c, - 0xcf, 0xde, 0xc8, 0x6c, 0x08, 0xc1, 0x68, 0x02, 0x13, 0x05, 0x8f, 0x78, 0xe8, 0x3b, 0xc3, 0x20, - 0xbe, 0xcb, 0x59, 0x10, 0xfa, 0x27, 0xb5, 0x15, 0x5b, 0x6e, 0x63, 0x4c, 0xd4, 0x51, 0x09, 0x47, - 0x1c, 0x4c, 0x6b, 0x89, 0x3a, 0xa4, 0xb8, 0x34, 0x86, 0xcd, 0x53, 0x1e, 0x3e, 0xe7, 0xdc, 0x75, - 0x85, 0x30, 0x34, 0xe4, 0x6e, 0xe8, 0xdb, 0x63, 0xb1, 0x48, 0x34, 0x82, 0x0f, 0x17, 0x6a, 0x8d, - 0x6d, 0x20, 0xbb, 0x71, 0xc1, 0x56, 0x54, 0x8e, 0x78, 0xc7, 0xc6, 0x69, 0x5a, 0xde, 0xf6, 0xaf, - 0xc2, 0xf6, 0xcd, 0x85, 0x52, 0xee, 0x71, 0x3f, 0x48, 0x72, 0x95, 0xc8, 0x0f, 0x38, 0xf6, 0xec, - 0x90, 0x7a, 0xa3, 0x73, 0x96, 0x2e, 0x54, 0xb4, 0x9c, 0xf8, 0xec, 0xcf, 0xa0, 0x70, 0x47, 0x09, - 0x71, 0x22, 0xb9, 0x9e, 0x3f, 0x41, 0xbf, 0xdb, 0xc8, 0x8a, 0x6b, 0xcf, 0x98, 0x2b, 0x31, 0x1c, - 0x23, 0x27, 0x8c, 0x1d, 0x58, 0x41, 0xc9, 0x5e, 0x3b, 0xe8, 0x5e, 0x24, 0x0c, 0x1a, 0xeb, 0xc0, - 0xba, 0xc4, 0xbb, 0xf4, 0x98, 0xbe, 0xff, 0x90, 0x83, 0x8a, 0x06, 0x16, 0xa7, 0x11, 0x06, 0x42, - 0x5a, 0x23, 0xc7, 0x9e, 0x70, 0xe5, 0xe4, 0xac, 0x99, 0x35, 0x84, 0xee, 0x49, 0xa0, 0x38, 0x8b, - 0xed, 0xcb, 0x73, 0xcb, 0x9b, 0x85, 0xd6, 0x88, 0x9f, 0xfb, 0x5c, 0xf5, 0xb2, 0x6a, 0x5f, 0x9e, - 0xf7, 0x66, 0xe1, 0x1e, 0xc2, 0x04, 0x96, 0xe0, 0x25, 0x1a, 0x96, 0x8c, 0x8b, 0x9b, 0xd8, 0x57, - 0x31, 0x96, 0x0c, 0x20, 0x25, 0xca, 0xcc, 0x47, 0x01, 0xa4, 0xa4, 0x2d, 0xce, 0x1f, 0xa0, 0x85, - 0xc5, 0x03, 0xf4, 0x7b, 0xb0, 0x49, 0x07, 0xa8, 0x64, 0xcd, 0xd6, 0xdc, 0x4e, 0x5e, 
0xc7, 0x5c, - 0x39, 0x48, 0x4d, 0xec, 0xad, 0x8b, 0x11, 0x28, 0xb6, 0x14, 0x38, 0x3f, 0x25, 0x46, 0x96, 0x31, - 0xc5, 0xc8, 0x64, 0xe5, 0x7d, 0xe7, 0xa7, 0x5c, 0x60, 0x62, 0x04, 0x8e, 0x8e, 0x29, 0xef, 0xe2, - 0x4c, 0x1c, 0x77, 0x1e, 0xd3, 0xbe, 0x4a, 0x62, 0x96, 0x25, 0xa6, 0x7d, 0xa5, 0x63, 0x7e, 0x08, - 0x5b, 0x13, 0x3e, 0x72, 0xec, 0x64, 0xb5, 0x56, 0x2c, 0xb8, 0xad, 0x53, 0xb6, 0x56, 0xa6, 0x4f, - 0x8a, 0xbb, 0x98, 0x8d, 0x9f, 0x7a, 0x93, 0x53, 0x87, 0x64, 0x16, 0x8a, 0x09, 0xca, 0x9b, 0xcb, - 0xee, 0x6c, 0xf2, 0x2b, 0x08, 0x16, 0x45, 0x02, 0xa3, 0x06, 0x95, 0x7e, 0xe8, 0x4d, 0xd5, 0x32, - 0x2f, 0x43, 0x95, 0x92, 0xf2, 0x16, 0xf3, 0x1d, 0xb8, 0x8d, 0x2c, 0x61, 0xe0, 0x4d, 0xbd, 0xb1, - 0x77, 0x7e, 0x9d, 0xb0, 0xe3, 0xfd, 0xeb, 0x0c, 0xac, 0x25, 0x72, 0x25, 0x7b, 0xfd, 0x1e, 0xf1, - 0xb3, 0xe8, 0x0e, 0x66, 0x26, 0x71, 0x01, 0x47, 0xac, 0x17, 0x21, 0x12, 0x33, 0x53, 0xf7, 0x32, - 0x9b, 0xf1, 0x73, 0x2b, 0xaa, 0x20, 0xb1, 0x94, 0xc6, 0x22, 0x4b, 0x91, 0xe5, 0xd5, 0x43, 0x2c, - 0xaa, 0x8a, 0x5f, 0x92, 0xf7, 0xa5, 0x46, 0x72, 0xc8, 0xb9, 0xe4, 0x8d, 0x0a, 0xdd, 0xe6, 0xa7, - 0x7a, 0x10, 0x1b, 0x02, 0x03, 0xe3, 0xef, 0x65, 0x00, 0xe2, 0xde, 0xe1, 0x9d, 0x8e, 0x48, 0x6e, - 0xc9, 0x60, 0x38, 0xae, 0x26, 0xa3, 0xbc, 0x01, 0xd5, 0x28, 0x72, 0x3b, 0x96, 0x84, 0x2a, 0x0a, - 0x26, 0xc4, 0xa1, 0x77, 0x60, 0xe5, 0x7c, 0xec, 0x9d, 0xa2, 0xc4, 0x2a, 0xe5, 0x16, 0x8a, 0x89, - 0x5b, 0x26, 0xb0, 0x92, 0x46, 0x62, 0xb9, 0x29, 0x9f, 0x1a, 0xdc, 0xad, 0x4b, 0x41, 0xc6, 0x5f, - 0xc9, 0x46, 0xe1, 0xa1, 0xf1, 0x4c, 0xbc, 0x58, 0xbd, 0xfb, 0x79, 0x42, 0x6d, 0x5e, 0xe4, 0x5e, - 0xfc, 0x0c, 0x96, 0x7d, 0x3a, 0x94, 0xd4, 0x89, 0x95, 0x7f, 0xc1, 0x89, 0x55, 0xf3, 0x13, 0x92, - 0xce, 0xb7, 0xa1, 0x6e, 0x8f, 0x2e, 0xb9, 0x1f, 0x3a, 0x68, 0xad, 0x47, 0xf9, 0x58, 0x06, 0x64, - 0x6a, 0x70, 0x14, 0x44, 0xdf, 0x81, 0x15, 0x79, 0xb3, 0x3e, 0xc2, 0x94, 0xef, 0x7a, 0xc5, 0x60, - 0x81, 0x68, 0xfc, 0x23, 0x15, 0x8f, 0x9a, 0x5c, 0xdd, 0x17, 0xcf, 0x8a, 0x3e, 0xc2, 0xec, 0xa2, - 0x03, 0x55, 0x12, 0x92, 
0x74, 0x02, 0x48, 0x7e, 0x44, 0x40, 0xe9, 0x02, 0x48, 0x4e, 0x6b, 0xfe, - 0x55, 0xa6, 0xd5, 0xf8, 0x37, 0x19, 0x28, 0x1e, 0x78, 0xd3, 0x03, 0x87, 0x6e, 0x35, 0xe0, 0x36, - 0x89, 0x7c, 0x54, 0x4b, 0x22, 0x89, 0x71, 0x41, 0x2f, 0xb8, 0x9b, 0x98, 0x2a, 0xe6, 0xd5, 0x92, - 0x62, 0xde, 0x0f, 0xe0, 0x0e, 0xba, 0x00, 0x7d, 0x6f, 0xea, 0xf9, 0x62, 0xab, 0xda, 0x63, 0x12, - 0xf7, 0x3c, 0x37, 0xbc, 0x50, 0xbc, 0xf3, 0xf6, 0x19, 0xe7, 0xc7, 0x1a, 0xc6, 0x51, 0x84, 0x80, - 0xf7, 0x92, 0xc7, 0xe1, 0xa5, 0x45, 0x1a, 0xba, 0x94, 0x47, 0x89, 0xa3, 0xae, 0x88, 0x8c, 0x36, - 0xc2, 0x51, 0x22, 0x35, 0x3e, 0x81, 0x72, 0x64, 0xec, 0x61, 0xef, 0x42, 0xf9, 0xc2, 0x9b, 0x4a, - 0x8b, 0x50, 0x26, 0x71, 0x7f, 0x53, 0x8e, 0xda, 0x2c, 0x5d, 0xd0, 0x8f, 0xc0, 0xf8, 0x83, 0x22, - 0x14, 0x3b, 0xee, 0xa5, 0xe7, 0x0c, 0x31, 0xa2, 0x75, 0xc2, 0x27, 0x9e, 0x7a, 0xde, 0x43, 0xfc, - 0xc6, 0xd0, 0xad, 0xf8, 0x75, 0xae, 0x9c, 0x0c, 0xdd, 0x8a, 0xde, 0xe5, 0xda, 0x80, 0x25, 0x5f, - 0x7f, 0x5e, 0xab, 0xe0, 0xe3, 0x3d, 0x80, 0xe8, 0xbc, 0x2c, 0x68, 0x8f, 0xa6, 0x88, 0xba, 0x28, - 0xd8, 0x10, 0xa7, 0x8c, 0xee, 0x16, 0x97, 0x11, 0x82, 0x13, 0xf6, 0x1a, 0x14, 0xe5, 0x75, 0x49, - 0xba, 0xbc, 0x45, 0x81, 0xfb, 0x12, 0x84, 0xd4, 0xe0, 0x73, 0x72, 0xe1, 0x46, 0x82, 0x6c, 0xce, - 0xac, 0x2a, 0xe0, 0x9e, 0xa0, 0xb5, 0x7b, 0x50, 0x21, 0x7c, 0x42, 0x29, 0xc9, 0x40, 0x50, 0x04, - 0x21, 0x42, 0xca, 0x2b, 0x75, 0xe5, 0xd4, 0x57, 0xea, 0x30, 0x64, 0x39, 0xe2, 0xb2, 0x34, 0x44, - 0xa0, 0xb7, 0xc9, 0x34, 0xb8, 0x7a, 0xfa, 0x51, 0xda, 0x54, 0xe8, 0xda, 0xbd, 0xb2, 0xa9, 0xbc, - 0x09, 0xb5, 0x33, 0x7b, 0x3c, 0x3e, 0xb5, 0x87, 0xcf, 0xc8, 0x14, 0x50, 0x25, 0xeb, 0xa7, 0x02, - 0xa2, 0x2d, 0xe0, 0x1e, 0x54, 0xb4, 0x55, 0xc6, 0x28, 0xcf, 0xbc, 0x09, 0xf1, 0xfa, 0xce, 0x5b, - 0xf8, 0x96, 0x5f, 0xc1, 0xc2, 0xa7, 0x45, 0xbb, 0xae, 0x24, 0xa3, 0x5d, 0xef, 0x20, 0x37, 0x95, - 0x11, 0x89, 0x75, 0x7a, 0x08, 0xcb, 0x1e, 0x8d, 0x30, 0x22, 0x11, 0x0d, 0x59, 0x34, 0x79, 0x94, - 0xbf, 0x4a, 0xba, 0x04, 0xc1, 0x08, 0xe5, 0x2e, 0x99, 0xa9, 
0xa7, 0xb6, 0x33, 0xc2, 0xcb, 0x17, - 0x64, 0x3d, 0x28, 0xda, 0x93, 0xf0, 0xd8, 0x76, 0x46, 0xec, 0x3e, 0x54, 0x55, 0x36, 0x9e, 0x8e, - 0x6b, 0x34, 0xff, 0x32, 0xbb, 0x4f, 0x8f, 0x4a, 0x44, 0x18, 0x93, 0xe8, 0xde, 0xbc, 0x59, 0x91, - 0x28, 0x48, 0x07, 0x1f, 0x60, 0x94, 0x4f, 0xc8, 0xf1, 0x66, 0xfc, 0xf2, 0xe3, 0x3b, 0x51, 0xf0, - 0x01, 0x52, 0xa9, 0xfa, 0x4f, 0xce, 0x31, 0xc2, 0x14, 0xc2, 0x1d, 0xf9, 0xe8, 0x36, 0x13, 0xf2, - 0xaf, 0x44, 0x45, 0x1f, 0x1d, 0x21, 0xb0, 0x4f, 0x34, 0xfd, 0xb5, 0x81, 0xc8, 0xaf, 0xcd, 0xd5, - 0x7f, 0xd3, 0xe5, 0xb4, 0xbb, 0x00, 0x4e, 0x20, 0x4e, 0x99, 0x80, 0xbb, 0x23, 0xbc, 0xe0, 0x5e, - 0x32, 0xcb, 0x4e, 0xf0, 0x94, 0x00, 0xdf, 0xac, 0x62, 0xdb, 0x84, 0xaa, 0x3e, 0x4c, 0x56, 0x82, - 0x7c, 0xef, 0xb8, 0xdd, 0xad, 0xdf, 0x62, 0x15, 0x28, 0xf6, 0xdb, 0x83, 0xc1, 0x21, 0x7a, 0xfa, - 0xaa, 0x50, 0x8a, 0xae, 0xaf, 0x66, 0x45, 0xaa, 0xd9, 0x6a, 0xb5, 0x8f, 0x07, 0xed, 0xbd, 0x7a, - 0xee, 0xc7, 0xf9, 0x52, 0xb6, 0x9e, 0x33, 0xfe, 0x30, 0x07, 0x15, 0x6d, 0x16, 0x5e, 0xcc, 0x8c, - 0xef, 0x02, 0xa0, 0x26, 0x19, 0x07, 0xac, 0xe6, 0xcd, 0xb2, 0x80, 0xd0, 0xe2, 0xeb, 0x3e, 0x0a, - 0x7a, 0x61, 0x24, 0xf2, 0x51, 0xbc, 0x09, 0x35, 0x7a, 0x03, 0x44, 0xf7, 0xd7, 0x16, 0xcc, 0x2a, - 0x01, 0x25, 0xab, 0xc6, 0xcb, 0xf0, 0x88, 0x84, 0xd7, 0x0c, 0xe5, 0x23, 0x45, 0x04, 0xc2, 0x8b, - 0x86, 0x78, 0x4b, 0x34, 0xf0, 0xc6, 0x97, 0x9c, 0x30, 0x48, 0x22, 0xac, 0x48, 0xd8, 0x40, 0x3e, - 0x34, 0x20, 0xf9, 0xa1, 0x76, 0x1b, 0xbb, 0x60, 0x56, 0x09, 0x28, 0x1b, 0xfa, 0x8e, 0x22, 0x20, - 0x8a, 0x5e, 0xd9, 0x5a, 0xa4, 0x86, 0x04, 0xf1, 0x1c, 0x2e, 0x98, 0x11, 0xcb, 0x48, 0x18, 0xdf, - 0x5a, 0x2c, 0xf7, 0x72, 0x73, 0x22, 0x7b, 0x17, 0xd8, 0x64, 0x3a, 0xb5, 0x52, 0x0c, 0x7c, 0x79, - 0x73, 0x65, 0x32, 0x9d, 0x0e, 0x34, 0xfb, 0xd7, 0x37, 0x60, 0x7b, 0xfc, 0x0a, 0x58, 0x53, 0x6c, - 0x60, 0xec, 0x62, 0xa4, 0x8a, 0xc5, 0x6c, 0x39, 0xa3, 0xb3, 0xe5, 0x14, 0xee, 0x97, 0x4d, 0xe5, - 0x7e, 0x2f, 0xe2, 0x13, 0xc6, 0x3e, 0x54, 0x8e, 0xb5, 0xa7, 0x10, 0xef, 0x8b, 0x13, 0x42, 0x3d, - 
0x82, 0x48, 0x67, 0x07, 0xd9, 0x14, 0x7d, 0xf9, 0xf6, 0xa1, 0xd6, 0x9b, 0xac, 0xd6, 0x1b, 0xe3, - 0xef, 0x64, 0xe8, 0xe9, 0xa8, 0xa8, 0xf3, 0xf1, 0xeb, 0x8b, 0xca, 0xfd, 0x16, 0x3f, 0x9a, 0x50, - 0x51, 0x6e, 0x37, 0xf9, 0xde, 0x01, 0x76, 0xcd, 0xf2, 0xce, 0xce, 0x02, 0xae, 0x62, 0x3c, 0x2a, - 0x08, 0xeb, 0x21, 0x48, 0x09, 0xdf, 0x42, 0xc2, 0x77, 0xa8, 0xfe, 0x40, 0x06, 0x76, 0x08, 0xe1, - 0xfb, 0xc8, 0xbe, 0x92, 0xad, 0x06, 0x42, 0x04, 0x91, 0xfe, 0x01, 0x75, 0x69, 0x38, 0x4a, 0x1b, - 0x7f, 0x53, 0xbe, 0xeb, 0x30, 0x3f, 0xbf, 0x0f, 0xa1, 0x14, 0xd5, 0x9a, 0x3c, 0x61, 0x15, 0x66, - 0x94, 0x2f, 0xce, 0x71, 0x34, 0x86, 0x24, 0x7a, 0x4c, 0x9b, 0x0b, 0x7d, 0x3c, 0x1d, 0xad, 0xd7, - 0xef, 0x01, 0x3b, 0x73, 0xfc, 0x79, 0x64, 0xda, 0x6c, 0x75, 0xcc, 0xd1, 0xb0, 0x8d, 0x13, 0x58, - 0x53, 0x5c, 0x42, 0xd3, 0x08, 0x92, 0x8b, 0x97, 0x79, 0x09, 0x93, 0xcf, 0x2e, 0x30, 0x79, 0xe3, - 0x37, 0x0b, 0x50, 0x54, 0xcf, 0x8a, 0xa6, 0x3d, 0x85, 0x59, 0x4e, 0x3e, 0x85, 0xd9, 0x48, 0x3c, - 0x90, 0x86, 0x4b, 0x2f, 0xcf, 0xfb, 0x77, 0xe6, 0x8f, 0x6c, 0xcd, 0x57, 0x91, 0x38, 0xb6, 0xa5, - 0xaf, 0xa2, 0x90, 0xf4, 0x55, 0xa4, 0x3d, 0x0f, 0x4a, 0xa2, 0xe7, 0xc2, 0xf3, 0xa0, 0x77, 0x80, - 0xe4, 0x08, 0x2d, 0xb8, 0xad, 0x84, 0x00, 0x79, 0xf1, 0x5d, 0x13, 0x3b, 0x4a, 0xf3, 0x62, 0xc7, - 0x2b, 0x8b, 0x04, 0xdf, 0x83, 0x25, 0x7a, 0x23, 0x46, 0x5e, 0x82, 0x56, 0x07, 0x87, 0x9c, 0x2b, - 0xf5, 0x9f, 0x2e, 0x44, 0x98, 0x12, 0x57, 0x7f, 0x6b, 0xaf, 0x92, 0x78, 0x6b, 0x4f, 0xf7, 0xa1, - 0x54, 0x93, 0x3e, 0x94, 0x07, 0x50, 0x8f, 0x26, 0x0e, 0x2d, 0x92, 0x6e, 0x20, 0x6f, 0x50, 0x2e, - 0x2b, 0xb8, 0xe0, 0x86, 0xdd, 0x20, 0x3e, 0xf8, 0x96, 0x13, 0x07, 0x9f, 0xe0, 0x55, 0xcd, 0x30, - 0xe4, 0x93, 0x69, 0xa8, 0x0e, 0x3e, 0xed, 0x45, 0x56, 0x5a, 0x79, 0xba, 0xe2, 0xa1, 0x96, 0x97, - 0xa8, 0x63, 0x17, 0x96, 0xcf, 0x6c, 0x67, 0x3c, 0xf3, 0xb9, 0xe5, 0x73, 0x3b, 0xf0, 0x5c, 0xdc, - 0xfc, 0xf1, 0x19, 0x2c, 0x87, 0xb8, 0x4f, 0x38, 0x26, 0xa2, 0x98, 0xb5, 0x33, 0x3d, 0x89, 0x17, - 0xa5, 0xf4, 0x99, 0x10, 0x47, 0x96, 
0xbc, 0x4b, 0x4d, 0xb1, 0x2a, 0x9d, 0xae, 0xb5, 0x7f, 0xd8, - 0x79, 0x72, 0x30, 0xa8, 0x67, 0x44, 0xb2, 0x7f, 0xd2, 0x6a, 0xb5, 0xdb, 0x7b, 0x78, 0x84, 0x01, - 0x2c, 0xed, 0x37, 0x3b, 0x87, 0xf2, 0x00, 0xcb, 0xd7, 0x0b, 0xc6, 0x3f, 0xcb, 0x42, 0x45, 0x1b, - 0x0d, 0xfb, 0x30, 0x5a, 0x04, 0x7a, 0x7c, 0xe1, 0xee, 0xe2, 0x88, 0x77, 0x14, 0x87, 0xd7, 0x56, - 0x21, 0x7a, 0x7b, 0x35, 0x7b, 0xe3, 0xdb, 0xab, 0xec, 0x6d, 0x58, 0xb1, 0xa9, 0x86, 0x68, 0xd2, - 0xa5, 0x71, 0x5f, 0x82, 0xe5, 0x9c, 0xbf, 0x2d, 0x1f, 0x82, 0x90, 0xc7, 0x94, 0xc0, 0xcb, 0xab, - 0xa0, 0xcd, 0xe8, 0xa4, 0xc2, 0xb5, 0x29, 0xca, 0x99, 0x91, 0xce, 0xf8, 0xe8, 0xc0, 0x97, 0xf3, - 0xa5, 0xb2, 0xe9, 0xf6, 0xa4, 0x46, 0xe1, 0x55, 0x33, 0x4a, 0x1b, 0x1f, 0x01, 0xc4, 0xe3, 0x49, - 0x4e, 0xdf, 0xad, 0xe4, 0xf4, 0x65, 0xb4, 0xe9, 0xcb, 0x1a, 0xff, 0x50, 0xb2, 0x2e, 0xb9, 0x16, - 0x91, 0xa9, 0xef, 0x3b, 0xa0, 0x8c, 0x8f, 0x16, 0x06, 0x79, 0x4f, 0xc7, 0x3c, 0x54, 0x17, 0x40, - 0x57, 0x65, 0x4e, 0x27, 0xca, 0x58, 0x60, 0xb5, 0xd9, 0x45, 0x56, 0xfb, 0x06, 0x54, 0xf1, 0x65, - 0x31, 0xd9, 0x90, 0x64, 0x57, 0x95, 0x89, 0x7d, 0xa5, 0xda, 0x4e, 0xf0, 0xd8, 0xfc, 0x1c, 0x8f, - 0xfd, 0x5b, 0x19, 0x7a, 0x86, 0x26, 0xee, 0x68, 0xcc, 0x64, 0xa3, 0x3a, 0x93, 0x4c, 0x56, 0xa2, - 0x9a, 0x51, 0xfe, 0x0d, 0x8c, 0x33, 0x9b, 0xce, 0x38, 0xd3, 0x59, 0x72, 0x2e, 0x95, 0x25, 0x1b, - 0xdb, 0xd0, 0xd8, 0xe3, 0x62, 0x2a, 0x9a, 0xe3, 0xf1, 0xdc, 0x5c, 0x1a, 0x77, 0xe0, 0x76, 0x4a, - 0x9e, 0xb4, 0xda, 0xfc, 0x04, 0x36, 0x9a, 0xf4, 0xf8, 0xc4, 0x37, 0x75, 0x41, 0xd3, 0x68, 0xc0, - 0xe6, 0x7c, 0x95, 0xb2, 0xb1, 0x7d, 0x58, 0xdd, 0xe3, 0xa7, 0xb3, 0xf3, 0x43, 0x7e, 0x19, 0x37, - 0xc4, 0x20, 0x1f, 0x5c, 0x78, 0xcf, 0xe5, 0xe2, 0xe2, 0x6f, 0x0c, 0xcb, 0x14, 0x38, 0x56, 0x30, - 0xe5, 0x43, 0x65, 0xb9, 0x47, 0x48, 0x7f, 0xca, 0x87, 0xc6, 0x87, 0xc0, 0xf4, 0x7a, 0xe4, 0x4a, - 0x08, 0xb5, 0x6a, 0x76, 0x6a, 0x05, 0xd7, 0x41, 0xc8, 0x27, 0xea, 0x62, 0x22, 0x04, 0xb3, 0xd3, - 0x3e, 0x41, 0x8c, 0x77, 0xa0, 0x7a, 0x6c, 0x5f, 0x9b, 0xfc, 0x2b, 0x79, 
0xff, 0x6f, 0x0b, 0x8a, - 0x53, 0xfb, 0x5a, 0xf0, 0xd3, 0xc8, 0x89, 0x87, 0xd9, 0xc6, 0x3f, 0xce, 0xc3, 0x12, 0x61, 0xb2, - 0xfb, 0xf4, 0xb2, 0xb9, 0xe3, 0x22, 0x3f, 0x53, 0x27, 0x8b, 0x06, 0x5a, 0x38, 0x7c, 0xb2, 0x8b, - 0x87, 0x8f, 0xb4, 0x38, 0xaa, 0xe7, 0xc9, 0x94, 0xbb, 0xc5, 0x9d, 0x4d, 0xd4, 0x9b, 0x64, 0xc9, - 0x17, 0x18, 0xf2, 0xf1, 0x8b, 0xf8, 0x74, 0xfb, 0x3c, 0xe9, 0x10, 0x8f, 0x95, 0x37, 0xea, 0x9d, - 0x3a, 0x53, 0xe5, 0xb9, 0xa3, 0x83, 0x52, 0x35, 0xc4, 0xa2, 0xba, 0xd4, 0x9a, 0xd4, 0x10, 0x17, - 0x34, 0xc1, 0xd2, 0xcb, 0x35, 0x41, 0x32, 0x45, 0xbe, 0x40, 0x13, 0x84, 0x57, 0xd0, 0x04, 0x5f, - 0xc1, 0x19, 0x7d, 0x1b, 0x4a, 0x28, 0x28, 0x69, 0xc7, 0x90, 0x10, 0x90, 0xc4, 0x31, 0xf4, 0xb1, - 0xa6, 0x2b, 0x51, 0x24, 0x8c, 0x76, 0x0e, 0x98, 0xfc, 0xab, 0x5f, 0x8c, 0x93, 0xef, 0x4b, 0x28, - 0x4a, 0xa8, 0x20, 0x68, 0xd7, 0x9e, 0xa8, 0x87, 0x2e, 0xf1, 0xb7, 0x98, 0x36, 0x7c, 0x96, 0xee, - 0xab, 0x99, 0xe3, 0xf3, 0x91, 0x7a, 0x1c, 0xcb, 0xc1, 0x3d, 0x2a, 0x20, 0x62, 0x80, 0x42, 0x6f, - 0x73, 0xbd, 0xe7, 0xae, 0xe4, 0x3d, 0x45, 0x27, 0x78, 0x2a, 0x92, 0x06, 0x83, 0x3a, 0x3e, 0x8b, - 0x3b, 0xf5, 0x7c, 0x75, 0xca, 0x1b, 0x3f, 0xcb, 0x40, 0x5d, 0xee, 0xae, 0x28, 0x4f, 0x57, 0x9b, - 0x0a, 0x37, 0x05, 0x6e, 0xbc, 0xf8, 0xa9, 0x2b, 0x03, 0x6a, 0x68, 0x2d, 0x8a, 0x8e, 0x7c, 0xb2, - 0x76, 0x55, 0x04, 0x70, 0x5f, 0x1e, 0xfb, 0xaf, 0x43, 0x45, 0x05, 0x8d, 0x4f, 0x9c, 0xb1, 0xfa, - 0xf8, 0x05, 0x45, 0x8d, 0x1f, 0x39, 0x63, 0x25, 0x31, 0xf8, 0xb6, 0xbc, 0x64, 0x9d, 0x41, 0x89, - 0xc1, 0xb4, 0x43, 0x6e, 0xfc, 0xd3, 0x0c, 0xac, 0x6a, 0x43, 0x91, 0xfb, 0xf6, 0xfb, 0x50, 0x8d, - 0xde, 0xa3, 0xe6, 0x91, 0xa8, 0xba, 0x95, 0x64, 0x34, 0x71, 0xb1, 0xca, 0x30, 0x82, 0x04, 0xa2, - 0x33, 0x23, 0xfb, 0x9a, 0x22, 0x9b, 0x67, 0x13, 0xa5, 0x0d, 0x8e, 0xec, 0xeb, 0x7d, 0xce, 0xfb, - 0xb3, 0x89, 0xd0, 0xf5, 0x9f, 0x73, 0xfe, 0x2c, 0x42, 0x20, 0xf6, 0x09, 0x02, 0x26, 0x31, 0x0c, - 0xa8, 0x4d, 0x3c, 0x37, 0xbc, 0x88, 0x50, 0xa4, 0x98, 0x8e, 0x40, 0xc2, 0x31, 0x7e, 0x3f, 0x0b, - 0x6b, 0x64, 
0x93, 0x94, 0xb6, 0x60, 0xc9, 0xba, 0x1a, 0xb0, 0x44, 0xe6, 0x59, 0x62, 0x5e, 0x07, - 0xb7, 0x4c, 0x99, 0x66, 0xdf, 0x7b, 0x45, 0x3b, 0xaa, 0xba, 0xc7, 0x7d, 0xc3, 0xf4, 0xe7, 0x16, - 0xa7, 0xff, 0xe6, 0xe9, 0x4d, 0xf3, 0x0c, 0x17, 0xd2, 0x3c, 0xc3, 0xaf, 0xe2, 0x8f, 0x5d, 0xb8, - 0x71, 0x5c, 0x5c, 0x7c, 0x57, 0xf3, 0x43, 0xd8, 0x4a, 0xe0, 0x20, 0xb7, 0x76, 0xce, 0x1c, 0xae, - 0x5e, 0xee, 0x59, 0xd7, 0xb0, 0xfb, 0x2a, 0x6f, 0xb7, 0x08, 0x85, 0x60, 0xe8, 0x4d, 0xb9, 0xb1, - 0x09, 0xeb, 0xc9, 0x59, 0x95, 0xc7, 0xc4, 0x6f, 0x67, 0xa0, 0x21, 0xe3, 0x78, 0x1c, 0xf7, 0xfc, - 0xc0, 0x09, 0x42, 0xcf, 0x8f, 0xde, 0x6d, 0xbe, 0x0b, 0x40, 0x1f, 0xe2, 0x40, 0xe5, 0x5b, 0xbe, - 0x55, 0x83, 0x10, 0x54, 0xbd, 0x6f, 0x43, 0x89, 0xbb, 0x23, 0xca, 0x24, 0x6a, 0x28, 0x72, 0x77, - 0xa4, 0x14, 0xf7, 0x85, 0xa3, 0xb4, 0x96, 0x14, 0x12, 0xe4, 0xab, 0x0b, 0x62, 0x76, 0xf8, 0x25, - 0x1e, 0xe9, 0xf9, 0xe8, 0xd5, 0x85, 0x23, 0xfb, 0x0a, 0xa3, 0x62, 0x03, 0xe3, 0xaf, 0x66, 0x61, - 0x25, 0xee, 0x1f, 0xbd, 0x3b, 0xf3, 0xe2, 0x17, 0x74, 0xee, 0x4b, 0x72, 0x70, 0x84, 0xc2, 0xa3, - 0x59, 0x6a, 0x4b, 0xb4, 0x39, 0x3b, 0x2e, 0x33, 0xa0, 0xa2, 0x30, 0xbc, 0x59, 0xa8, 0xbd, 0x05, - 0x5a, 0x26, 0x94, 0xde, 0x2c, 0x14, 0x1a, 0xaa, 0x50, 0xd5, 0x1d, 0x57, 0xea, 0x88, 0x05, 0x7b, - 0x12, 0x76, 0xf0, 0x6b, 0x2f, 0x02, 0x2c, 0x8a, 0xd1, 0x42, 0x0a, 0x2c, 0x81, 0x5f, 0x27, 0x85, - 0x85, 0x56, 0x0e, 0x95, 0x15, 0x5d, 0x9a, 0xa7, 0x07, 0xea, 0x23, 0x69, 0xfe, 0x75, 0xa8, 0x50, - 0xe5, 0xf1, 0x05, 0x73, 0x7c, 0x98, 0x2b, 0xec, 0xb8, 0x98, 0x2f, 0xad, 0x66, 0xde, 0x2c, 0x61, - 0x2b, 0x00, 0x6a, 0x0a, 0xc3, 0x64, 0xfe, 0x62, 0x06, 0x6e, 0xa7, 0x2c, 0x9b, 0xdc, 0xe5, 0x2d, - 0x58, 0x3d, 0x8b, 0x32, 0xd5, 0xec, 0xd2, 0x56, 0xdf, 0x54, 0x6c, 0x35, 0x39, 0xa7, 0x66, 0xfd, - 0x2c, 0x09, 0x88, 0xb5, 0x54, 0x5a, 0xc1, 0xc4, 0xf3, 0x05, 0x28, 0x12, 0xd1, 0x32, 0x92, 0x82, - 0x78, 0x0c, 0xdb, 0xed, 0x2b, 0xc1, 0x31, 0xa2, 0xd0, 0xda, 0xe1, 0xb3, 0x99, 0xf2, 0x5e, 0xcd, - 0x59, 0xe4, 0x33, 0xaf, 0x64, 0x91, 0x1f, 0xd1, 
0x55, 0xe5, 0xa8, 0xae, 0x9f, 0xa7, 0x12, 0x3c, - 0x40, 0x45, 0x99, 0x53, 0xac, 0x42, 0xbd, 0x63, 0x20, 0x40, 0x54, 0xa9, 0x11, 0xc0, 0xca, 0xd1, - 0x6c, 0x1c, 0x3a, 0xad, 0x08, 0xc4, 0xbe, 0x27, 0xcb, 0x60, 0x3b, 0x6a, 0xd6, 0x52, 0x1b, 0x82, - 0xa8, 0x21, 0x9c, 0xac, 0x89, 0xa8, 0xc8, 0x5a, 0x6c, 0x6f, 0x65, 0x92, 0x6c, 0xc1, 0xb8, 0x0d, - 0x5b, 0x71, 0x8a, 0xa6, 0x4d, 0x1d, 0x35, 0x7f, 0x3b, 0x43, 0x21, 0xf8, 0x94, 0xd7, 0x77, 0xed, - 0x69, 0x70, 0xe1, 0x85, 0xac, 0x0d, 0x6b, 0x81, 0xe3, 0x9e, 0x8f, 0xb9, 0x5e, 0x7d, 0x20, 0x27, - 0x61, 0x23, 0xd9, 0x37, 0x2a, 0x1a, 0x98, 0xab, 0x54, 0x22, 0xae, 0x2d, 0x60, 0xbb, 0x37, 0x75, - 0x32, 0x26, 0x8b, 0xb9, 0xd9, 0x58, 0xec, 0x7c, 0x07, 0x96, 0x93, 0x0d, 0xb1, 0x8f, 0xe5, 0x0d, - 0xff, 0xb8, 0x57, 0xb9, 0xb9, 0xfb, 0xcd, 0x31, 0x41, 0x54, 0xe2, 0xb9, 0x0f, 0x8c, 0xbf, 0x9c, - 0x81, 0x86, 0xc9, 0x05, 0xe5, 0x6a, 0xbd, 0x54, 0x34, 0xf3, 0xfd, 0x85, 0x5a, 0x6f, 0x1e, 0xab, - 0x7a, 0x38, 0x40, 0xf5, 0xe8, 0xbd, 0x1b, 0x17, 0xe3, 0xe0, 0xd6, 0xc2, 0x88, 0x76, 0x4b, 0xb0, - 0x44, 0x28, 0xc6, 0x16, 0x6c, 0xc8, 0xfe, 0xa8, 0xbe, 0xc4, 0xee, 0xd6, 0x44, 0x8b, 0x09, 0x77, - 0xeb, 0x36, 0x34, 0xe8, 0xae, 0xae, 0x3e, 0x08, 0x59, 0x70, 0x0f, 0xd8, 0x91, 0x3d, 0xb4, 0x7d, - 0xcf, 0x73, 0x8f, 0xb9, 0x2f, 0x03, 0x9a, 0x51, 0xc2, 0x44, 0x6f, 0xa4, 0x12, 0x85, 0x29, 0xa5, - 0x5e, 0x40, 0xf6, 0x5c, 0x15, 0xbf, 0x45, 0x29, 0xc3, 0x84, 0xb5, 0x5d, 0xfb, 0x19, 0x57, 0x35, - 0xa9, 0x29, 0xfa, 0x0c, 0x2a, 0xd3, 0xa8, 0x52, 0x35, 0xef, 0xea, 0x19, 0x93, 0xc5, 0x66, 0x4d, - 0x1d, 0xdb, 0x78, 0x0c, 0xeb, 0xc9, 0x3a, 0x25, 0xeb, 0xd8, 0x86, 0xd2, 0x44, 0xc2, 0x64, 0xef, - 0xa2, 0xb4, 0xf1, 0x5b, 0x25, 0x28, 0x4a, 0x4d, 0x95, 0xed, 0x40, 0x7e, 0xa8, 0x62, 0xe8, 0xe2, - 0x27, 0xbe, 0x64, 0xae, 0xfa, 0xdf, 0xc2, 0x48, 0x3a, 0x81, 0xc7, 0x3e, 0x83, 0xe5, 0xa4, 0x1b, - 0x79, 0xee, 0xa1, 0x80, 0xa4, 0xff, 0xb7, 0x36, 0x9c, 0x73, 0x18, 0x96, 0xe3, 0xc3, 0x91, 0x64, - 0x86, 0xd2, 0x85, 0x76, 0x7a, 0x7a, 0xae, 0x90, 0xb7, 0x83, 0x0b, 0xdb, 0x7a, 0xfc, 
0xe1, 0x47, - 0xf2, 0xa5, 0x80, 0x0a, 0x02, 0xfb, 0x17, 0xf6, 0xe3, 0x0f, 0x3f, 0x9a, 0x97, 0xa4, 0xe5, 0x3b, - 0x01, 0x9a, 0x24, 0xbd, 0x0e, 0x05, 0x7a, 0xe6, 0x97, 0x82, 0xa1, 0x28, 0xc1, 0x1e, 0xc1, 0xba, - 0x32, 0x7e, 0xc8, 0xb0, 0x75, 0xe2, 0x82, 0x25, 0xba, 0x29, 0x28, 0xf3, 0xfa, 0x98, 0x45, 0xe6, - 0x92, 0x4d, 0x58, 0xba, 0x88, 0xdf, 0x6d, 0xae, 0x99, 0x32, 0x65, 0xfc, 0x7e, 0x01, 0x2a, 0xda, - 0xa4, 0xb0, 0x2a, 0x94, 0xcc, 0x76, 0xbf, 0x6d, 0x7e, 0xde, 0xde, 0xab, 0xdf, 0x62, 0x0f, 0xe0, - 0xad, 0x4e, 0xb7, 0xd5, 0x33, 0xcd, 0x76, 0x6b, 0x60, 0xf5, 0x4c, 0x4b, 0x3d, 0x34, 0x77, 0xdc, - 0xfc, 0xf2, 0xa8, 0xdd, 0x1d, 0x58, 0x7b, 0xed, 0x41, 0xb3, 0x73, 0xd8, 0xaf, 0x67, 0xd8, 0x6b, - 0xd0, 0x88, 0x31, 0x55, 0x76, 0xf3, 0xa8, 0x77, 0xd2, 0x1d, 0xd4, 0xb3, 0xec, 0x1e, 0xdc, 0xd9, - 0xef, 0x74, 0x9b, 0x87, 0x56, 0x8c, 0xd3, 0x3a, 0x1c, 0x7c, 0x6e, 0xb5, 0x7f, 0xf9, 0xb8, 0x63, - 0x7e, 0x59, 0xcf, 0xa5, 0x21, 0x1c, 0x0c, 0x0e, 0x5b, 0xaa, 0x86, 0x3c, 0xbb, 0x0d, 0x1b, 0x84, - 0x40, 0x45, 0xac, 0x41, 0xaf, 0x67, 0xf5, 0x7b, 0xbd, 0x6e, 0xbd, 0xc0, 0x56, 0xa1, 0xd6, 0xe9, - 0x7e, 0xde, 0x3c, 0xec, 0xec, 0x59, 0x66, 0xbb, 0x79, 0x78, 0x54, 0x5f, 0x62, 0x6b, 0xb0, 0x32, - 0x8f, 0x57, 0x14, 0x55, 0x28, 0xbc, 0x5e, 0xb7, 0xd3, 0xeb, 0x5a, 0x9f, 0xb7, 0xcd, 0x7e, 0xa7, - 0xd7, 0xad, 0x97, 0xd8, 0x26, 0xb0, 0x64, 0xd6, 0xc1, 0x51, 0xb3, 0x55, 0x2f, 0xb3, 0x0d, 0x58, - 0x4d, 0xc2, 0x9f, 0xb6, 0xbf, 0xac, 0x03, 0x6b, 0xc0, 0x3a, 0x75, 0xcc, 0xda, 0x6d, 0x1f, 0xf6, - 0xbe, 0xb0, 0x8e, 0x3a, 0xdd, 0xce, 0xd1, 0xc9, 0x51, 0xbd, 0x82, 0xaf, 0x75, 0xb6, 0xdb, 0x56, - 0xa7, 0xdb, 0x3f, 0xd9, 0xdf, 0xef, 0xb4, 0x3a, 0xed, 0xee, 0xa0, 0x5e, 0xa5, 0x96, 0xd3, 0x06, - 0x5e, 0x13, 0x05, 0xe4, 0xdd, 0x16, 0x6b, 0xaf, 0xd3, 0x6f, 0xee, 0x1e, 0xb6, 0xf7, 0xea, 0xcb, - 0xec, 0x2e, 0xdc, 0x1e, 0xb4, 0x8f, 0x8e, 0x7b, 0x66, 0xd3, 0xfc, 0x52, 0xdd, 0x7d, 0xb1, 0xf6, - 0x9b, 0x9d, 0xc3, 0x13, 0xb3, 0x5d, 0x5f, 0x61, 0x6f, 0xc0, 0x5d, 0xb3, 0xfd, 0x93, 0x93, 0x8e, - 0xd9, 0xde, 0xb3, 0xba, 
0xbd, 0xbd, 0xb6, 0xb5, 0xdf, 0x6e, 0x0e, 0x4e, 0xcc, 0xb6, 0x75, 0xd4, - 0xe9, 0xf7, 0x3b, 0xdd, 0x27, 0xf5, 0x3a, 0x7b, 0x0b, 0xee, 0x47, 0x28, 0x51, 0x05, 0x73, 0x58, - 0xab, 0x62, 0x7c, 0x6a, 0x49, 0xbb, 0xed, 0x5f, 0x1e, 0x58, 0xc7, 0xed, 0xb6, 0x59, 0x67, 0x6c, - 0x1b, 0x36, 0xe3, 0xe6, 0xa9, 0x01, 0xd9, 0xf6, 0x9a, 0xc8, 0x3b, 0x6e, 0x9b, 0x47, 0xcd, 0xae, - 0x58, 0xe0, 0x44, 0xde, 0xba, 0xe8, 0x76, 0x9c, 0x37, 0xdf, 0xed, 0x0d, 0xc6, 0x60, 0x59, 0x5b, - 0x95, 0xfd, 0xa6, 0x59, 0xdf, 0x64, 0x2b, 0x50, 0x39, 0x3a, 0x3e, 0xb6, 0x06, 0x9d, 0xa3, 0x76, - 0xef, 0x64, 0x50, 0xdf, 0x62, 0x1b, 0x50, 0xef, 0x74, 0x07, 0x6d, 0x53, 0xac, 0xb5, 0x2a, 0xfa, - 0xdf, 0x8a, 0x6c, 0x1d, 0x56, 0x54, 0x4f, 0x15, 0xf4, 0x8f, 0x8a, 0x6c, 0x0b, 0xd8, 0x49, 0xd7, - 0x6c, 0x37, 0xf7, 0xc4, 0xc4, 0x45, 0x19, 0xff, 0xbd, 0x28, 0x5d, 0x4a, 0x3f, 0xcb, 0x45, 0x87, - 0x75, 0x1c, 0xa3, 0x91, 0xfc, 0x98, 0x41, 0x55, 0xfb, 0x08, 0xc1, 0xcb, 0x3e, 0x49, 0xa4, 0xa9, - 0x56, 0xb9, 0x05, 0xd5, 0x6a, 0x41, 0x77, 0xaf, 0xe9, 0xb2, 0xdf, 0x9b, 0x50, 0x9b, 0xd0, 0x87, - 0x0d, 0xe4, 0xab, 0xdd, 0x20, 0x03, 0x96, 0x08, 0x48, 0x4f, 0x76, 0x2f, 0x7c, 0x93, 0xa7, 0xb0, - 0xf8, 0x4d, 0x9e, 0x34, 0xf9, 0x7e, 0x29, 0x4d, 0xbe, 0x7f, 0x08, 0xab, 0xc4, 0x9a, 0x1c, 0xd7, - 0x99, 0x28, 0xad, 0x99, 0xa4, 0xc0, 0x15, 0x64, 0x51, 0x04, 0x57, 0xea, 0x84, 0x52, 0x39, 0x24, - 0x0b, 0x29, 0x4a, 0x6d, 0x23, 0xa1, 0x69, 0x10, 0xe7, 0x88, 0x34, 0x8d, 0xa8, 0x05, 0xfb, 0x2a, - 0x6e, 0xa1, 0xa2, 0xb5, 0x40, 0x70, 0x6c, 0xe1, 0x21, 0xac, 0xf2, 0xab, 0xd0, 0xb7, 0x2d, 0x6f, - 0x6a, 0x7f, 0x35, 0x43, 0x9f, 0xb7, 0x8d, 0x3a, 0x7c, 0xd5, 0x5c, 0xc1, 0x8c, 0x1e, 0xc2, 0xf7, - 0xec, 0xd0, 0x7e, 0xf8, 0x67, 0xa0, 0xa2, 0x7d, 0xf4, 0x82, 0x6d, 0xc1, 0xda, 0x17, 0x9d, 0x41, - 0xb7, 0xdd, 0xef, 0x5b, 0xc7, 0x27, 0xbb, 0x4f, 0xdb, 0x5f, 0x5a, 0x07, 0xcd, 0xfe, 0x41, 0xfd, - 0x96, 0xd8, 0xb4, 0xdd, 0x76, 0x7f, 0xd0, 0xde, 0x4b, 0xc0, 0x33, 0xec, 0x75, 0xd8, 0x3e, 0xe9, - 0x9e, 0xf4, 0xdb, 0x7b, 0x56, 0x5a, 0xb9, 0xac, 0xa0, 0x52, 
0x99, 0x9f, 0x52, 0x3c, 0xf7, 0xf0, - 0xd7, 0x60, 0x39, 0x79, 0x0d, 0x9c, 0x01, 0x2c, 0x1d, 0xb6, 0x9f, 0x34, 0x5b, 0x5f, 0xd2, 0x7b, - 0xbe, 0xfd, 0x41, 0x73, 0xd0, 0x69, 0x59, 0xf2, 0xfd, 0x5e, 0xc1, 0x11, 0x32, 0xac, 0x02, 0xc5, - 0x66, 0xb7, 0x75, 0xd0, 0x33, 0xfb, 0xf5, 0x2c, 0x7b, 0x0d, 0xb6, 0x14, 0xad, 0xb6, 0x7a, 0x47, - 0x47, 0x9d, 0x01, 0x32, 0xc3, 0xc1, 0x97, 0xc7, 0x82, 0x34, 0x1f, 0xda, 0x50, 0x8e, 0x9f, 0x1e, - 0x46, 0x06, 0xd3, 0x19, 0x74, 0x9a, 0x83, 0x98, 0xbb, 0xd6, 0x6f, 0x09, 0xfe, 0x15, 0x83, 0xf1, - 0xfd, 0xe0, 0x7a, 0x86, 0x6e, 0xca, 0x29, 0x20, 0xb5, 0x5e, 0xcf, 0x8a, 0x4d, 0x15, 0x43, 0x77, - 0x7b, 0x03, 0x31, 0x84, 0x5f, 0x87, 0xe5, 0xe4, 0x0b, 0xbf, 0xac, 0x0e, 0x55, 0xd1, 0xbe, 0xd6, - 0x04, 0xc0, 0x12, 0xf5, 0xb8, 0x9e, 0x21, 0x0e, 0xda, 0xea, 0x1d, 0x75, 0xba, 0x4f, 0x90, 0xed, - 0xd6, 0xb3, 0x02, 0xd4, 0x3b, 0x19, 0x3c, 0xe9, 0x45, 0xa0, 0x9c, 0x28, 0x41, 0xc3, 0xa9, 0xe7, - 0x1f, 0x7e, 0x05, 0xab, 0x0b, 0x6f, 0x01, 0x8b, 0x5e, 0xf7, 0x4e, 0x06, 0xad, 0xde, 0x91, 0xde, - 0x4e, 0x05, 0x8a, 0xad, 0xc3, 0x66, 0xe7, 0x08, 0xad, 0xbe, 0x35, 0x28, 0x9f, 0x74, 0x55, 0x32, - 0x9b, 0x7c, 0xc5, 0x38, 0x27, 0x78, 0xc1, 0x7e, 0xc7, 0xec, 0x0f, 0xac, 0xfe, 0xa0, 0xf9, 0xa4, - 0x5d, 0xcf, 0x8b, 0xb2, 0x8a, 0x31, 0x14, 0x1e, 0x7e, 0x0a, 0xcb, 0xc9, 0x20, 0xcf, 0xa4, 0xb5, - 0x7e, 0x1b, 0x36, 0x77, 0xdb, 0x83, 0x2f, 0xda, 0xed, 0x2e, 0x2e, 0x79, 0xab, 0xdd, 0x1d, 0x98, - 0xcd, 0xc3, 0xce, 0xe0, 0xcb, 0x7a, 0xe6, 0xe1, 0x67, 0x50, 0x9f, 0xf7, 0xa8, 0x26, 0x5c, 0xd0, - 0x2f, 0xf2, 0x55, 0x3f, 0xfc, 0xcf, 0x19, 0x58, 0x4f, 0x73, 0x26, 0x08, 0xc2, 0x94, 0x1c, 0x47, - 0x9c, 0x3b, 0xfd, 0x5e, 0xd7, 0xea, 0xf6, 0xf0, 0x5d, 0xd0, 0x6d, 0xd8, 0x9c, 0xcb, 0x50, 0xa3, - 0xc8, 0xb0, 0x3b, 0xb0, 0xb5, 0x50, 0xc8, 0x32, 0x7b, 0x27, 0xb8, 0x96, 0x0d, 0x58, 0x9f, 0xcb, - 0x6c, 0x9b, 0x66, 0xcf, 0xac, 0xe7, 0xd8, 0x7b, 0xf0, 0x60, 0x2e, 0x67, 0xf1, 0xb4, 0x55, 0x87, - 0x71, 0x9e, 0xbd, 0x03, 0x6f, 0x2e, 0x60, 0xc7, 0x07, 0x92, 0xb5, 0xdb, 0x3c, 0x14, 0xc3, 0xab, - 
0x17, 0x1e, 0xfe, 0x83, 0x1c, 0x40, 0x7c, 0x8b, 0x4a, 0xb4, 0xbf, 0xd7, 0x1c, 0x34, 0x0f, 0x7b, - 0x62, 0xcf, 0x98, 0xbd, 0x81, 0xa8, 0xdd, 0x6c, 0xff, 0xa4, 0x7e, 0x2b, 0x35, 0xa7, 0x77, 0x2c, - 0x06, 0xb4, 0x05, 0x6b, 0x44, 0x7f, 0x87, 0x62, 0x18, 0x82, 0x5c, 0xf0, 0x89, 0x59, 0x3c, 0xd2, - 0x4f, 0x8e, 0xf7, 0xcd, 0x5e, 0x77, 0x60, 0xf5, 0x0f, 0x4e, 0x06, 0x7b, 0xf8, 0x40, 0x6d, 0xcb, - 0xec, 0x1c, 0x53, 0x9d, 0xf9, 0x17, 0x21, 0x88, 0xaa, 0x0b, 0x62, 0x83, 0x3f, 0xe9, 0xf5, 0xfb, - 0x9d, 0x63, 0xeb, 0x27, 0x27, 0x6d, 0xb3, 0xd3, 0xee, 0x63, 0xc1, 0xa5, 0x14, 0xb8, 0xc0, 0x2f, - 0x0a, 0x9a, 0x1d, 0x1c, 0x7e, 0x2e, 0x4f, 0x6a, 0x81, 0x5a, 0x4a, 0x82, 0x04, 0x56, 0x59, 0xac, - 0x8e, 0x38, 0xea, 0x52, 0x6a, 0x86, 0x1b, 0xf2, 0x44, 0xb9, 0x8a, 0x38, 0xc4, 0x17, 0x76, 0x3e, - 0x16, 0xab, 0xa6, 0x67, 0x89, 0x52, 0x78, 0xbe, 0x47, 0xd2, 0xd0, 0xde, 0x9e, 0x89, 0x05, 0x96, - 0x17, 0xa0, 0x02, 0x77, 0x45, 0x10, 0xa1, 0x38, 0x0b, 0x05, 0x4a, 0x5d, 0x25, 0x44, 0xce, 0xea, - 0xe3, 0x7f, 0x7b, 0x0f, 0xca, 0x51, 0x34, 0x35, 0xfb, 0x31, 0xd4, 0x12, 0xd7, 0x5b, 0x99, 0xb2, - 0x75, 0xa6, 0xdd, 0x86, 0xdd, 0x7e, 0x2d, 0x3d, 0x53, 0x8a, 0xd5, 0x47, 0x9a, 0xda, 0x44, 0x95, - 0xbd, 0x36, 0xaf, 0xca, 0x24, 0x6a, 0xbb, 0x7b, 0x43, 0xae, 0xac, 0xee, 0x29, 0xbe, 0x76, 0xab, - 0x7f, 0x3b, 0x95, 0xdd, 0x8d, 0x9f, 0x1e, 0x4d, 0xf9, 0xa6, 0xea, 0xf6, 0xed, 0xc5, 0xaf, 0x9c, - 0xaa, 0xcf, 0xa2, 0xee, 0x41, 0x45, 0xfb, 0x24, 0x18, 0xbb, 0x7d, 0xe3, 0xe7, 0xcb, 0xb6, 0xb7, - 0xd3, 0xb2, 0x64, 0x97, 0x7e, 0x00, 0xe5, 0xe8, 0xf3, 0x50, 0x6c, 0x4b, 0xfb, 0xb4, 0x97, 0xfe, - 0x91, 0xab, 0xed, 0xc6, 0x62, 0x86, 0x2c, 0xbf, 0x07, 0x15, 0xed, 0x2b, 0x4f, 0x51, 0x2f, 0x16, - 0xbf, 0x24, 0x15, 0xf5, 0x22, 0xed, 0xa3, 0x50, 0x87, 0xb0, 0x21, 0x95, 0xb3, 0x53, 0xfe, 0x75, - 0xa6, 0x27, 0xe5, 0x23, 0xb0, 0x8f, 0x32, 0xec, 0x33, 0x28, 0xa9, 0xef, 0x79, 0xb1, 0xcd, 0xf4, - 0xaf, 0x95, 0x6d, 0x6f, 0x2d, 0xc0, 0x65, 0x57, 0x9a, 0x00, 0xf1, 0xf7, 0xa3, 0x98, 0x1a, 0xf8, - 0xc2, 0xf7, 0xa8, 0xa2, 0x95, 0x49, 
0xf9, 0xd8, 0xd4, 0x1e, 0x54, 0xb4, 0x4f, 0x45, 0x45, 0x73, - 0xb2, 0xf8, 0x99, 0xa9, 0x68, 0x4e, 0xd2, 0xbe, 0x2c, 0xf5, 0x63, 0xa8, 0x25, 0xbe, 0xf9, 0x14, - 0xd1, 0x71, 0xda, 0x17, 0xa5, 0x22, 0x3a, 0x4e, 0xff, 0x4c, 0xd4, 0x1e, 0x54, 0xb4, 0x2f, 0x34, - 0x45, 0x3d, 0x5a, 0xfc, 0x18, 0x54, 0xd4, 0xa3, 0x94, 0x0f, 0x3a, 0x89, 0xdd, 0x90, 0xfc, 0x3c, - 0x53, 0xb4, 0x1b, 0x52, 0xbf, 0xf3, 0x14, 0xed, 0x86, 0xf4, 0x6f, 0x3a, 0x09, 0xd2, 0x8b, 0x9e, - 0xb9, 0x66, 0x5b, 0x1a, 0x75, 0xe8, 0xef, 0x65, 0x47, 0xa4, 0xb7, 0xf8, 0x22, 0xf6, 0x13, 0x58, - 0x8b, 0x88, 0x26, 0x7a, 0xa4, 0x3a, 0x88, 0xfa, 0x94, 0xfa, 0x14, 0xf6, 0x76, 0x7d, 0x3e, 0xf7, - 0x51, 0x86, 0x7d, 0x02, 0x45, 0xf9, 0xf2, 0x2f, 0xdb, 0x98, 0x7f, 0x09, 0x98, 0x3a, 0xb1, 0x99, - 0xfe, 0x40, 0x30, 0x3b, 0xc6, 0x0d, 0xad, 0x3f, 0xcd, 0xab, 0x53, 0x6c, 0xca, 0x6b, 0xbe, 0xdb, - 0xaf, 0xdf, 0x94, 0x1d, 0xd7, 0x38, 0xff, 0x9c, 0xf4, 0xdd, 0x9b, 0x9e, 0x9d, 0x48, 0xd6, 0x78, - 0xd3, 0xfb, 0x58, 0x4f, 0xa0, 0xaa, 0x7f, 0x1c, 0x84, 0xe9, 0xfb, 0x70, 0xbe, 0xae, 0x3b, 0xa9, - 0x79, 0xb2, 0xa2, 0xcf, 0x61, 0x33, 0x9a, 0x6f, 0xfd, 0x0d, 0x84, 0x80, 0xdd, 0x4b, 0x79, 0x19, - 0x21, 0x31, 0xeb, 0xb7, 0x6f, 0x7c, 0x3a, 0xe1, 0x51, 0x06, 0x99, 0x6c, 0xe2, 0x3d, 0xff, 0x98, - 0xc9, 0xa6, 0x7d, 0xc6, 0x20, 0x66, 0xb2, 0xe9, 0x1f, 0x01, 0x68, 0xc2, 0x8a, 0xf6, 0x86, 0x43, - 0xff, 0xda, 0x1d, 0x46, 0xf4, 0xbe, 0xf8, 0x66, 0xea, 0x76, 0x9a, 0x89, 0x90, 0xb5, 0xa0, 0xa2, - 0x3f, 0x03, 0xf1, 0x82, 0xe2, 0x5b, 0x5a, 0x96, 0xfe, 0x80, 0xe6, 0xa3, 0x0c, 0x3b, 0x84, 0xfa, - 0xfc, 0xa3, 0x6d, 0xd1, 0x16, 0x4e, 0x7b, 0xe8, 0x6e, 0x7b, 0x2e, 0x33, 0xf1, 0xd4, 0x9b, 0xa0, - 0x8b, 0xc4, 0xc7, 0x46, 0x3d, 0x7f, 0xfe, 0x28, 0x4a, 0x7e, 0x84, 0x34, 0xaa, 0x2d, 0xed, 0xf3, - 0xb3, 0x0f, 0x32, 0x8f, 0x32, 0x6c, 0x1f, 0xaa, 0x89, 0x37, 0x8b, 0x12, 0x81, 0xfd, 0x73, 0xc3, - 0x6c, 0xe8, 0x79, 0x73, 0xe3, 0x3c, 0x82, 0xe5, 0xa4, 0x2f, 0x3b, 0xea, 0x58, 0xaa, 0xd7, 0x3c, - 0x5a, 0xbe, 0x74, 0x07, 0x38, 0xfb, 0x21, 0x7d, 0x4a, 0x5b, 0xc5, 0x2d, 
0xb1, 0xc5, 0x4f, 0x2f, - 0x47, 0x6b, 0xa6, 0x7f, 0xa8, 0xd8, 0xc8, 0xfd, 0x85, 0x6c, 0x06, 0xc7, 0xf5, 0x7d, 0xfa, 0x90, - 0xa5, 0x0a, 0x5d, 0x11, 0xeb, 0xff, 0xaa, 0x95, 0xb0, 0x7d, 0x6a, 0x5c, 0x7e, 0x46, 0x38, 0xe6, - 0xdc, 0x0b, 0x9f, 0x16, 0x7e, 0x49, 0x1f, 0x9a, 0xd4, 0x07, 0x59, 0x26, 0x41, 0x83, 0xaf, 0x58, - 0x17, 0xfb, 0x18, 0x20, 0x8e, 0x07, 0x64, 0x73, 0x51, 0x69, 0xd1, 0x86, 0x4a, 0x09, 0x19, 0x6c, - 0xd3, 0x7e, 0x8f, 0xc2, 0xe2, 0xf4, 0x23, 0x39, 0x19, 0xa1, 0x97, 0x38, 0x92, 0xe7, 0xab, 0xf9, - 0x2e, 0xd4, 0x0e, 0x3d, 0xef, 0xd9, 0x6c, 0x1a, 0x05, 0x95, 0x27, 0x63, 0x36, 0x84, 0xce, 0xbf, - 0x3d, 0xd7, 0x2d, 0xd6, 0x84, 0xd5, 0x88, 0x45, 0xc4, 0x71, 0x79, 0x49, 0xa4, 0x04, 0x63, 0x98, - 0xab, 0xe0, 0x51, 0x86, 0x3d, 0x86, 0xea, 0x1e, 0x1f, 0xe2, 0x9b, 0x02, 0x18, 0x5d, 0xb0, 0x96, - 0xf0, 0x54, 0x53, 0x58, 0xc2, 0x76, 0x2d, 0x01, 0x54, 0x2c, 0x2e, 0x8e, 0x52, 0xd1, 0xcf, 0x8c, - 0x64, 0xa8, 0x47, 0x82, 0xc5, 0x2d, 0x44, 0xaa, 0x7c, 0x0e, 0xab, 0x0b, 0x71, 0x20, 0x11, 0x77, - 0xbb, 0x29, 0x7a, 0x64, 0xfb, 0xfe, 0xcd, 0x08, 0xb2, 0xde, 0x1f, 0x41, 0x8d, 0xde, 0x53, 0x3d, - 0xe5, 0x74, 0x27, 0x70, 0xee, 0x41, 0x1d, 0xfd, 0xc2, 0xe1, 0x3c, 0x4b, 0xa2, 0x02, 0x4f, 0xf0, - 0xdb, 0x09, 0xda, 0x8d, 0xbb, 0x68, 0x5d, 0x17, 0x6f, 0x01, 0x46, 0xeb, 0x9a, 0x76, 0xb9, 0xef, - 0x53, 0xa8, 0x3c, 0xe1, 0xa1, 0xba, 0xc3, 0x16, 0xc9, 0x47, 0x73, 0x97, 0xda, 0xb6, 0x53, 0x6e, - 0x1e, 0xb2, 0x8f, 0xb0, 0x68, 0x74, 0x1f, 0x7b, 0x53, 0x6b, 0x45, 0x2f, 0xba, 0x32, 0x07, 0x17, - 0xd2, 0x87, 0xf6, 0x2a, 0x43, 0xd4, 0xf1, 0xc5, 0x57, 0x38, 0xa2, 0x8e, 0xa7, 0x3d, 0xe2, 0xf0, - 0x43, 0x9a, 0x01, 0xed, 0xd6, 0x5c, 0x2c, 0x82, 0xcd, 0x5f, 0xb0, 0x8b, 0xba, 0xaf, 0xa3, 0x7f, - 0x08, 0xd0, 0x0f, 0xbd, 0xe9, 0x9e, 0xcd, 0x27, 0x9e, 0x1b, 0xf3, 0x84, 0xf8, 0xbe, 0x56, 0xbc, - 0x11, 0xb5, 0x4b, 0x5b, 0xec, 0x0b, 0x4d, 0x36, 0x4d, 0x2c, 0x89, 0x5a, 0xf6, 0x1b, 0xaf, 0x74, - 0x45, 0xc3, 0x49, 0xb9, 0xd6, 0x85, 0x4c, 0x02, 0xe2, 0x10, 0x9d, 0x48, 0xd2, 0x5c, 0x88, 0xfe, - 0x89, 0xf6, 
0x7a, 0x4a, 0x3c, 0xcf, 0x0f, 0xa0, 0x1c, 0xc7, 0x36, 0x6c, 0xc5, 0x4f, 0xc4, 0x24, - 0x22, 0x21, 0x22, 0xee, 0xbd, 0x18, 0x57, 0xd0, 0x85, 0x35, 0xea, 0x4e, 0x74, 0xfc, 0xe1, 0xad, - 0xa2, 0xe8, 0xd3, 0x1f, 0x8b, 0x0e, 0xfd, 0x68, 0xff, 0xa4, 0xb9, 0xa5, 0xc5, 0xfe, 0x59, 0x70, - 0x6f, 0x46, 0xfb, 0xe7, 0x26, 0x7f, 0x75, 0xb4, 0x7f, 0x6e, 0xf6, 0x8c, 0x76, 0x61, 0x2d, 0xc5, - 0x51, 0xc9, 0xde, 0x50, 0x8a, 0xcd, 0x8d, 0x4e, 0xcc, 0xed, 0x54, 0x87, 0x16, 0x1b, 0xc0, 0x16, - 0x95, 0x69, 0x8e, 0xc7, 0x73, 0x7e, 0xb1, 0xd7, 0xb5, 0x02, 0x29, 0xbe, 0xbe, 0x84, 0x28, 0x33, - 0xe7, 0xef, 0xeb, 0x42, 0x7d, 0xde, 0xa5, 0xc4, 0x6e, 0x46, 0xdf, 0xbe, 0x97, 0x10, 0xd9, 0x17, - 0xdd, 0x50, 0xec, 0xf3, 0xc8, 0xb1, 0x35, 0xd7, 0xc7, 0x7b, 0xf1, 0x07, 0xa7, 0x52, 0xdd, 0x70, - 0x91, 0x36, 0x90, 0xea, 0x17, 0x63, 0xbf, 0x0c, 0x5b, 0xf3, 0x14, 0xad, 0x6a, 0xbe, 0x9f, 0x36, - 0x5d, 0x37, 0x8a, 0x72, 0xc9, 0x01, 0x3d, 0xca, 0x08, 0x46, 0xac, 0xbb, 0xa7, 0x22, 0x42, 0x4a, - 0xf1, 0x83, 0x45, 0x84, 0x94, 0xe6, 0xcf, 0xda, 0x7d, 0xe7, 0x57, 0xbe, 0x75, 0xee, 0x84, 0x17, - 0xb3, 0xd3, 0x9d, 0xa1, 0x37, 0x79, 0x7f, 0xac, 0x94, 0x7b, 0x79, 0xd7, 0xf4, 0xfd, 0xb1, 0x3b, - 0x7a, 0x1f, 0x4b, 0x9f, 0x2e, 0x4d, 0x7d, 0x2f, 0xf4, 0xbe, 0xfb, 0x7f, 0x03, 0x00, 0x00, 0xff, - 0xff, 0x39, 0x56, 0x74, 0xe8, 0xdb, 0x86, 0x00, 0x00, + // 12005 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0xbd, 0x59, 0x6c, 0x23, 0x49, + 0x9a, 0x18, 0x5c, 0xbc, 0x44, 0xf2, 0xe3, 0x21, 0x2a, 0x74, 0xb1, 0x54, 0x5d, 0x5d, 0xd5, 0xd9, + 0x3d, 0xdd, 0x35, 0xd5, 0x3d, 0xea, 0xea, 0xea, 0xae, 0x3e, 0xa6, 0xfe, 0xed, 0x19, 0x8a, 0xa2, + 0x4a, 0x9c, 0x92, 0x48, 0x4d, 0x92, 0xea, 0xde, 0x5e, 0xec, 0x6e, 0x6e, 0x8a, 0x0c, 0x49, 0xf9, + 0x17, 0x99, 0xc9, 0xce, 0x4c, 0xaa, 0xa4, 0x31, 0x0c, 0xf8, 0x61, 0x7d, 0x60, 0xb1, 0x30, 0x60, + 0xc0, 0x6b, 0xc0, 0xc7, 0xc2, 0x17, 0x6c, 0xbf, 0x2d, 0x0c, 0xef, 0xda, 0x4f, 0x7e, 0xf6, 0xc2, + 0x80, 0x0f, 0x18, 0x5e, 0xc3, 0x07, 0x16, 
0x0b, 0x18, 0xb0, 0xd7, 0x0f, 0x06, 0x8c, 0x05, 0xec, + 0x17, 0x3f, 0x18, 0x30, 0xe2, 0x8b, 0x23, 0x23, 0x93, 0xa9, 0xaa, 0xea, 0x99, 0xf6, 0xbc, 0x48, + 0x8c, 0xef, 0xfb, 0xe2, 0x8e, 0xf8, 0xe2, 0xbb, 0x22, 0x12, 0xca, 0xfe, 0x6c, 0xb4, 0x3d, 0xf3, + 0xbd, 0xd0, 0x23, 0x85, 0x89, 0xeb, 0xcf, 0x46, 0xc6, 0x1f, 0x67, 0x20, 0x7f, 0x1c, 0x5e, 0x7a, + 0xe4, 0x11, 0x54, 0xed, 0xf1, 0xd8, 0xa7, 0x41, 0x60, 0x85, 0x57, 0x33, 0xda, 0xcc, 0xdc, 0xcd, + 0xdc, 0xab, 0x3f, 0x24, 0xdb, 0x48, 0xb6, 0xdd, 0xe2, 0xa8, 0xe1, 0xd5, 0x8c, 0x9a, 0x15, 0x3b, + 0x4a, 0x90, 0x26, 0x14, 0x45, 0xb2, 0x99, 0xbd, 0x9b, 0xb9, 0x57, 0x36, 0x65, 0x92, 0xdc, 0x06, + 0xb0, 0xa7, 0xde, 0xdc, 0x0d, 0xad, 0xc0, 0x0e, 0x9b, 0xb9, 0xbb, 0x99, 0x7b, 0x39, 0xb3, 0xcc, + 0x21, 0x03, 0x3b, 0x24, 0xb7, 0xa0, 0x3c, 0x7b, 0x66, 0x05, 0x23, 0xdf, 0x99, 0x85, 0xcd, 0x3c, + 0x66, 0x2d, 0xcd, 0x9e, 0x0d, 0x30, 0x4d, 0xde, 0x85, 0x92, 0x37, 0x0f, 0x67, 0x9e, 0xe3, 0x86, + 0xcd, 0xc2, 0xdd, 0xcc, 0xbd, 0xca, 0xc3, 0x65, 0xd1, 0x90, 0xfe, 0x3c, 0x3c, 0x62, 0x60, 0x53, + 0x11, 0x90, 0xb7, 0xa0, 0x36, 0xf2, 0xdc, 0x53, 0xc7, 0x9f, 0xda, 0xa1, 0xe3, 0xb9, 0x41, 0x73, + 0x09, 0xeb, 0x8a, 0x03, 0x8d, 0x7f, 0x96, 0x85, 0xca, 0xd0, 0xb7, 0xdd, 0xc0, 0x1e, 0x31, 0x00, + 0xd9, 0x84, 0x62, 0x78, 0x69, 0x9d, 0xdb, 0xc1, 0x39, 0x76, 0xb5, 0x6c, 0x2e, 0x85, 0x97, 0xfb, + 0x76, 0x70, 0x4e, 0x36, 0x60, 0x89, 0xb7, 0x12, 0x3b, 0x94, 0x33, 0x45, 0x8a, 0xbc, 0x0b, 0x2b, + 0xee, 0x7c, 0x6a, 0xc5, 0xab, 0x62, 0xdd, 0x2a, 0x98, 0x0d, 0x77, 0x3e, 0x6d, 0xeb, 0x70, 0xd6, + 0xf9, 0x93, 0x89, 0x37, 0x7a, 0xc6, 0x2b, 0xe0, 0xdd, 0x2b, 0x23, 0x04, 0xeb, 0x78, 0x03, 0xaa, + 0x02, 0x4d, 0x9d, 0xb3, 0x73, 0xde, 0xc7, 0x82, 0x59, 0xe1, 0x04, 0x08, 0x62, 0x25, 0x84, 0xce, + 0x94, 0x5a, 0x41, 0x68, 0x4f, 0x67, 0xa2, 0x4b, 0x65, 0x06, 0x19, 0x30, 0x00, 0xa2, 0xbd, 0xd0, + 0x9e, 0x58, 0xa7, 0x94, 0x06, 0xcd, 0xa2, 0x40, 0x33, 0xc8, 0x1e, 0xa5, 0x01, 0xf9, 0x0e, 0xd4, + 0xc7, 0x34, 0x08, 0x2d, 0x31, 0x19, 0x34, 0x68, 0x96, 0xee, 0xe6, 0xee, 0x95, 
0xcd, 0x1a, 0x83, + 0xb6, 0x24, 0x90, 0xbc, 0x06, 0xe0, 0xdb, 0xcf, 0x2d, 0x36, 0x10, 0xf4, 0xb2, 0x59, 0xe6, 0xb3, + 0xe0, 0xdb, 0xcf, 0x87, 0x97, 0xfb, 0xf4, 0x92, 0xac, 0x41, 0x61, 0x62, 0x9f, 0xd0, 0x49, 0x13, + 0x10, 0xc1, 0x13, 0xc6, 0x2f, 0xc1, 0xc6, 0x13, 0x1a, 0x6a, 0x43, 0x19, 0x98, 0xf4, 0xeb, 0x39, + 0x0d, 0x42, 0xd6, 0xab, 0x20, 0xb4, 0xfd, 0x50, 0xf6, 0x2a, 0xc3, 0x7b, 0x85, 0xb0, 0xa8, 0x57, + 0xd4, 0x1d, 0x4b, 0x82, 0x2c, 0x12, 0x94, 0xa9, 0x3b, 0xe6, 0x68, 0xe3, 0x00, 0x88, 0x56, 0xf0, + 0x2e, 0x0d, 0x6d, 0x67, 0x12, 0x90, 0x8f, 0xa1, 0x1a, 0x6a, 0xd5, 0x35, 0x33, 0x77, 0x73, 0xf7, + 0x2a, 0x6a, 0x69, 0x6a, 0x19, 0xcc, 0x18, 0x9d, 0x71, 0x0e, 0xa5, 0x3d, 0x4a, 0x0f, 0x9c, 0xa9, + 0x13, 0x92, 0x0d, 0x28, 0x9c, 0x3a, 0x97, 0x74, 0x8c, 0x8d, 0xca, 0xed, 0xdf, 0x30, 0x79, 0x92, + 0xdc, 0x01, 0xc0, 0x1f, 0xd6, 0x54, 0xad, 0xd2, 0xfd, 0x1b, 0x66, 0x19, 0x61, 0x87, 0x81, 0x1d, + 0x92, 0x2d, 0x28, 0xce, 0xa8, 0x3f, 0xa2, 0x72, 0x3d, 0xec, 0xdf, 0x30, 0x25, 0x60, 0xa7, 0x08, + 0x85, 0x09, 0x2b, 0xdd, 0xf8, 0xfd, 0x02, 0x54, 0x06, 0xd4, 0x1d, 0xcb, 0x91, 0x20, 0x90, 0x67, + 0x03, 0x8d, 0x95, 0x55, 0x4d, 0xfc, 0x4d, 0xde, 0x84, 0x0a, 0x4e, 0x49, 0x10, 0xfa, 0x8e, 0x7b, + 0xc6, 0x77, 0xcb, 0x4e, 0xb6, 0x99, 0x31, 0x81, 0x81, 0x07, 0x08, 0x25, 0x0d, 0xc8, 0xd9, 0x53, + 0xb9, 0x5b, 0xd8, 0x4f, 0x72, 0x13, 0x4a, 0xf6, 0x34, 0xe4, 0xcd, 0xab, 0x22, 0xb8, 0x68, 0x4f, + 0x43, 0x6c, 0xda, 0x1b, 0x50, 0x9d, 0xd9, 0x57, 0x53, 0xea, 0x86, 0xd1, 0x32, 0xab, 0x9a, 0x15, + 0x01, 0xc3, 0x85, 0xf6, 0x10, 0x56, 0x75, 0x12, 0x59, 0x79, 0x41, 0x55, 0xbe, 0xa2, 0x51, 0x8b, + 0x36, 0xbc, 0x03, 0xcb, 0x32, 0x8f, 0xcf, 0xfb, 0x83, 0xcb, 0xaf, 0x6c, 0xd6, 0x05, 0x58, 0xf6, + 0xf2, 0x1e, 0x34, 0x4e, 0x1d, 0xd7, 0x9e, 0x58, 0xa3, 0x49, 0x78, 0x61, 0x8d, 0xe9, 0x24, 0xb4, + 0x71, 0x25, 0x16, 0xcc, 0x3a, 0xc2, 0xdb, 0x93, 0xf0, 0x62, 0x97, 0x41, 0xc9, 0x7b, 0x50, 0x3e, + 0xa5, 0xd4, 0xc2, 0xc1, 0x6a, 0x96, 0x62, 0x1b, 0x5a, 0xce, 0x90, 0x59, 0x3a, 0x95, 0x73, 0xf5, + 0x1e, 0x34, 0xbc, 
0x79, 0x78, 0xe6, 0x39, 0xee, 0x99, 0x35, 0x3a, 0xb7, 0x5d, 0xcb, 0x19, 0xe3, + 0xda, 0xcc, 0xef, 0x64, 0x1f, 0x64, 0xcc, 0xba, 0xc4, 0xb5, 0xcf, 0x6d, 0xb7, 0x3b, 0x26, 0x6f, + 0xc3, 0xf2, 0xc4, 0x0e, 0x42, 0xeb, 0xdc, 0x9b, 0x59, 0xb3, 0xf9, 0xc9, 0x33, 0x7a, 0xd5, 0xac, + 0xe1, 0x40, 0xd4, 0x18, 0x78, 0xdf, 0x9b, 0x1d, 0x21, 0x90, 0x2d, 0x3d, 0x6c, 0x27, 0x6f, 0x04, + 0x5b, 0xd2, 0x35, 0xb3, 0xcc, 0x20, 0xbc, 0xd2, 0xaf, 0x60, 0x15, 0xa7, 0x67, 0x34, 0x0f, 0x42, + 0x6f, 0x6a, 0xf9, 0x74, 0xe4, 0xf9, 0xe3, 0xa0, 0x59, 0xc1, 0xb5, 0xf6, 0x5d, 0xd1, 0x58, 0x6d, + 0x8e, 0xb7, 0x77, 0x69, 0x10, 0xb6, 0x91, 0xd8, 0xe4, 0xb4, 0x1d, 0x37, 0xf4, 0xaf, 0xcc, 0x95, + 0x71, 0x12, 0x4e, 0xde, 0x03, 0x62, 0x4f, 0x26, 0xde, 0x73, 0x2b, 0xa0, 0x93, 0x53, 0x4b, 0x0c, + 0x62, 0xb3, 0x7e, 0x37, 0x73, 0xaf, 0x64, 0x36, 0x10, 0x33, 0xa0, 0x93, 0xd3, 0x23, 0x0e, 0x27, + 0x1f, 0x03, 0x6e, 0x52, 0xeb, 0x94, 0xda, 0xe1, 0xdc, 0xa7, 0x41, 0x73, 0xf9, 0x6e, 0xee, 0x5e, + 0xfd, 0xe1, 0x8a, 0x1a, 0x2f, 0x04, 0xef, 0x38, 0xa1, 0x59, 0x65, 0x74, 0x22, 0x1d, 0x6c, 0xed, + 0xc2, 0x46, 0x7a, 0x93, 0xd8, 0xa2, 0x62, 0xa3, 0xc2, 0x16, 0x63, 0xde, 0x64, 0x3f, 0xd9, 0xce, + 0xbe, 0xb0, 0x27, 0x73, 0x8a, 0xab, 0xb0, 0x6a, 0xf2, 0xc4, 0xf7, 0xb3, 0x9f, 0x66, 0x8c, 0xdf, + 0xcb, 0x40, 0x95, 0xf7, 0x32, 0x98, 0x79, 0x6e, 0x40, 0xc9, 0x9b, 0x50, 0x93, 0xab, 0x81, 0xfa, + 0xbe, 0xe7, 0x0b, 0x6e, 0x29, 0x57, 0x5e, 0x87, 0xc1, 0xc8, 0x77, 0xa1, 0x21, 0x89, 0x66, 0x3e, + 0x75, 0xa6, 0xf6, 0x99, 0x2c, 0x5a, 0x2e, 0xa5, 0x23, 0x01, 0x26, 0x1f, 0x44, 0xe5, 0xf9, 0xde, + 0x3c, 0xa4, 0xb8, 0xd6, 0x2b, 0x0f, 0xab, 0xa2, 0x7b, 0x26, 0x83, 0xa9, 0xd2, 0x31, 0xf5, 0x0a, + 0xeb, 0xdc, 0xf8, 0xad, 0x0c, 0x10, 0xd6, 0xec, 0xa1, 0xc7, 0x0b, 0x88, 0x38, 0x52, 0x2c, 0x67, + 0xe6, 0x95, 0x77, 0x48, 0xf6, 0x45, 0x3b, 0xc4, 0x80, 0x02, 0x6f, 0x7b, 0x3e, 0xa5, 0xed, 0x1c, + 0xf5, 0xa3, 0x7c, 0x29, 0xd7, 0xc8, 0x1b, 0xff, 0x29, 0x07, 0x6b, 0x6c, 0x9d, 0xba, 0x74, 0xd2, + 0x1a, 0x8d, 0xe8, 0x4c, 0xed, 0x9d, 0x3b, 0x50, 0x71, 
0xbd, 0x31, 0x95, 0x2b, 0x96, 0x37, 0x0c, + 0x18, 0x48, 0x5b, 0xae, 0xe7, 0xb6, 0xe3, 0xf2, 0x86, 0xf3, 0xc1, 0x2c, 0x23, 0x04, 0x9b, 0xfd, + 0x36, 0x2c, 0xcf, 0xa8, 0x3b, 0xd6, 0xb7, 0x48, 0x8e, 0xaf, 0x7a, 0x01, 0x16, 0xbb, 0xe3, 0x0e, + 0x54, 0x4e, 0xe7, 0x9c, 0x8e, 0x31, 0x96, 0x3c, 0xae, 0x01, 0x10, 0xa0, 0x16, 0xe7, 0x2f, 0xb3, + 0x79, 0x70, 0x8e, 0xd8, 0x02, 0x62, 0x8b, 0x2c, 0xcd, 0x50, 0xb7, 0x01, 0xc6, 0xf3, 0x20, 0x14, + 0x3b, 0x66, 0x09, 0x91, 0x65, 0x06, 0xe1, 0x3b, 0xe6, 0x7b, 0xb0, 0x3a, 0xb5, 0x2f, 0x2d, 0x5c, + 0x3b, 0x96, 0xe3, 0x5a, 0xa7, 0x13, 0x64, 0xea, 0x45, 0xa4, 0x6b, 0x4c, 0xed, 0xcb, 0x2f, 0x18, + 0xa6, 0xeb, 0xee, 0x21, 0x9c, 0xb1, 0x95, 0x11, 0x1f, 0x09, 0xcb, 0xa7, 0x01, 0xf5, 0x2f, 0x28, + 0x72, 0x82, 0xbc, 0x59, 0x17, 0x60, 0x93, 0x43, 0x59, 0x8b, 0xa6, 0xac, 0xdf, 0xe1, 0x64, 0xc4, + 0xb7, 0xbd, 0x59, 0x9c, 0x3a, 0xee, 0x7e, 0x38, 0x19, 0xb1, 0xf3, 0x8a, 0xf1, 0x91, 0x19, 0xf5, + 0xad, 0x67, 0xcf, 0x71, 0x0f, 0xe7, 0x91, 0x6f, 0x1c, 0x51, 0xff, 0xe9, 0x73, 0x26, 0x52, 0x8c, + 0x02, 0x64, 0x44, 0xf6, 0x55, 0xb3, 0x82, 0x1b, 0xbc, 0x34, 0x0a, 0x18, 0x0b, 0xb2, 0xaf, 0xd8, + 0x26, 0x64, 0xad, 0xb5, 0x71, 0x16, 0xe8, 0x18, 0x8b, 0x0f, 0x90, 0xa3, 0xd6, 0xb0, 0xb1, 0x2d, + 0x81, 0x60, 0xf5, 0x04, 0x6c, 0xd5, 0xcb, 0xc6, 0x9e, 0x4e, 0xec, 0xb3, 0x00, 0x59, 0x4a, 0xcd, + 0xac, 0x0a, 0xe0, 0x1e, 0x83, 0x19, 0x5f, 0xc2, 0x7a, 0x62, 0x6e, 0xc5, 0x9e, 0x61, 0x22, 0x04, + 0x42, 0x70, 0x5e, 0x4b, 0xa6, 0x48, 0xa5, 0x4d, 0x5a, 0x36, 0x65, 0xd2, 0x8c, 0xdf, 0xce, 0x40, + 0x55, 0x94, 0x8c, 0xc2, 0x0e, 0xd9, 0x06, 0x22, 0x67, 0x31, 0xbc, 0x74, 0xc6, 0xd6, 0xc9, 0x55, + 0x48, 0x03, 0xbe, 0x68, 0xf6, 0x6f, 0x98, 0x0d, 0x81, 0x1b, 0x5e, 0x3a, 0xe3, 0x1d, 0x86, 0x21, + 0xf7, 0xa1, 0x11, 0xa3, 0x0f, 0x42, 0x9f, 0xaf, 0xe8, 0xfd, 0x1b, 0x66, 0x5d, 0xa3, 0x1e, 0x84, + 0x3e, 0xdb, 0x23, 0x4c, 0x94, 0x9a, 0x87, 0x96, 0xe3, 0x8e, 0xe9, 0x25, 0x2e, 0xa3, 0x9a, 0x59, + 0xe1, 0xb0, 0x2e, 0x03, 0xed, 0xd4, 0xa1, 0xaa, 0x17, 0x67, 0x9c, 0x41, 0x49, 0xca, 0x61, 
0x28, + 0x88, 0x24, 0x9a, 0x64, 0x96, 0x43, 0xd5, 0x92, 0x9b, 0x50, 0x8a, 0xb7, 0xc0, 0x2c, 0x86, 0xaf, + 0x5c, 0xb1, 0xf1, 0x39, 0x34, 0x0e, 0xd8, 0xe2, 0x71, 0xd9, 0x62, 0x15, 0x72, 0xe5, 0x06, 0x2c, + 0x69, 0x9b, 0xa6, 0x6c, 0x8a, 0x14, 0x3b, 0x73, 0xcf, 0xbd, 0x20, 0x14, 0xb5, 0xe0, 0x6f, 0xe3, + 0xf7, 0x33, 0x40, 0x3a, 0x41, 0xe8, 0x4c, 0xed, 0x90, 0xee, 0x51, 0xc5, 0x16, 0xfa, 0x50, 0x65, + 0xa5, 0x0d, 0xbd, 0x16, 0x17, 0xf4, 0xb8, 0x40, 0xf1, 0xae, 0xd8, 0xc6, 0x8b, 0x19, 0xb6, 0x75, + 0x6a, 0xce, 0xe6, 0x63, 0x05, 0xb0, 0x5d, 0x16, 0xda, 0xfe, 0x19, 0x0d, 0x51, 0x3c, 0x14, 0x72, + 0x0d, 0x70, 0x10, 0x13, 0x0c, 0xb7, 0x7e, 0x00, 0x2b, 0x0b, 0x65, 0xe8, 0x7c, 0xb9, 0x9c, 0xc2, + 0x97, 0x73, 0x3a, 0x5f, 0xb6, 0x60, 0x35, 0xd6, 0x2e, 0xb1, 0xd2, 0x36, 0xa1, 0xc8, 0x36, 0x04, + 0x13, 0x0e, 0x32, 0x5c, 0x5a, 0x3d, 0xa5, 0x94, 0x89, 0xd7, 0xef, 0xc3, 0xda, 0x29, 0xa5, 0xbe, + 0x1d, 0x22, 0x12, 0x77, 0x0c, 0x9b, 0x21, 0x51, 0xf0, 0x8a, 0xc0, 0x0d, 0xec, 0xf0, 0x88, 0xfa, + 0x6c, 0xa6, 0x8c, 0xff, 0x9d, 0x81, 0x65, 0xc6, 0x41, 0x0f, 0x6d, 0xf7, 0x4a, 0x8e, 0xd3, 0x41, + 0xea, 0x38, 0xdd, 0xd3, 0x0e, 0x43, 0x8d, 0xfa, 0x9b, 0x0e, 0x52, 0x2e, 0x39, 0x48, 0xe4, 0x2e, + 0x54, 0x63, 0x6d, 0x2d, 0x60, 0x5b, 0x21, 0x50, 0x8d, 0x8c, 0x24, 0xd2, 0x25, 0x4d, 0x22, 0xfd, + 0xd9, 0x07, 0xf7, 0x6d, 0x68, 0x44, 0x9d, 0x11, 0x23, 0x4b, 0x20, 0xcf, 0x16, 0xaa, 0x28, 0x00, + 0x7f, 0x1b, 0xff, 0x28, 0xc3, 0x09, 0xdb, 0x9e, 0x13, 0x49, 0xbd, 0x04, 0xf2, 0x4c, 0xca, 0x96, + 0x84, 0xec, 0xf7, 0xb5, 0x3a, 0xc4, 0xb7, 0x30, 0x04, 0x37, 0xa1, 0x14, 0x30, 0x11, 0xda, 0x9e, + 0xf0, 0x51, 0x28, 0x99, 0x45, 0x96, 0x6e, 0x4d, 0x26, 0xd1, 0xe8, 0x14, 0x75, 0x79, 0xfd, 0x1d, + 0x58, 0xd1, 0xda, 0xfc, 0x82, 0xde, 0xf5, 0x80, 0x1c, 0x38, 0x41, 0x78, 0xec, 0x06, 0x33, 0x4d, + 0xc8, 0xbb, 0x05, 0x65, 0xc6, 0x8d, 0x59, 0x7b, 0x03, 0x21, 0xd1, 0x33, 0xf6, 0xcc, 0x5a, 0x1b, + 0x20, 0xd2, 0xbe, 0x14, 0xc8, 0xac, 0x40, 0xda, 0x97, 0x88, 0x34, 0x3e, 0x85, 0xd5, 0x58, 0x79, + 0xa2, 0xea, 0x37, 0xa0, 0x30, 
0x0f, 0x2f, 0x3d, 0x29, 0xc6, 0x57, 0xc4, 0x6a, 0x62, 0x4a, 0xa8, + 0xc9, 0x31, 0xc6, 0x63, 0x58, 0xe9, 0xd1, 0xe7, 0x62, 0xc3, 0xcb, 0x86, 0xbc, 0x0d, 0xf9, 0x97, + 0x28, 0xa6, 0x88, 0x37, 0xb6, 0x81, 0xe8, 0x99, 0x45, 0xad, 0x9a, 0x9e, 0x9a, 0x89, 0xe9, 0xa9, + 0xc6, 0xdb, 0x40, 0x06, 0xce, 0x99, 0x7b, 0x48, 0x83, 0xc0, 0x3e, 0x53, 0x2c, 0xa2, 0x01, 0xb9, + 0x69, 0x70, 0x26, 0xf8, 0x19, 0xfb, 0x69, 0x7c, 0x08, 0xab, 0x31, 0x3a, 0x51, 0xf0, 0x6b, 0x50, + 0x0e, 0x9c, 0x33, 0x17, 0x85, 0x30, 0x51, 0x74, 0x04, 0x30, 0xf6, 0x60, 0xed, 0x0b, 0xea, 0x3b, + 0xa7, 0x57, 0x2f, 0x2b, 0x3e, 0x5e, 0x4e, 0x36, 0x59, 0x4e, 0x07, 0xd6, 0x13, 0xe5, 0x88, 0xea, + 0xf9, 0xa2, 0x16, 0x33, 0x59, 0x32, 0x79, 0x42, 0xe3, 0x91, 0x59, 0x9d, 0x47, 0x1a, 0x1e, 0x90, + 0xb6, 0xe7, 0xba, 0x74, 0x14, 0x1e, 0x51, 0xea, 0xcb, 0xc6, 0xbc, 0xab, 0xad, 0xe0, 0xca, 0xc3, + 0x4d, 0x31, 0xb2, 0x49, 0xc6, 0x2b, 0x96, 0x36, 0x81, 0xfc, 0x8c, 0xfa, 0x53, 0x2c, 0xb8, 0x64, + 0xe2, 0x6f, 0x36, 0xb8, 0x4c, 0x33, 0xf5, 0xe6, 0x5c, 0x73, 0xc9, 0x9b, 0x32, 0x69, 0xac, 0xc3, + 0x6a, 0xac, 0x42, 0xde, 0x6a, 0xe3, 0x01, 0xac, 0xef, 0x3a, 0xc1, 0x68, 0xb1, 0x29, 0x9b, 0x50, + 0x9c, 0xcd, 0x4f, 0xac, 0x38, 0x77, 0x7f, 0x4a, 0xaf, 0x8c, 0x26, 0x6c, 0x24, 0x73, 0x88, 0xb2, + 0x7e, 0x3d, 0x03, 0xf9, 0xfd, 0xe1, 0x41, 0x9b, 0x6c, 0x41, 0xc9, 0x71, 0x47, 0xde, 0x94, 0x89, + 0x6f, 0x7c, 0x34, 0x54, 0xfa, 0xda, 0x0d, 0x79, 0x0b, 0xca, 0x28, 0xf5, 0x31, 0xc5, 0x5b, 0x08, + 0x50, 0x25, 0x06, 0x38, 0xf0, 0x46, 0xcf, 0x98, 0xc6, 0x4f, 0x2f, 0x67, 0x8e, 0x8f, 0x3a, 0xbd, + 0xd4, 0x59, 0xf3, 0x5c, 0x62, 0x88, 0x10, 0x42, 0x75, 0xfd, 0xf5, 0x2c, 0x10, 0x71, 0x66, 0xb7, + 0x3d, 0x37, 0x08, 0x7d, 0xdb, 0x71, 0xc3, 0x20, 0x2e, 0x93, 0x64, 0x12, 0x32, 0xc9, 0x3d, 0x68, + 0xa0, 0x1c, 0x20, 0xe4, 0x21, 0x64, 0xe3, 0xd9, 0x48, 0x26, 0x12, 0x02, 0x11, 0x63, 0xe7, 0x6f, + 0x41, 0x3d, 0x12, 0xc5, 0x94, 0x41, 0x25, 0x6f, 0x56, 0x95, 0x38, 0x26, 0x98, 0x3e, 0xdb, 0x8e, + 0x52, 0xc6, 0x50, 0x7a, 0x23, 0x97, 0xfa, 0x56, 0xa6, 0xf6, 0xe5, 
0x11, 0x95, 0x82, 0x1f, 0x6a, + 0x90, 0x06, 0xd4, 0xa4, 0xa8, 0xc5, 0x29, 0xb9, 0x04, 0x58, 0x11, 0xf2, 0x16, 0xd2, 0xa4, 0x0b, + 0x4e, 0x4b, 0xe9, 0x82, 0x93, 0xf1, 0xef, 0xcb, 0x50, 0x14, 0xc3, 0xc0, 0xc5, 0xa0, 0xd0, 0xb9, + 0xa0, 0x91, 0x18, 0xc4, 0x52, 0x4c, 0xb8, 0xf2, 0xe9, 0xd4, 0x0b, 0x95, 0xf4, 0xcb, 0x17, 0x69, + 0x95, 0x03, 0x85, 0xfc, 0xab, 0x49, 0x60, 0xdc, 0x0e, 0x94, 0xe3, 0x44, 0x23, 0x5d, 0x2e, 0xba, + 0x05, 0x45, 0x29, 0x48, 0xe5, 0x95, 0x82, 0xb8, 0x34, 0xe2, 0xa2, 0xef, 0x16, 0x94, 0x46, 0xf6, + 0xcc, 0x1e, 0x39, 0xe1, 0x95, 0xe0, 0xa3, 0x2a, 0xcd, 0x4a, 0x9f, 0x78, 0x23, 0x7b, 0x62, 0x9d, + 0xd8, 0x13, 0xdb, 0x1d, 0x51, 0x61, 0x60, 0xa9, 0x22, 0x70, 0x87, 0xc3, 0xc8, 0x77, 0xa0, 0x2e, + 0xda, 0x29, 0xa9, 0xb8, 0x9d, 0x45, 0xb4, 0x5e, 0x92, 0x31, 0x49, 0xdd, 0x9b, 0xb2, 0x79, 0x39, + 0xa5, 0x5c, 0xa6, 0xcd, 0x99, 0x65, 0x0e, 0xd9, 0xa3, 0xd8, 0x5b, 0x81, 0x7e, 0xce, 0x57, 0x50, + 0x99, 0x57, 0xc5, 0x81, 0x5f, 0x72, 0xbb, 0xc8, 0xa2, 0x60, 0x9b, 0xd3, 0x04, 0xdb, 0x77, 0x61, + 0x65, 0xee, 0x06, 0x34, 0x0c, 0x27, 0x74, 0xac, 0xda, 0x52, 0x41, 0xa2, 0x86, 0x42, 0xc8, 0xe6, + 0x6c, 0xc3, 0x2a, 0xb7, 0x0c, 0x05, 0x76, 0xe8, 0x05, 0xe7, 0x4e, 0x60, 0x05, 0x4c, 0xdd, 0xe4, + 0xb6, 0x83, 0x15, 0x44, 0x0d, 0x04, 0x66, 0xc0, 0xf5, 0xcd, 0xcd, 0x04, 0xbd, 0x4f, 0x47, 0xd4, + 0xb9, 0xa0, 0x63, 0x14, 0x7a, 0x73, 0xe6, 0x7a, 0x2c, 0x8f, 0x29, 0x90, 0xa8, 0xc1, 0xcc, 0xa7, + 0xd6, 0x7c, 0x36, 0xb6, 0x99, 0xe4, 0x57, 0xe7, 0x9a, 0x85, 0x3b, 0x9f, 0x1e, 0x73, 0x08, 0x79, + 0x00, 0x52, 0xac, 0x15, 0x6b, 0x66, 0x39, 0xc6, 0xf0, 0xd9, 0x9e, 0x35, 0xab, 0x82, 0x82, 0x4b, + 0xdd, 0x77, 0xf4, 0xcd, 0xd2, 0x60, 0x2b, 0x0c, 0x35, 0xb0, 0x68, 0xc3, 0x34, 0xa1, 0x38, 0xf3, + 0x9d, 0x0b, 0x3b, 0xa4, 0xcd, 0x15, 0x7e, 0xf6, 0x89, 0x24, 0x63, 0x9f, 0x8e, 0xeb, 0x84, 0x8e, + 0x1d, 0x7a, 0x7e, 0x93, 0x20, 0x2e, 0x02, 0x90, 0xfb, 0xb0, 0x82, 0xeb, 0x24, 0x08, 0xed, 0x70, + 0x1e, 0x08, 0x91, 0x7e, 0x15, 0x17, 0x14, 0x2a, 0x25, 0x03, 0x84, 0xa3, 0x54, 0x4f, 0x3e, 0x81, + 0x0d, 
0xbe, 0x34, 0x16, 0xb6, 0xe6, 0x1a, 0x1b, 0x0e, 0x6c, 0xd1, 0x2a, 0x52, 0xb4, 0xe3, 0x7b, + 0xf4, 0x33, 0xd8, 0x14, 0xcb, 0x65, 0x21, 0xe7, 0xba, 0xca, 0xb9, 0xc6, 0x49, 0x12, 0x59, 0xb7, + 0x61, 0x85, 0x35, 0xcd, 0x19, 0x59, 0xa2, 0x04, 0xb6, 0x2b, 0x36, 0x58, 0x2f, 0x30, 0xd3, 0x32, + 0x47, 0x9a, 0x88, 0x7b, 0x4a, 0xaf, 0xc8, 0xe7, 0xb0, 0xcc, 0x97, 0x0f, 0xea, 0xad, 0x78, 0x2c, + 0x6e, 0xe1, 0xb1, 0xb8, 0x2e, 0x06, 0xb7, 0xad, 0xb0, 0x78, 0x32, 0xd6, 0x47, 0xb1, 0x34, 0xdb, + 0x1a, 0x13, 0xe7, 0x94, 0x32, 0x2e, 0xdd, 0xdc, 0xe4, 0x8b, 0x4d, 0xa6, 0xd9, 0xae, 0x9d, 0xcf, + 0x10, 0xd3, 0xe4, 0xac, 0x92, 0xa7, 0x70, 0x1d, 0x4f, 0xbc, 0x80, 0x4a, 0x9b, 0x62, 0xf3, 0xa6, + 0xd8, 0x90, 0x0c, 0x28, 0x85, 0x73, 0xa6, 0xe1, 0x70, 0x6d, 0x52, 0x59, 0x7e, 0x6f, 0xe1, 0xc2, + 0xa8, 0x71, 0xa5, 0x52, 0x5a, 0x7f, 0x99, 0x20, 0x74, 0x6e, 0x3f, 0x97, 0x4c, 0xf5, 0x35, 0xe4, + 0x26, 0xc0, 0x40, 0xc2, 0x50, 0xb8, 0x07, 0x2b, 0x62, 0x16, 0x22, 0x66, 0xda, 0xbc, 0x8d, 0x07, + 0xd4, 0x4d, 0xd9, 0xc7, 0x05, 0x6e, 0x6b, 0x36, 0xf8, 0xbc, 0x68, 0xfc, 0x77, 0x1f, 0x88, 0x9c, + 0x14, 0xad, 0xa0, 0xd7, 0x5f, 0x56, 0xd0, 0x8a, 0x98, 0xa6, 0x08, 0x64, 0xfc, 0x6e, 0x86, 0xcb, + 0x33, 0x82, 0x3a, 0xd0, 0x34, 0x79, 0xce, 0xd7, 0x2c, 0xcf, 0x9d, 0x5c, 0x09, 0x56, 0x07, 0x1c, + 0xd4, 0x77, 0x27, 0xc8, 0x6b, 0x1c, 0x57, 0x27, 0xe1, 0x47, 0x67, 0x55, 0x02, 0x91, 0xe8, 0x0e, + 0x54, 0x66, 0xf3, 0x93, 0x89, 0x33, 0xe2, 0x24, 0x39, 0x5e, 0x0a, 0x07, 0x21, 0xc1, 0x1b, 0x50, + 0x15, 0x6b, 0x9d, 0x53, 0xe4, 0x91, 0xa2, 0x22, 0x60, 0x48, 0x82, 0x47, 0x33, 0xf5, 0x91, 0xd9, + 0x55, 0x4d, 0xfc, 0x6d, 0xec, 0xc0, 0x5a, 0xbc, 0xd1, 0x42, 0x6e, 0xb8, 0x0f, 0x25, 0xc1, 0x49, + 0xa5, 0x8d, 0xab, 0x1e, 0x1f, 0x0d, 0x53, 0xe1, 0x8d, 0xff, 0x50, 0x80, 0x55, 0x39, 0x46, 0x6c, + 0xb2, 0x07, 0xf3, 0xe9, 0xd4, 0xf6, 0x53, 0x58, 0x74, 0xe6, 0xc5, 0x2c, 0x3a, 0xbb, 0xc0, 0xa2, + 0xe3, 0x46, 0x0e, 0xce, 0xe1, 0xe3, 0x46, 0x0e, 0xb6, 0xba, 0xb8, 0xde, 0xa9, 0x9b, 0xd2, 0x6b, + 0x02, 0x3c, 0xe4, 0x26, 0xfb, 0x85, 0x03, 
0xa5, 0x90, 0x72, 0xa0, 0xe8, 0xc7, 0xc1, 0x52, 0xe2, + 0x38, 0x78, 0x03, 0xf8, 0x32, 0x96, 0xeb, 0xb1, 0xc8, 0x55, 0x51, 0x84, 0x89, 0x05, 0xf9, 0x0e, + 0x2c, 0x27, 0x39, 0x30, 0x67, 0xf5, 0xf5, 0x14, 0xfe, 0xeb, 0x4c, 0x29, 0x8a, 0x14, 0x1a, 0x71, + 0x59, 0xf0, 0x5f, 0x67, 0x4a, 0x0f, 0x10, 0x23, 0xe9, 0x3b, 0x00, 0xbc, 0x6e, 0xdc, 0xc6, 0x80, + 0xdb, 0xf8, 0xed, 0xc4, 0xca, 0xd4, 0x46, 0x7d, 0x9b, 0x25, 0xe6, 0x3e, 0xc5, 0x7d, 0x5d, 0xc6, + 0x9c, 0xb8, 0xa5, 0x3f, 0x81, 0xba, 0x37, 0xa3, 0xae, 0x15, 0x71, 0xc1, 0x0a, 0x16, 0xd5, 0x10, + 0x45, 0x75, 0x25, 0xdc, 0xac, 0x31, 0x3a, 0x95, 0x24, 0x9f, 0xf1, 0x41, 0xa6, 0x5a, 0xce, 0xea, + 0x35, 0x39, 0xeb, 0x48, 0x18, 0x65, 0xfd, 0x10, 0x2a, 0x3e, 0x0d, 0xbc, 0xc9, 0x9c, 0xdb, 0xe5, + 0x6b, 0xb8, 0x8e, 0xa4, 0xa1, 0xd2, 0x54, 0x18, 0x53, 0xa7, 0x32, 0x7e, 0x23, 0x03, 0x15, 0xad, + 0x0f, 0x64, 0x1d, 0x56, 0xda, 0xfd, 0xfe, 0x51, 0xc7, 0x6c, 0x0d, 0xbb, 0x5f, 0x74, 0xac, 0xf6, + 0x41, 0x7f, 0xd0, 0x69, 0xdc, 0x60, 0xe0, 0x83, 0x7e, 0xbb, 0x75, 0x60, 0xed, 0xf5, 0xcd, 0xb6, + 0x04, 0x67, 0xc8, 0x06, 0x10, 0xb3, 0x73, 0xd8, 0x1f, 0x76, 0x62, 0xf0, 0x2c, 0x69, 0x40, 0x75, + 0xc7, 0xec, 0xb4, 0xda, 0xfb, 0x02, 0x92, 0x23, 0x6b, 0xd0, 0xd8, 0x3b, 0xee, 0xed, 0x76, 0x7b, + 0x4f, 0xac, 0x76, 0xab, 0xd7, 0xee, 0x1c, 0x74, 0x76, 0x1b, 0x79, 0x52, 0x83, 0x72, 0x6b, 0xa7, + 0xd5, 0xdb, 0xed, 0xf7, 0x3a, 0xbb, 0x8d, 0x82, 0xf1, 0xdf, 0x33, 0x00, 0x51, 0x43, 0x19, 0x5f, + 0x8d, 0x9a, 0xaa, 0xfb, 0xc1, 0xd6, 0x17, 0x3a, 0xc5, 0xf9, 0xaa, 0x1f, 0x4b, 0x93, 0x87, 0x50, + 0xf4, 0xe6, 0xe1, 0xc8, 0x9b, 0x72, 0x11, 0xbe, 0xfe, 0xb0, 0xb9, 0x90, 0xaf, 0xcf, 0xf1, 0xa6, + 0x24, 0x8c, 0xf9, 0xba, 0x72, 0x2f, 0xf3, 0x75, 0xc5, 0x9d, 0x6a, 0x5c, 0xae, 0xd3, 0x9c, 0x6a, + 0xb7, 0x01, 0x82, 0xe7, 0x94, 0xce, 0xd0, 0x4c, 0x23, 0x76, 0x41, 0x19, 0x21, 0x43, 0xa6, 0xe1, + 0xfd, 0x51, 0x06, 0xd6, 0x71, 0x2d, 0x8d, 0x93, 0x4c, 0xec, 0x2e, 0x54, 0x46, 0x9e, 0x37, 0xa3, + 0x4c, 0xa4, 0x55, 0xf2, 0x9a, 0x0e, 0x62, 0x0c, 0x8a, 0x33, 0xe4, 0x53, 0xcf, 
0x1f, 0x51, 0xc1, + 0xc3, 0x00, 0x41, 0x7b, 0x0c, 0xc2, 0xf6, 0x90, 0xd8, 0x84, 0x9c, 0x82, 0xb3, 0xb0, 0x0a, 0x87, + 0x71, 0x92, 0x0d, 0x58, 0x3a, 0xf1, 0xa9, 0x3d, 0x3a, 0x17, 0xdc, 0x4b, 0xa4, 0xc8, 0x77, 0x23, + 0x73, 0xd5, 0x88, 0xed, 0x89, 0x09, 0xe5, 0x8d, 0x2f, 0x99, 0xcb, 0x02, 0xde, 0x16, 0x60, 0x76, + 0xce, 0xdb, 0x27, 0xb6, 0x3b, 0xf6, 0x5c, 0x3a, 0x16, 0xfa, 0x6f, 0x04, 0x30, 0x8e, 0x60, 0x23, + 0xd9, 0x3f, 0xc1, 0xef, 0x3e, 0xd6, 0xf8, 0x1d, 0x57, 0x3c, 0xb7, 0xae, 0xdf, 0x63, 0x1a, 0xef, + 0xfb, 0x57, 0x79, 0xc8, 0x33, 0x75, 0xe3, 0x5a, 0xcd, 0x44, 0xd7, 0x2c, 0x73, 0x0b, 0x1e, 0x50, + 0xb4, 0x8a, 0x71, 0x01, 0x4c, 0x4c, 0x16, 0x42, 0x50, 0xf0, 0x52, 0x68, 0x9f, 0x8e, 0x2e, 0x84, + 0xe4, 0xcd, 0xd1, 0x26, 0x1d, 0x5d, 0xa0, 0xa2, 0x6f, 0x87, 0x3c, 0x2f, 0xe7, 0x57, 0xc5, 0xc0, + 0x0e, 0x31, 0xa7, 0x40, 0x61, 0xbe, 0xa2, 0x42, 0x61, 0xae, 0x26, 0x14, 0x1d, 0xf7, 0xc4, 0x9b, + 0xbb, 0x63, 0x64, 0x4f, 0x25, 0x53, 0x26, 0xd1, 0xe1, 0x8a, 0x9c, 0x94, 0x1d, 0xed, 0x9c, 0x1b, + 0x95, 0x18, 0x60, 0xc8, 0x0e, 0xf7, 0x0f, 0xa0, 0x1c, 0x5c, 0xb9, 0x23, 0x9d, 0x07, 0xad, 0x89, + 0xf1, 0x61, 0xbd, 0xdf, 0x1e, 0x5c, 0xb9, 0x23, 0x5c, 0xf1, 0xa5, 0x40, 0xfc, 0x22, 0x8f, 0xa0, + 0xa4, 0x5c, 0x14, 0xfc, 0x04, 0xb9, 0xa9, 0xe7, 0x90, 0x7e, 0x09, 0x6e, 0x09, 0x52, 0xa4, 0xe4, + 0x7d, 0x58, 0x42, 0x3f, 0x42, 0xd0, 0xac, 0x62, 0x26, 0xa9, 0x6e, 0xb2, 0x66, 0xa0, 0xaf, 0x93, + 0x8e, 0xd1, 0xa7, 0x60, 0x0a, 0x32, 0x36, 0x4c, 0xa7, 0x13, 0x7b, 0x66, 0x8d, 0x50, 0x7d, 0xab, + 0x71, 0x97, 0x21, 0x83, 0xb4, 0x51, 0x83, 0xbb, 0x0b, 0x55, 0x74, 0xff, 0x20, 0x8d, 0xcb, 0xe5, + 0xd0, 0x9c, 0x09, 0x0c, 0xb6, 0x37, 0xb1, 0x67, 0xbd, 0x60, 0xeb, 0x29, 0xd4, 0x62, 0x8d, 0xd1, + 0x4d, 0x43, 0x35, 0x6e, 0x1a, 0x7a, 0x4b, 0x37, 0x0d, 0x45, 0x47, 0xa1, 0xc8, 0xa6, 0x9b, 0x8a, + 0x7e, 0x00, 0x25, 0x39, 0x16, 0x8c, 0xe7, 0x1c, 0xf7, 0x9e, 0xf6, 0xfa, 0x5f, 0xf6, 0xac, 0xc1, + 0x57, 0xbd, 0x76, 0xe3, 0x06, 0x59, 0x86, 0x4a, 0xab, 0x8d, 0x6c, 0x0c, 0x01, 0x19, 0x46, 0x72, + 0xd4, 0x1a, 0x0c, 
0x14, 0x24, 0x6b, 0xec, 0x41, 0x23, 0xd9, 0x55, 0xb6, 0xa8, 0x43, 0x09, 0x13, + 0x6e, 0x9a, 0x08, 0xc0, 0x54, 0x7c, 0xee, 0x79, 0xe1, 0x6a, 0x12, 0x4f, 0x18, 0x8f, 0xa0, 0xc1, + 0x0e, 0x76, 0x36, 0xd6, 0xba, 0x03, 0x76, 0xc2, 0x44, 0x6f, 0xdd, 0x55, 0x53, 0x32, 0x2b, 0x1c, + 0x86, 0x55, 0x19, 0x1f, 0xc3, 0x8a, 0x96, 0x2d, 0x32, 0xc9, 0x30, 0x61, 0x21, 0x69, 0x92, 0x41, + 0x35, 0x9b, 0x63, 0x8c, 0x4d, 0x58, 0x67, 0xc9, 0xce, 0x05, 0x75, 0xc3, 0xc1, 0xfc, 0x84, 0xfb, + 0xed, 0x1d, 0xcf, 0x65, 0xea, 0x77, 0x59, 0x61, 0xae, 0xdf, 0x25, 0xdb, 0xc2, 0x7a, 0xc3, 0xd9, + 0xe2, 0x96, 0x56, 0x03, 0x66, 0xdc, 0xc6, 0xbf, 0x31, 0x2b, 0x4e, 0x59, 0x81, 0xd8, 0xb0, 0x1e, + 0x75, 0x3a, 0xa6, 0xd5, 0xef, 0x1d, 0x74, 0x7b, 0xec, 0x70, 0x60, 0xc3, 0x8a, 0x80, 0xbd, 0x3d, + 0x84, 0x64, 0x8c, 0x06, 0xd4, 0x9f, 0xd0, 0xb0, 0xeb, 0x9e, 0x7a, 0x62, 0x30, 0x8c, 0x3f, 0xbf, + 0x04, 0xcb, 0x0a, 0x14, 0x59, 0x81, 0x2e, 0xa8, 0x1f, 0x38, 0x9e, 0x8b, 0xeb, 0xa4, 0x6c, 0xca, + 0x24, 0x63, 0x6f, 0x42, 0x4b, 0x43, 0x31, 0x63, 0x0d, 0xb1, 0x42, 0xaf, 0x43, 0x19, 0xe3, 0x1d, + 0x58, 0x76, 0xc6, 0xd4, 0x0d, 0x9d, 0xf0, 0xca, 0x8a, 0xd9, 0x9f, 0xeb, 0x12, 0x2c, 0xe4, 0x8c, + 0x35, 0x28, 0xd8, 0x13, 0xc7, 0x96, 0xf1, 0x10, 0x3c, 0xc1, 0xa0, 0x23, 0x6f, 0xe2, 0xf9, 0xa8, + 0xb7, 0x94, 0x4d, 0x9e, 0x20, 0x0f, 0x60, 0x8d, 0xe9, 0x50, 0xba, 0x53, 0x00, 0x39, 0x14, 0x37, + 0x85, 0x13, 0x77, 0x3e, 0x3d, 0x8a, 0x1c, 0x03, 0x0c, 0xc3, 0xa4, 0x0b, 0x96, 0x43, 0x88, 0x93, + 0x2a, 0x03, 0xb7, 0x4a, 0xac, 0xb8, 0xf3, 0x69, 0x0b, 0x31, 0x8a, 0xfe, 0x21, 0xac, 0x33, 0x7a, + 0x25, 0x80, 0xaa, 0x1c, 0xcb, 0x98, 0x83, 0x15, 0xd6, 0x15, 0x38, 0x95, 0xe7, 0x16, 0x94, 0x79, + 0xab, 0xd8, 0x92, 0x28, 0x70, 0x9b, 0x05, 0x36, 0x85, 0xfa, 0xc1, 0x42, 0xe8, 0x02, 0x37, 0x04, + 0x24, 0x43, 0x17, 0xb4, 0xe0, 0x87, 0x52, 0x32, 0xf8, 0xe1, 0x21, 0xac, 0x9f, 0xb0, 0x35, 0x7a, + 0x4e, 0xed, 0x31, 0xf5, 0xad, 0x68, 0xe5, 0x73, 0x75, 0x73, 0x95, 0x21, 0xf7, 0x11, 0xa7, 0x36, + 0x0a, 0x93, 0x04, 0x19, 0xe3, 0xa1, 0x63, 0x2b, 0xf4, 
0x2c, 0x14, 0x10, 0x91, 0x85, 0x95, 0xcc, + 0x1a, 0x07, 0x0f, 0xbd, 0x36, 0x03, 0xc6, 0xe9, 0xce, 0x7c, 0x7b, 0x76, 0x2e, 0x94, 0x41, 0x45, + 0xf7, 0x84, 0x01, 0xc9, 0x6b, 0x50, 0x64, 0x7b, 0xc2, 0xa5, 0xdc, 0x13, 0xcc, 0xd5, 0x2c, 0x09, + 0x22, 0x6f, 0xc1, 0x12, 0xd6, 0x11, 0x34, 0x1b, 0xb8, 0x21, 0xaa, 0xd1, 0x51, 0xe1, 0xb8, 0xa6, + 0xc0, 0x31, 0x71, 0x7b, 0xee, 0x3b, 0x9c, 0x8f, 0x95, 0x4d, 0xfc, 0x4d, 0x7e, 0xa8, 0x31, 0xc5, + 0x55, 0xcc, 0xfb, 0x96, 0xc8, 0x9b, 0x58, 0x8a, 0xd7, 0xf1, 0xc7, 0x6f, 0x95, 0x5b, 0xfd, 0x28, + 0x5f, 0xaa, 0x34, 0xaa, 0x46, 0x13, 0x23, 0x36, 0x4c, 0x3a, 0xf2, 0x2e, 0xa8, 0x7f, 0x15, 0xdb, + 0x23, 0x19, 0xd8, 0x5c, 0x40, 0x45, 0x8e, 0x5f, 0x5f, 0xc0, 0xad, 0xa9, 0x37, 0x96, 0x42, 0x41, + 0x55, 0x02, 0x0f, 0xbd, 0x31, 0x13, 0x5e, 0x56, 0x14, 0xd1, 0xa9, 0xe3, 0x3a, 0xc1, 0x39, 0x1d, + 0x0b, 0xd9, 0xa0, 0x21, 0x11, 0x7b, 0x02, 0xce, 0x24, 0xf0, 0x99, 0xef, 0x9d, 0xa9, 0xa3, 0x32, + 0x63, 0xaa, 0xb4, 0xf1, 0x09, 0x14, 0xf8, 0x0c, 0xb2, 0x8d, 0x82, 0xf3, 0x9b, 0x11, 0x1b, 0x05, + 0xa1, 0x4d, 0x28, 0xba, 0x34, 0x7c, 0xee, 0xf9, 0xcf, 0xa4, 0x17, 0x49, 0x24, 0x8d, 0x9f, 0xa0, + 0x49, 0x53, 0x85, 0xde, 0x70, 0xe3, 0x03, 0x5b, 0xc2, 0x7c, 0x09, 0x06, 0xe7, 0xb6, 0xb0, 0xb2, + 0x96, 0x10, 0x30, 0x38, 0xb7, 0x17, 0x96, 0x70, 0x76, 0x31, 0xfa, 0xe6, 0x2d, 0xa8, 0xcb, 0x60, + 0x9f, 0xc0, 0x9a, 0xd0, 0xd3, 0x50, 0x6c, 0xc9, 0xaa, 0x88, 0xf4, 0x09, 0x0e, 0xe8, 0x69, 0x68, + 0x1c, 0xc2, 0x8a, 0xd8, 0x34, 0xfd, 0x19, 0x95, 0x55, 0x7f, 0x9a, 0xa6, 0x15, 0x55, 0x1e, 0xae, + 0xc6, 0xc5, 0x0d, 0x2e, 0xd8, 0xc5, 0x54, 0x25, 0xe3, 0xc7, 0x91, 0x05, 0x91, 0x09, 0x23, 0xa2, + 0x3c, 0xa1, 0x9b, 0x48, 0xe7, 0x9b, 0xf4, 0x61, 0x2b, 0x0d, 0xc8, 0x19, 0xb3, 0xd1, 0x09, 0xe6, + 0xa3, 0x91, 0x0c, 0xc2, 0x2a, 0x99, 0x32, 0x69, 0xfc, 0xdb, 0x0c, 0xac, 0x62, 0x61, 0x52, 0xab, + 0x13, 0x27, 0xc5, 0x4f, 0xdd, 0x48, 0x36, 0x3f, 0xba, 0x04, 0xc8, 0x13, 0xdf, 0xdc, 0xb1, 0x91, + 0x5f, 0x70, 0x6c, 0x7c, 0x17, 0x1a, 0x63, 0x3a, 0x71, 0x70, 0x29, 0x49, 0x81, 0x8a, 0x4b, 
0xb0, + 0xcb, 0x12, 0x2e, 0xac, 0x0c, 0xc6, 0x5f, 0xc9, 0xc0, 0x0a, 0x97, 0xd7, 0xd0, 0x6e, 0x23, 0x06, + 0xea, 0xb1, 0x34, 0x50, 0x08, 0x76, 0x2a, 0xfa, 0x14, 0xc9, 0x31, 0x08, 0xe5, 0xc4, 0xfb, 0x37, + 0x84, 0xe1, 0x42, 0x40, 0xc9, 0xf7, 0x51, 0x13, 0x75, 0x2d, 0x04, 0x0a, 0x39, 0xfc, 0x66, 0x8a, + 0x84, 0xa8, 0xb2, 0x33, 0x35, 0xd5, 0x45, 0xd0, 0x4e, 0x09, 0x96, 0xb8, 0x15, 0xcc, 0xd8, 0x83, + 0x5a, 0xac, 0x9a, 0x98, 0x9f, 0xa5, 0xca, 0xfd, 0x2c, 0x0b, 0x7e, 0xcf, 0xec, 0xa2, 0xdf, 0xf3, + 0x0a, 0x56, 0x4d, 0x6a, 0x8f, 0xaf, 0xf6, 0x3c, 0xff, 0x28, 0x38, 0x09, 0xf7, 0xb8, 0x10, 0xcc, + 0xce, 0x20, 0xe5, 0xcc, 0x8f, 0x39, 0x33, 0xa4, 0x4f, 0x57, 0x9a, 0x61, 0xbe, 0x03, 0xf5, 0xc8, + 0xeb, 0xaf, 0x99, 0xbd, 0x6b, 0xca, 0xf1, 0x8f, 0xb2, 0x13, 0x81, 0xfc, 0x2c, 0x38, 0x09, 0x85, + 0xe1, 0x1b, 0x7f, 0x1b, 0xff, 0x2b, 0x0f, 0x84, 0xad, 0xe6, 0xc4, 0x82, 0x49, 0xc4, 0x2b, 0x64, + 0x17, 0xe2, 0x15, 0x1e, 0x00, 0xd1, 0x08, 0x64, 0x18, 0x45, 0x4e, 0x85, 0x51, 0x34, 0x22, 0x5a, + 0x11, 0x45, 0xf1, 0x00, 0xd6, 0x84, 0x46, 0x11, 0x6f, 0x2a, 0x5f, 0x1a, 0x84, 0xab, 0x16, 0xb1, + 0xf6, 0xca, 0x58, 0x05, 0x69, 0xa9, 0xce, 0xf1, 0x58, 0x05, 0x69, 0x50, 0xd2, 0x16, 0xe0, 0xd2, + 0x4b, 0x17, 0x60, 0x71, 0x61, 0x01, 0x6a, 0xc6, 0xc5, 0x52, 0xdc, 0xb8, 0xb8, 0x60, 0x26, 0xe7, + 0xe2, 0x73, 0xcc, 0x4c, 0x7e, 0x0f, 0x1a, 0xd2, 0xd0, 0xa4, 0x4c, 0x98, 0x3c, 0xc8, 0x48, 0x18, + 0x91, 0xdb, 0xd2, 0x88, 0x19, 0xf3, 0xa8, 0x55, 0x12, 0x1e, 0xb5, 0x77, 0x61, 0x25, 0x60, 0xeb, + 0xd7, 0x9a, 0xbb, 0x22, 0xd2, 0x90, 0x8e, 0x51, 0x1f, 0x2f, 0x99, 0x0d, 0x44, 0x1c, 0x47, 0xf0, + 0x45, 0x93, 0x5c, 0x2d, 0xc5, 0x24, 0xf7, 0x28, 0x72, 0xde, 0x07, 0xe7, 0xce, 0x14, 0x05, 0x9f, + 0x28, 0x7a, 0x4e, 0x0c, 0xf0, 0xe0, 0xdc, 0x99, 0x9a, 0x32, 0x52, 0x84, 0x25, 0x48, 0x1b, 0xee, + 0x88, 0xfe, 0xa4, 0x04, 0x79, 0xf0, 0x51, 0x58, 0x46, 0x49, 0x75, 0x8b, 0x93, 0x1d, 0x26, 0xe2, + 0x3d, 0x12, 0x83, 0xc2, 0x0a, 0xe1, 0x56, 0xe0, 0x86, 0x3e, 0x28, 0x87, 0xf6, 0x25, 0xf7, 0x1b, + 0xfc, 0xcf, 0x0c, 0x34, 0xd8, 
0xb2, 0x8b, 0xed, 0xe8, 0xcf, 0x00, 0x79, 0xcf, 0x2b, 0x6e, 0xe8, + 0x0a, 0xa3, 0x95, 0xfb, 0xf9, 0x13, 0xc0, 0x0d, 0x6a, 0x79, 0x33, 0xea, 0x8a, 0xed, 0xdc, 0x8c, + 0x6f, 0xe7, 0x88, 0x65, 0xef, 0xdf, 0xe0, 0x0a, 0x1f, 0x83, 0x90, 0xcf, 0xa0, 0xcc, 0xf6, 0x01, + 0x2e, 0x4a, 0x11, 0x7b, 0xba, 0xa5, 0x94, 0xf8, 0x85, 0x2d, 0xc9, 0xb2, 0xce, 0x44, 0x32, 0x2d, + 0xbc, 0x23, 0x9f, 0x12, 0xde, 0xa1, 0xf1, 0x8b, 0x7d, 0x80, 0xa7, 0xf4, 0xea, 0xc0, 0x1b, 0xa1, + 0x39, 0xe5, 0x36, 0x00, 0xdb, 0x3a, 0xa7, 0xf6, 0xd4, 0x11, 0x86, 0xc4, 0x82, 0x59, 0x7e, 0x46, + 0xaf, 0xf6, 0x10, 0xc0, 0xd6, 0x0d, 0x43, 0x47, 0x4c, 0xa3, 0x60, 0x96, 0x9e, 0xd1, 0x2b, 0xce, + 0x31, 0x2c, 0xa8, 0x3d, 0xa5, 0x57, 0xbb, 0x94, 0x0b, 0xe6, 0x9e, 0xcf, 0xd6, 0xac, 0x6f, 0x3f, + 0x67, 0x92, 0x78, 0x2c, 0x34, 0xa3, 0xe2, 0xdb, 0xcf, 0x9f, 0xd2, 0x2b, 0x19, 0x26, 0x52, 0x64, + 0xf8, 0x89, 0x37, 0x12, 0xa2, 0x84, 0xb4, 0xdd, 0x44, 0x8d, 0x32, 0x97, 0x9e, 0xe1, 0x6f, 0xe3, + 0x4f, 0x32, 0x50, 0x63, 0xed, 0xc7, 0x53, 0x00, 0x57, 0x88, 0x88, 0x55, 0xcc, 0x44, 0xb1, 0x8a, + 0x0f, 0x05, 0x13, 0xe5, 0x47, 0x4a, 0xf6, 0xfa, 0x23, 0x05, 0xe7, 0x86, 0x9f, 0x27, 0x1f, 0x40, + 0x99, 0x73, 0x01, 0xc6, 0x56, 0x72, 0xb1, 0x09, 0x8e, 0x75, 0xc8, 0x2c, 0x21, 0xd9, 0x53, 0x1e, + 0x1a, 0xa5, 0x99, 0xc9, 0xf9, 0x10, 0x97, 0x7d, 0x65, 0x1c, 0x4f, 0x99, 0x86, 0xc2, 0x35, 0xa1, + 0x51, 0xba, 0x0d, 0x7a, 0x29, 0x69, 0x83, 0x36, 0x5c, 0x28, 0xb1, 0xa9, 0xc6, 0xce, 0xa6, 0x14, + 0x9a, 0x49, 0x2b, 0x94, 0x09, 0x1e, 0x36, 0x3b, 0x83, 0x18, 0x5f, 0xcd, 0x0a, 0xc1, 0xc3, 0x0e, + 0x28, 0x2b, 0x88, 0x35, 0xdc, 0xf5, 0x2c, 0x34, 0xea, 0x0a, 0x73, 0x67, 0xc9, 0x2c, 0xbb, 0xde, + 0x11, 0x07, 0x18, 0x7f, 0x36, 0x03, 0x15, 0x6d, 0x3f, 0xa2, 0x95, 0x5f, 0x0d, 0x27, 0xdf, 0xbc, + 0xf1, 0x1d, 0x10, 0x9b, 0x8f, 0xfd, 0x1b, 0x66, 0x6d, 0x14, 0x9b, 0xa0, 0x6d, 0xb1, 0x94, 0x31, + 0x67, 0x36, 0x66, 0x5a, 0x92, 0xfd, 0x92, 0xeb, 0x97, 0xfd, 0xde, 0x59, 0x82, 0x3c, 0x23, 0x35, + 0x1e, 0xc3, 0x8a, 0xd6, 0x0c, 0x6e, 0x7a, 0x79, 0xd5, 0x01, 0x30, 
0x7e, 0x59, 0x65, 0x66, 0x75, + 0x70, 0xa7, 0xb5, 0x8c, 0x42, 0xa3, 0x63, 0x3e, 0x2e, 0x22, 0xda, 0x8d, 0x83, 0x70, 0x64, 0x5e, + 0x35, 0x32, 0xea, 0xcf, 0x64, 0x60, 0x55, 0x2b, 0x7e, 0xcf, 0x71, 0xed, 0x89, 0xf3, 0x13, 0x94, + 0x3f, 0x02, 0xe7, 0xcc, 0x4d, 0x54, 0xc0, 0x41, 0xdf, 0xa4, 0x02, 0x76, 0x4c, 0xf0, 0x98, 0x56, + 0x1e, 0x17, 0x2d, 0x8e, 0x46, 0x40, 0x98, 0x69, 0x3f, 0x1f, 0x5e, 0x1a, 0x7f, 0x35, 0x0b, 0x6b, + 0xa2, 0x09, 0x18, 0x7a, 0xec, 0x30, 0xb1, 0xf3, 0x30, 0x38, 0x23, 0x9f, 0x41, 0x8d, 0x0d, 0x9f, + 0xe5, 0xd3, 0x33, 0x27, 0x08, 0xa9, 0xf4, 0xa7, 0xa7, 0x70, 0x5a, 0x26, 0x7d, 0x30, 0x52, 0x53, + 0x50, 0x92, 0xc7, 0x50, 0xc1, 0xac, 0xdc, 0xfa, 0x25, 0xe6, 0xaa, 0xb9, 0x98, 0x91, 0xcf, 0xc5, + 0xfe, 0x0d, 0x13, 0x82, 0x68, 0x66, 0x1e, 0x43, 0x05, 0xa7, 0xf9, 0x02, 0xc7, 0x3a, 0xc1, 0xec, + 0x16, 0xe6, 0x82, 0x65, 0x9e, 0x45, 0x33, 0xd3, 0x82, 0x1a, 0x67, 0x77, 0x62, 0x24, 0x45, 0x48, + 0xe3, 0xd6, 0x62, 0x76, 0x39, 0xd6, 0xac, 0xf1, 0x33, 0x2d, 0xbd, 0x53, 0x86, 0x62, 0xe8, 0x3b, + 0x67, 0x67, 0xd4, 0x37, 0x36, 0xd4, 0xd0, 0x30, 0x3e, 0x4e, 0x07, 0x21, 0x9d, 0x31, 0x7d, 0xc2, + 0xf8, 0x17, 0x19, 0xa8, 0x08, 0xce, 0xfc, 0x53, 0xbb, 0xea, 0xb7, 0x12, 0x76, 0xd2, 0xb2, 0x66, + 0x16, 0x7d, 0x07, 0x96, 0xa7, 0x4c, 0xf9, 0x61, 0xca, 0x79, 0xcc, 0x4f, 0x5f, 0x97, 0x60, 0x21, + 0xd7, 0x6f, 0xc3, 0x2a, 0x8a, 0xf9, 0x81, 0x15, 0x3a, 0x13, 0x4b, 0x22, 0x45, 0xfc, 0xfd, 0x0a, + 0x47, 0x0d, 0x9d, 0xc9, 0xa1, 0x40, 0x30, 0x69, 0x37, 0x08, 0xed, 0x33, 0x2a, 0xb8, 0x03, 0x4f, + 0x30, 0x85, 0x2a, 0xa1, 0x97, 0x4b, 0x85, 0xea, 0xff, 0xac, 0xc0, 0xe6, 0x02, 0x4a, 0x28, 0x54, + 0xca, 0x31, 0x3b, 0x71, 0xa6, 0x27, 0x9e, 0x72, 0x0c, 0x64, 0x34, 0xc7, 0xec, 0x01, 0xc3, 0x48, + 0xc7, 0x00, 0x85, 0x75, 0xb9, 0x64, 0xd1, 0xb2, 0xaf, 0x54, 0xf7, 0x2c, 0x2a, 0x96, 0x1f, 0xc4, + 0x8f, 0xc1, 0x64, 0x75, 0x12, 0xae, 0xcb, 0x72, 0xab, 0xb3, 0x05, 0x58, 0x40, 0xfe, 0x7f, 0x68, + 0xaa, 0x9d, 0x21, 0xf4, 0x0c, 0xcd, 0x0e, 0xc1, 0x6a, 0x7a, 0xef, 0x25, 0x35, 0xc5, 0x4c, 0xae, + 0x28, 
0xec, 0x6d, 0xc8, 0x4d, 0xc5, 0x0b, 0x54, 0x75, 0x5d, 0xc0, 0xeb, 0xb2, 0x2e, 0xd4, 0x1b, + 0x16, 0x6b, 0xcc, 0xbf, 0x52, 0xdf, 0xd0, 0x9c, 0x1c, 0xab, 0xd6, 0xbc, 0x25, 0x0a, 0x56, 0x28, + 0xbd, 0xde, 0x73, 0xd8, 0x78, 0x6e, 0x3b, 0xa1, 0xec, 0xa3, 0x66, 0x06, 0x29, 0x60, 0x7d, 0x0f, + 0x5f, 0x52, 0xdf, 0x97, 0x3c, 0x73, 0x4c, 0x93, 0x5a, 0x7b, 0xbe, 0x08, 0x0c, 0xb6, 0xfe, 0x76, + 0x0e, 0xea, 0xf1, 0x52, 0x18, 0xeb, 0x11, 0xc7, 0x95, 0x14, 0x90, 0x85, 0xd4, 0x2e, 0x9c, 0x56, + 0x3d, 0x2e, 0x18, 0x2f, 0xba, 0xd3, 0xb2, 0x29, 0xee, 0x34, 0xdd, 0x8b, 0x95, 0x7b, 0x59, 0x50, + 0x43, 0xfe, 0x95, 0x82, 0x1a, 0x0a, 0x69, 0x41, 0x0d, 0x1f, 0x5e, 0xeb, 0x05, 0xe7, 0xb6, 0xe8, + 0x54, 0x0f, 0xf8, 0xa3, 0xeb, 0x3d, 0xe0, 0x5c, 0xdc, 0xbe, 0xce, 0xfb, 0xad, 0xf9, 0xee, 0x4b, + 0xd7, 0xf8, 0x9e, 0x34, 0x6f, 0x7e, 0x8a, 0xf7, 0xbb, 0xfc, 0x0d, 0xbc, 0xdf, 0x5b, 0x7f, 0x92, + 0x01, 0xb2, 0xb8, 0x3b, 0xc8, 0x13, 0xee, 0xa9, 0x74, 0xe9, 0x44, 0x70, 0xee, 0xef, 0xbd, 0xda, + 0x0e, 0x93, 0x0b, 0x42, 0xe6, 0x26, 0xef, 0xc3, 0xaa, 0x7e, 0x4b, 0x48, 0x37, 0x33, 0xd4, 0x4c, + 0xa2, 0xa3, 0x22, 0x83, 0x99, 0x16, 0x41, 0x92, 0x7f, 0x69, 0x04, 0x49, 0xe1, 0xa5, 0x11, 0x24, + 0x4b, 0xf1, 0x08, 0x92, 0xad, 0x7f, 0x93, 0x81, 0xd5, 0x94, 0x45, 0xfc, 0xed, 0xf5, 0x99, 0xad, + 0xbd, 0x18, 0x5b, 0xcb, 0x8a, 0xb5, 0xa7, 0x73, 0xb4, 0x03, 0x69, 0x64, 0x65, 0x53, 0x11, 0x88, + 0x93, 0xea, 0xfe, 0xcb, 0xb8, 0x4b, 0x94, 0xc3, 0xd4, 0xb3, 0x6f, 0xfd, 0xdd, 0x2c, 0x54, 0x34, + 0x24, 0x1b, 0x45, 0xbe, 0x64, 0xb5, 0xc8, 0x46, 0x2e, 0x5b, 0xa2, 0x91, 0xe4, 0x0e, 0x08, 0x5f, + 0x14, 0xc7, 0xf3, 0xcd, 0x25, 0x04, 0x49, 0x24, 0xd8, 0x86, 0x55, 0xe9, 0x45, 0xa6, 0x51, 0xb0, + 0xb3, 0x38, 0x6b, 0x44, 0x40, 0x80, 0x68, 0x24, 0xd2, 0xbf, 0x2f, 0xf5, 0xd7, 0x68, 0xee, 0x34, + 0xaf, 0xdc, 0x8a, 0x08, 0x45, 0x10, 0x93, 0xc8, 0xd6, 0xf9, 0x07, 0xb0, 0xae, 0x62, 0x11, 0x62, + 0x39, 0xb8, 0xef, 0x87, 0xc8, 0x98, 0x03, 0x2d, 0xcb, 0x0f, 0xe1, 0x76, 0xa2, 0x4d, 0x89, 0xac, + 0x3c, 0x2a, 0xff, 0x66, 0xac, 0x75, 0x7a, 
0x09, 0x5b, 0x7f, 0x0a, 0x6a, 0x31, 0x46, 0xf9, 0xed, + 0x4d, 0x79, 0xd2, 0x30, 0xc5, 0x47, 0x54, 0x37, 0x4c, 0x6d, 0xfd, 0x8f, 0x1c, 0x90, 0x45, 0x5e, + 0xfd, 0xf3, 0x6c, 0xc2, 0xe2, 0xc2, 0xcc, 0xa5, 0x2c, 0xcc, 0xff, 0x67, 0xf2, 0x43, 0x64, 0x1f, + 0xd5, 0x42, 0x01, 0xf8, 0xe6, 0x6c, 0x28, 0x84, 0x6c, 0xc5, 0x27, 0xc9, 0x80, 0xa9, 0x52, 0xec, + 0xa2, 0x9b, 0x26, 0x40, 0x25, 0xe2, 0xa6, 0x8e, 0x61, 0xc9, 0x76, 0x47, 0xe7, 0x9e, 0x2f, 0xf8, + 0xe0, 0x2f, 0x7c, 0xe3, 0xe3, 0x73, 0xbb, 0x85, 0xf9, 0x51, 0x6a, 0x33, 0x45, 0x61, 0xc6, 0x07, + 0x50, 0xd1, 0xc0, 0xa4, 0x0c, 0x85, 0x83, 0xee, 0xe1, 0x4e, 0xbf, 0x71, 0x83, 0xd4, 0xa0, 0x6c, + 0x76, 0xda, 0xfd, 0x2f, 0x3a, 0x66, 0x67, 0xb7, 0x91, 0x21, 0x25, 0xc8, 0x1f, 0xf4, 0x07, 0xc3, + 0x46, 0xd6, 0xd8, 0x82, 0xa6, 0x28, 0x71, 0xd1, 0x53, 0xf4, 0x5b, 0x79, 0x65, 0xdf, 0x44, 0xa4, + 0x50, 0xf2, 0x3f, 0x84, 0xaa, 0x2e, 0xde, 0x88, 0x15, 0x91, 0x88, 0x46, 0x61, 0xea, 0xbd, 0xa7, + 0xf1, 0xea, 0x36, 0xf0, 0x58, 0x84, 0xb1, 0xca, 0x96, 0x8d, 0xc9, 0xad, 0x29, 0x4e, 0x5d, 0xd4, + 0x8f, 0x62, 0xcb, 0xf0, 0xff, 0x83, 0x7a, 0xdc, 0x2b, 0x22, 0x38, 0x52, 0x9a, 0xca, 0xca, 0x72, + 0xc7, 0xdc, 0x24, 0xe4, 0x87, 0xd0, 0x48, 0x7a, 0x55, 0x84, 0xf0, 0x7c, 0x4d, 0xfe, 0x65, 0x27, + 0xee, 0x68, 0x21, 0xfb, 0xb0, 0x96, 0x26, 0xe0, 0xe1, 0xfa, 0xb8, 0xde, 0xcc, 0x41, 0x16, 0x85, + 0x38, 0xf2, 0xa9, 0xf0, 0xae, 0x15, 0x70, 0xfa, 0xdf, 0x8a, 0xd7, 0xaf, 0x0d, 0xf6, 0x36, 0xff, + 0xa7, 0xf9, 0xd9, 0x2e, 0x00, 0x22, 0x18, 0x69, 0x40, 0xb5, 0x7f, 0xd4, 0xe9, 0x59, 0xed, 0xfd, + 0x56, 0xaf, 0xd7, 0x39, 0x68, 0xdc, 0x20, 0x04, 0xea, 0x18, 0x50, 0xb1, 0xab, 0x60, 0x19, 0x06, + 0x13, 0x5e, 0x4e, 0x09, 0xcb, 0x92, 0x35, 0x68, 0x74, 0x7b, 0x09, 0x68, 0x8e, 0x34, 0x61, 0xed, + 0xa8, 0xc3, 0x63, 0x30, 0x62, 0xe5, 0xe6, 0x99, 0xd2, 0x20, 0xba, 0xcb, 0x94, 0x86, 0x2f, 0xed, + 0xc9, 0x84, 0x86, 0x62, 0x1f, 0x48, 0x59, 0xfa, 0xaf, 0x65, 0x60, 0x3d, 0x81, 0x88, 0x5c, 0x13, + 0x5c, 0x92, 0x8e, 0xcb, 0xd0, 0x55, 0x04, 0xca, 0xdd, 0xf4, 0x2e, 0xac, 0x28, 
0x4b, 0x59, 0xe2, + 0x54, 0x6a, 0x28, 0x84, 0x24, 0x7e, 0x1f, 0x56, 0x35, 0x83, 0x5b, 0x82, 0x57, 0x10, 0x0d, 0x25, + 0x32, 0x18, 0x9b, 0xea, 0xee, 0x4f, 0xa2, 0xd5, 0x63, 0xd8, 0x48, 0x22, 0x22, 0xe7, 0x63, 0xbc, + 0xbd, 0x32, 0x49, 0x1e, 0x24, 0x16, 0x42, 0xbc, 0xb5, 0xfa, 0x84, 0xcb, 0xea, 0x7f, 0x67, 0x09, + 0xc8, 0x8f, 0xe7, 0xd4, 0xbf, 0xc2, 0x3b, 0x67, 0xc1, 0xcb, 0xc2, 0xa7, 0xa5, 0xad, 0x26, 0xfb, + 0x4a, 0xf7, 0x4a, 0xd3, 0xee, 0x75, 0xe6, 0x5f, 0x7e, 0xaf, 0xb3, 0xf0, 0xb2, 0x7b, 0x9d, 0x6f, + 0x42, 0xcd, 0x39, 0x73, 0x3d, 0xc6, 0x0a, 0x99, 0x24, 0x1c, 0x34, 0x97, 0xee, 0xe6, 0xee, 0x55, + 0xcd, 0xaa, 0x00, 0x32, 0x39, 0x38, 0x20, 0x8f, 0x23, 0x22, 0x3a, 0x3e, 0xc3, 0xbb, 0xcd, 0x3a, + 0x13, 0xec, 0x8c, 0xcf, 0xa8, 0x30, 0x4d, 0xa1, 0xa6, 0x21, 0x33, 0x33, 0x78, 0x40, 0xde, 0x82, + 0x7a, 0xe0, 0xcd, 0x99, 0x62, 0x21, 0x87, 0x81, 0x7b, 0x1f, 0xab, 0x1c, 0x7a, 0x24, 0x7d, 0xd1, + 0xab, 0xf3, 0x80, 0x5a, 0x53, 0x27, 0x08, 0x98, 0x78, 0x36, 0xf2, 0xdc, 0xd0, 0xf7, 0x26, 0xc2, + 0xa1, 0xb8, 0x32, 0x0f, 0xe8, 0x21, 0xc7, 0xb4, 0x39, 0x82, 0x7c, 0x14, 0x35, 0x69, 0x66, 0x3b, + 0x7e, 0xd0, 0x04, 0x6c, 0x92, 0xec, 0x29, 0xca, 0xef, 0xb6, 0xe3, 0xab, 0xb6, 0xb0, 0x44, 0x90, + 0xb8, 0x6f, 0x5a, 0x49, 0xde, 0x37, 0xfd, 0xb5, 0xf4, 0xfb, 0xa6, 0x3c, 0x86, 0xea, 0x81, 0x28, + 0x7a, 0x71, 0x8a, 0xbf, 0xd1, 0xb5, 0xd3, 0xc5, 0x6b, 0xb4, 0xf5, 0x6f, 0x72, 0x8d, 0x76, 0x39, + 0xed, 0x1a, 0xed, 0x07, 0x50, 0xc1, 0x0b, 0x8e, 0xd6, 0x39, 0x46, 0x52, 0x72, 0x07, 0x69, 0x43, + 0xbf, 0x01, 0xb9, 0xef, 0xb8, 0xa1, 0x09, 0xbe, 0xfc, 0x19, 0x2c, 0xde, 0x68, 0x5d, 0xf9, 0x39, + 0xde, 0x68, 0x15, 0x17, 0x31, 0xb7, 0xa1, 0x24, 0xe7, 0x89, 0x10, 0xc8, 0x9f, 0xfa, 0xde, 0x54, + 0x3a, 0x65, 0xd8, 0x6f, 0x52, 0x87, 0x6c, 0xe8, 0x89, 0xcc, 0xd9, 0xd0, 0x33, 0x7e, 0x05, 0x2a, + 0xda, 0x52, 0x23, 0x6f, 0x70, 0xcb, 0x26, 0xd3, 0xcd, 0x84, 0x6c, 0xc9, 0x47, 0xb1, 0x2c, 0xa0, + 0xdd, 0x31, 0xe3, 0x37, 0x63, 0xc7, 0xa7, 0x78, 0xf7, 0xdc, 0xf2, 0xe9, 0x05, 0xf5, 0x03, 0xe9, + 0x24, 0x6b, 0x28, 
0x84, 0xc9, 0xe1, 0xc6, 0xaf, 0xc2, 0x6a, 0x6c, 0x6e, 0x05, 0x8b, 0x78, 0x0b, + 0x96, 0x70, 0xdc, 0x64, 0x24, 0x46, 0xfc, 0x66, 0xa9, 0xc0, 0xe1, 0x3d, 0x7b, 0xee, 0xdf, 0xb3, + 0x66, 0xbe, 0x77, 0x82, 0x95, 0x64, 0xcc, 0x8a, 0x80, 0x1d, 0xf9, 0xde, 0x89, 0xf1, 0x87, 0x39, + 0xc8, 0xed, 0x7b, 0x33, 0x3d, 0xfa, 0x32, 0xb3, 0x10, 0x7d, 0x29, 0x14, 0x4e, 0x4b, 0x29, 0x94, + 0x42, 0x66, 0x47, 0xcf, 0x96, 0x54, 0x2a, 0xef, 0x41, 0x9d, 0xf1, 0x89, 0xd0, 0x63, 0x1a, 0xfb, + 0x73, 0xdb, 0xe7, 0x02, 0x31, 0x0f, 0x66, 0xae, 0xda, 0xd3, 0x70, 0xe8, 0xed, 0x71, 0x38, 0x59, + 0x83, 0x9c, 0x52, 0x5f, 0x10, 0xcd, 0x92, 0x64, 0x03, 0x96, 0xf0, 0xae, 0xc4, 0x95, 0x88, 0x24, + 0x10, 0x29, 0xf2, 0x3d, 0x58, 0x8d, 0x97, 0xcb, 0x59, 0x91, 0x90, 0x8d, 0xf4, 0x82, 0x91, 0x27, + 0xdd, 0x04, 0xc6, 0x47, 0x38, 0x8d, 0x08, 0x79, 0x3a, 0xa5, 0x14, 0x51, 0x1a, 0xd3, 0x2b, 0xc5, + 0x98, 0xde, 0x1d, 0xa8, 0x84, 0x93, 0x0b, 0x6b, 0x66, 0x5f, 0x4d, 0x3c, 0x7b, 0x2c, 0xf6, 0x37, + 0x84, 0x93, 0x8b, 0x23, 0x0e, 0x21, 0xef, 0x03, 0x4c, 0x67, 0x33, 0xb1, 0xf7, 0xd0, 0x5b, 0x13, + 0x2d, 0xe5, 0xc3, 0xa3, 0x23, 0xbe, 0xe4, 0xcc, 0xf2, 0x74, 0x36, 0xe3, 0x3f, 0xc9, 0x2e, 0xd4, + 0x53, 0xef, 0x87, 0xdf, 0x96, 0x31, 0xed, 0xde, 0x6c, 0x3b, 0x65, 0x73, 0xd6, 0x46, 0x3a, 0x6c, + 0xeb, 0x87, 0x40, 0x7e, 0xc6, 0x5b, 0xda, 0x43, 0x28, 0xab, 0xf6, 0xe9, 0x97, 0x9c, 0xf1, 0x1a, + 0x4f, 0x25, 0x76, 0xc9, 0xb9, 0x35, 0x1e, 0xfb, 0x8c, 0x2f, 0xf2, 0x03, 0x53, 0xb1, 0x7c, 0xd0, + 0x4e, 0x4c, 0x71, 0x1b, 0xc4, 0xf8, 0xcf, 0x19, 0x28, 0xf0, 0x1b, 0xd7, 0x6f, 0xc3, 0x32, 0xa7, + 0x57, 0x91, 0xac, 0x22, 0xfe, 0x80, 0x9f, 0xbb, 0x43, 0x11, 0xc4, 0xca, 0xb6, 0x85, 0xf6, 0x0a, + 0x45, 0x56, 0xcd, 0xbc, 0xf6, 0x12, 0xc5, 0x1d, 0x28, 0xab, 0xaa, 0xb5, 0xa5, 0x53, 0x92, 0x35, + 0x93, 0xd7, 0x21, 0x7f, 0xee, 0xcd, 0xa4, 0xe5, 0x07, 0xa2, 0x91, 0x34, 0x11, 0x1e, 0xb5, 0x85, + 0xd5, 0x11, 0xdd, 0x52, 0xc9, 0x89, 0xb6, 0xb0, 0x4a, 0x70, 0x19, 0x2c, 0xf6, 0x71, 0x29, 0xa5, + 0x8f, 0xc7, 0xb0, 0xcc, 0xf8, 0x80, 0x16, 0x04, 0x71, 
0xfd, 0xa1, 0xf9, 0x5d, 0x26, 0xe1, 0x8d, + 0x26, 0xf3, 0x31, 0xd5, 0x6d, 0x6f, 0x18, 0x96, 0x28, 0xe0, 0x52, 0xb2, 0x36, 0x7e, 0x27, 0xc3, + 0xf9, 0x0b, 0x2b, 0x97, 0xdc, 0x83, 0xbc, 0x2b, 0x03, 0x26, 0x22, 0x39, 0x4e, 0xdd, 0xa7, 0x62, + 0x74, 0x26, 0x52, 0xb0, 0xa9, 0xc3, 0x30, 0x03, 0xbd, 0xf4, 0x9a, 0x59, 0x71, 0xe7, 0x53, 0x65, + 0xba, 0xfa, 0x8e, 0xec, 0x56, 0xc2, 0xec, 0xc3, 0x7b, 0xaf, 0xb6, 0xe9, 0xb6, 0x16, 0xdf, 0x98, + 0x8f, 0x9d, 0x98, 0x52, 0x0a, 0x1c, 0x9f, 0x51, 0x2d, 0xae, 0xf1, 0xf7, 0xb2, 0x50, 0x8b, 0xb5, + 0x08, 0x03, 0x3c, 0xd9, 0x01, 0xc0, 0x5d, 0x53, 0x62, 0xbe, 0x31, 0x8e, 0x4e, 0x08, 0xea, 0xda, + 0x38, 0x65, 0x63, 0xe3, 0xa4, 0x22, 0x9e, 0x72, 0x7a, 0xc4, 0xd3, 0x03, 0x28, 0x47, 0xaf, 0x8f, + 0xc4, 0x9b, 0xc4, 0xea, 0x93, 0xb7, 0xca, 0x22, 0xa2, 0x28, 0x46, 0xaa, 0xa0, 0xc7, 0x48, 0x7d, + 0xae, 0x85, 0xd4, 0x2c, 0x61, 0x31, 0x46, 0xda, 0x88, 0xfe, 0x5c, 0x02, 0x6a, 0x8c, 0xc7, 0x50, + 0xd1, 0x1a, 0xaf, 0x87, 0xa5, 0x64, 0x62, 0x61, 0x29, 0xea, 0x56, 0x68, 0x36, 0xba, 0x15, 0x6a, + 0xfc, 0xb9, 0x2c, 0xd4, 0xd8, 0xfe, 0x72, 0xdc, 0xb3, 0x23, 0x6f, 0xe2, 0x8c, 0xd0, 0x55, 0xa5, + 0x76, 0x98, 0x10, 0xb4, 0xe4, 0x3e, 0x13, 0x5b, 0x8c, 0xcb, 0x59, 0xfa, 0x95, 0x78, 0xce, 0xa4, + 0xd5, 0x95, 0x78, 0x03, 0x6a, 0x8c, 0x31, 0xa2, 0xd3, 0x29, 0x7a, 0xc3, 0xc4, 0xac, 0x9c, 0x52, + 0xba, 0x63, 0x07, 0x9c, 0x43, 0x7e, 0x0f, 0x56, 0x19, 0x0d, 0xde, 0x06, 0x9e, 0x3a, 0x93, 0x89, + 0x13, 0x5d, 0x0b, 0xcb, 0x99, 0x8d, 0x53, 0x4a, 0x4d, 0x3b, 0xa4, 0x87, 0x0c, 0x21, 0x9e, 0x3c, + 0x29, 0x8d, 0x9d, 0xc0, 0x3e, 0x89, 0xc2, 0x70, 0x55, 0x1a, 0x5d, 0xe1, 0xc2, 0x95, 0x1b, 0x6d, + 0xb2, 0xbc, 0x59, 0x99, 0x72, 0x47, 0x2e, 0xe6, 0x4f, 0xac, 0xa4, 0x62, 0x72, 0x25, 0x19, 0xff, + 0x34, 0x0b, 0x15, 0x6d, 0x59, 0xbe, 0xca, 0xe9, 0x7a, 0x7b, 0xc1, 0xb5, 0x58, 0xd6, 0xbd, 0x88, + 0x6f, 0xc6, 0xab, 0xcc, 0xa9, 0xbb, 0x43, 0xfa, 0x02, 0xbe, 0x05, 0x65, 0xb6, 0xeb, 0x3e, 0x40, + 0x13, 0xac, 0x78, 0x72, 0x08, 0x01, 0x47, 0xf3, 0x13, 0x89, 0x7c, 0x88, 0xc8, 0x42, 0x84, 
0x7c, + 0xc8, 0x90, 0x2f, 0xba, 0x3b, 0xf0, 0x09, 0x54, 0x45, 0xa9, 0x38, 0xa7, 0xd8, 0xdd, 0x68, 0xd7, + 0xc7, 0xe6, 0xdb, 0xac, 0xf0, 0xea, 0xf8, 0xe4, 0x8b, 0x8c, 0x0f, 0x65, 0xc6, 0xd2, 0xcb, 0x32, + 0x3e, 0xe4, 0x09, 0x63, 0x4f, 0x5d, 0xc7, 0xc0, 0x60, 0x36, 0xc9, 0xc7, 0xde, 0x87, 0x55, 0xc9, + 0xae, 0xe6, 0xae, 0xed, 0xba, 0xde, 0xdc, 0x1d, 0x51, 0x79, 0x31, 0x94, 0x08, 0xd4, 0x71, 0x84, + 0x31, 0xc6, 0xea, 0x95, 0x01, 0x1e, 0x14, 0x77, 0x1f, 0x0a, 0x5c, 0x2e, 0xe7, 0xc2, 0x47, 0x3a, + 0xe3, 0xe2, 0x24, 0xe4, 0x1e, 0x14, 0xb8, 0x78, 0x9e, 0xbd, 0x96, 0xd9, 0x70, 0x02, 0xa3, 0x05, + 0x84, 0x65, 0x3c, 0xa4, 0xa1, 0xef, 0x8c, 0x82, 0xe8, 0xce, 0x69, 0x81, 0xe9, 0x9f, 0xbc, 0xae, + 0xc8, 0x72, 0x1b, 0x51, 0xa2, 0x8e, 0xca, 0x69, 0xd8, 0xc1, 0xb4, 0x1a, 0x2b, 0x43, 0x88, 0x4b, + 0x13, 0xd8, 0x38, 0xa1, 0xe1, 0x73, 0x4a, 0x5d, 0x97, 0x09, 0x43, 0x23, 0xea, 0x86, 0xbe, 0x3d, + 0x61, 0x93, 0xc4, 0x7b, 0xf0, 0x68, 0xa1, 0xd4, 0xc8, 0x06, 0xb2, 0x13, 0x65, 0x6c, 0xab, 0x7c, + 0x9c, 0x77, 0xac, 0x9f, 0xa4, 0xe1, 0xb6, 0x7e, 0x19, 0xb6, 0xae, 0xcf, 0x94, 0x72, 0xdf, 0xfc, + 0x5e, 0x9c, 0xab, 0x28, 0x3f, 0xe0, 0xc4, 0xb3, 0x43, 0xde, 0x1a, 0x9d, 0xb3, 0xf4, 0xa0, 0xa2, + 0x61, 0xa2, 0xb3, 0x3f, 0x83, 0xc2, 0x1d, 0x4f, 0xb0, 0x13, 0xc9, 0xf5, 0xfc, 0x29, 0xfa, 0xdd, + 0xc6, 0x56, 0x54, 0x7a, 0xc6, 0x5c, 0x8e, 0xe0, 0x18, 0x86, 0x61, 0x6c, 0xc3, 0x32, 0x4a, 0xf6, + 0xda, 0x41, 0xf7, 0x22, 0x61, 0xd0, 0x58, 0x03, 0xd2, 0xe3, 0xbc, 0x4b, 0x0f, 0x10, 0xfc, 0x77, + 0x39, 0xa8, 0x68, 0x60, 0x76, 0x1a, 0x61, 0x54, 0xa5, 0x35, 0x76, 0xec, 0x29, 0x95, 0x4e, 0xce, + 0x9a, 0x59, 0x43, 0xe8, 0xae, 0x00, 0xb2, 0xb3, 0xd8, 0xbe, 0x38, 0xb3, 0xbc, 0x79, 0x68, 0x8d, + 0xe9, 0x99, 0x4f, 0x65, 0x2b, 0xab, 0xf6, 0xc5, 0x59, 0x7f, 0x1e, 0xee, 0x22, 0x8c, 0x51, 0x31, + 0x5e, 0xa2, 0x51, 0x89, 0x20, 0xbb, 0xa9, 0x7d, 0x19, 0x51, 0x89, 0x68, 0x54, 0xbe, 0x32, 0xf3, + 0x2a, 0x1a, 0x95, 0x6b, 0x8b, 0xc9, 0x03, 0xb4, 0xb0, 0x78, 0x80, 0x7e, 0x04, 0x1b, 0xfc, 0x00, + 0x15, 0xac, 0xd9, 0x4a, 0xec, 
0xe4, 0x35, 0xc4, 0x8a, 0x4e, 0x6a, 0x62, 0x6f, 0x83, 0xf5, 0x40, + 0xb2, 0xa5, 0xc0, 0xf9, 0x09, 0x67, 0x64, 0x19, 0x93, 0xf5, 0x4c, 0x14, 0x3e, 0x70, 0x7e, 0x42, + 0x19, 0x25, 0x86, 0xf3, 0xe8, 0x94, 0xe2, 0x66, 0xd0, 0xd4, 0x71, 0x93, 0x94, 0xf6, 0x65, 0x9c, + 0xb2, 0x2c, 0x28, 0xed, 0x4b, 0x9d, 0xf2, 0x11, 0x6c, 0x4e, 0xe9, 0xd8, 0xb1, 0xe3, 0xc5, 0x5a, + 0x91, 0xe0, 0xb6, 0xc6, 0xd1, 0x5a, 0x9e, 0x01, 0x57, 0xdc, 0xd9, 0x68, 0xfc, 0xc4, 0x9b, 0x9e, + 0x38, 0x5c, 0x66, 0xe1, 0x01, 0x46, 0x79, 0xb3, 0xee, 0xce, 0xa7, 0xbf, 0x84, 0x60, 0x96, 0x25, + 0x30, 0x6a, 0x50, 0x19, 0x84, 0xde, 0x4c, 0x4e, 0x73, 0x1d, 0xaa, 0x3c, 0x29, 0xee, 0x54, 0xdf, + 0x82, 0x9b, 0xc8, 0x12, 0x86, 0xde, 0xcc, 0x9b, 0x78, 0x67, 0x57, 0x31, 0x3b, 0xde, 0xbf, 0xcc, + 0xc0, 0x6a, 0x0c, 0x2b, 0xd8, 0xeb, 0x47, 0x9c, 0x9f, 0xa9, 0x1b, 0xa1, 0x99, 0xd8, 0x75, 0x20, + 0x36, 0x5f, 0x9c, 0x90, 0x33, 0x33, 0x79, 0x4b, 0xb4, 0x15, 0x3d, 0x0b, 0x23, 0x33, 0x72, 0x96, + 0xd2, 0x5c, 0x64, 0x29, 0x22, 0xbf, 0x7c, 0x30, 0x46, 0x16, 0xf1, 0x0b, 0xe2, 0xf6, 0xd6, 0x58, + 0x74, 0x39, 0x17, 0xbf, 0xdf, 0xa1, 0xdb, 0xfc, 0x64, 0x0b, 0x22, 0x43, 0x60, 0x60, 0xfc, 0x9d, + 0x0c, 0x40, 0xd4, 0x3a, 0xbc, 0x61, 0xa2, 0xe4, 0x96, 0x0c, 0xc6, 0xf6, 0x6a, 0x32, 0xca, 0x1b, + 0x50, 0x55, 0x61, 0xe0, 0x91, 0x24, 0x54, 0x91, 0x30, 0x26, 0x0e, 0xbd, 0x03, 0xcb, 0x67, 0x13, + 0xef, 0x04, 0x25, 0x56, 0x21, 0xb7, 0xf0, 0x28, 0x82, 0x3a, 0x07, 0x4b, 0x69, 0x24, 0x92, 0x9b, + 0xf2, 0xa9, 0x91, 0xe2, 0xba, 0x14, 0x64, 0xfc, 0xa5, 0xac, 0x8a, 0x35, 0x8d, 0x46, 0xe2, 0xc5, + 0xea, 0xdd, 0x4f, 0x13, 0x8d, 0xf3, 0x22, 0xf7, 0xe2, 0x63, 0xa8, 0xfb, 0xfc, 0x50, 0x92, 0x27, + 0x56, 0xfe, 0x05, 0x27, 0x56, 0xcd, 0x8f, 0x49, 0x3a, 0xdf, 0x85, 0x86, 0x3d, 0xbe, 0xa0, 0x7e, + 0xe8, 0xa0, 0xb5, 0x1e, 0xe5, 0x63, 0x11, 0xdd, 0xa9, 0xc1, 0x51, 0x10, 0x7d, 0x07, 0x96, 0xc5, + 0x3d, 0x7f, 0x45, 0x29, 0xde, 0x1f, 0x8b, 0xc0, 0x8c, 0xd0, 0xf8, 0x07, 0x32, 0xb8, 0x35, 0x3e, + 0xbb, 0x2f, 0x1e, 0x15, 0xbd, 0x87, 0xd9, 0x45, 0x07, 0xaa, 0x58, 
0x48, 0xc2, 0x09, 0x20, 0xf8, + 0x11, 0x07, 0x0a, 0x17, 0x40, 0x7c, 0x58, 0xf3, 0xaf, 0x32, 0xac, 0xc6, 0xbf, 0xce, 0x40, 0x71, + 0xdf, 0x9b, 0xed, 0x3b, 0xfc, 0x8a, 0x04, 0x6e, 0x13, 0xe5, 0xa3, 0x5a, 0x62, 0x49, 0x0c, 0x1d, + 0x7a, 0xc1, 0x4d, 0xc9, 0x54, 0x31, 0xaf, 0x16, 0x17, 0xf3, 0x3e, 0x87, 0x5b, 0xe8, 0x02, 0xf4, + 0xbd, 0x99, 0xe7, 0xb3, 0xad, 0x6a, 0x4f, 0xb8, 0xb8, 0xe7, 0xb9, 0xe1, 0xb9, 0xe4, 0x9d, 0x37, + 0x4f, 0x29, 0x3d, 0xd2, 0x28, 0x0e, 0x15, 0x01, 0xde, 0x92, 0x9e, 0x84, 0x17, 0x16, 0xd7, 0xd0, + 0x85, 0x3c, 0xca, 0x39, 0xea, 0x32, 0x43, 0x74, 0x10, 0x8e, 0x12, 0xa9, 0xf1, 0x29, 0x94, 0x95, + 0xb1, 0x87, 0xbc, 0x0b, 0xe5, 0x73, 0x6f, 0x26, 0x2c, 0x42, 0x99, 0xd8, 0x6d, 0x52, 0xd1, 0x6b, + 0xb3, 0x74, 0xce, 0x7f, 0x04, 0xc6, 0x1f, 0x16, 0xa1, 0xd8, 0x75, 0x2f, 0x3c, 0x67, 0x84, 0xe1, + 0xb1, 0x53, 0x3a, 0xf5, 0xe4, 0x33, 0x24, 0xec, 0x37, 0x46, 0x77, 0x45, 0xaf, 0x88, 0xe5, 0x44, + 0x74, 0x97, 0x7a, 0x3f, 0x6c, 0x1d, 0x96, 0x7c, 0xfd, 0x19, 0xb0, 0x82, 0x8f, 0x97, 0x0a, 0xd4, + 0x79, 0x59, 0xd0, 0x1e, 0x77, 0x61, 0x65, 0xf1, 0xc8, 0x45, 0x1c, 0x32, 0x7e, 0xd3, 0xb9, 0x8c, + 0x10, 0x1c, 0xb0, 0xd7, 0xa0, 0x28, 0x2e, 0x6f, 0xf2, 0xab, 0x64, 0xfc, 0x16, 0x80, 0x00, 0xe1, + 0x6a, 0xf0, 0x29, 0x77, 0xe1, 0x2a, 0x41, 0x36, 0x67, 0x56, 0x25, 0x70, 0x97, 0xad, 0xb5, 0x3b, + 0x50, 0xe1, 0xf4, 0x9c, 0xa4, 0x24, 0xa2, 0x4a, 0x11, 0x84, 0x04, 0x29, 0xaf, 0xe9, 0x95, 0x53, + 0x5f, 0xd3, 0xc3, 0xf8, 0x67, 0xc5, 0x65, 0x79, 0x17, 0x81, 0xbf, 0xa1, 0xa6, 0xc1, 0xe5, 0x13, + 0x95, 0xc2, 0xa6, 0xc2, 0x1f, 0x01, 0x90, 0x36, 0x95, 0x37, 0xa1, 0x76, 0x6a, 0x4f, 0x26, 0x27, + 0xf6, 0xe8, 0x19, 0x37, 0x05, 0x54, 0xb9, 0xf5, 0x53, 0x02, 0xd1, 0x16, 0x70, 0x07, 0x2a, 0xda, + 0x2c, 0x63, 0xc8, 0x68, 0xde, 0x84, 0x68, 0x7e, 0x93, 0x16, 0xbe, 0xfa, 0x2b, 0x58, 0xf8, 0xb4, + 0xd0, 0xd9, 0xe5, 0x78, 0xe8, 0xec, 0x2d, 0xe4, 0xa6, 0x22, 0x68, 0xb1, 0xc1, 0x1f, 0xec, 0xb2, + 0xc7, 0x63, 0x0c, 0x5a, 0x44, 0x43, 0x16, 0x1f, 0x3c, 0x8e, 0x5f, 0xe1, 0xba, 0x04, 0x87, 0x71, + 0x92, 
0xdb, 0xdc, 0x4c, 0x3d, 0xb3, 0x9d, 0x31, 0xde, 0xe4, 0xe0, 0xd6, 0x83, 0xa2, 0x3d, 0x0d, + 0x8f, 0x6c, 0x07, 0xc3, 0xb5, 0x24, 0x1a, 0x4f, 0xc7, 0x55, 0x3e, 0xfe, 0x02, 0x3d, 0xe0, 0x4f, + 0x5c, 0x28, 0x8a, 0xa9, 0xba, 0xc5, 0x6f, 0x56, 0x04, 0x09, 0xae, 0x83, 0x0f, 0x30, 0xca, 0x27, + 0xa4, 0x78, 0x4f, 0xbf, 0xfe, 0xf0, 0x96, 0x0a, 0x3e, 0xc0, 0x55, 0x2a, 0xff, 0x73, 0xe7, 0x18, + 0xa7, 0x64, 0xc2, 0x1d, 0xf7, 0xd1, 0x6d, 0xc4, 0xe4, 0x5f, 0x41, 0x8a, 0x3e, 0x3a, 0x4e, 0x40, + 0x3e, 0xd5, 0xf4, 0xd7, 0x26, 0x12, 0xbf, 0x96, 0x28, 0xff, 0xba, 0xab, 0x72, 0xb7, 0x01, 0x9c, + 0x80, 0x9d, 0x32, 0x01, 0x75, 0xc7, 0x78, 0xdd, 0xbe, 0x64, 0x96, 0x9d, 0xe0, 0x29, 0x07, 0x7c, + 0xbb, 0x8a, 0x6d, 0x0b, 0xaa, 0x7a, 0x37, 0x49, 0x09, 0xf2, 0xfd, 0xa3, 0x4e, 0xaf, 0x71, 0x83, + 0x54, 0xa0, 0x38, 0xe8, 0x0c, 0x87, 0x07, 0xe8, 0xe9, 0xab, 0x42, 0x49, 0x5d, 0xa6, 0xcd, 0xb2, + 0x54, 0xab, 0xdd, 0xee, 0x1c, 0x0d, 0x3b, 0xbb, 0x8d, 0xdc, 0x8f, 0xf2, 0xa5, 0x6c, 0x23, 0x67, + 0xfc, 0x51, 0x0e, 0x2a, 0xda, 0x28, 0xbc, 0x98, 0x19, 0xdf, 0x06, 0x40, 0x4d, 0x32, 0x8a, 0x69, + 0xcd, 0x9b, 0x65, 0x06, 0xe1, 0x93, 0xaf, 0xfb, 0x28, 0xc4, 0xc3, 0x32, 0xd2, 0x47, 0xf1, 0x26, + 0xd4, 0xf8, 0x8b, 0x24, 0xba, 0xbf, 0xb6, 0x60, 0x56, 0x39, 0x50, 0xb0, 0x6a, 0xbc, 0x9a, 0x8f, + 0x44, 0x78, 0xe9, 0x51, 0x3c, 0xa6, 0xc4, 0x41, 0x78, 0xed, 0x11, 0xef, 0xac, 0x06, 0xde, 0xe4, + 0x82, 0x72, 0x0a, 0x2e, 0x11, 0x56, 0x04, 0x6c, 0x28, 0x9e, 0x3d, 0x10, 0xfc, 0x50, 0xbb, 0x1b, + 0x5e, 0x30, 0xab, 0x1c, 0x28, 0x2a, 0xfa, 0x9e, 0x5c, 0x40, 0x3c, 0x7a, 0x65, 0x73, 0x71, 0x35, + 0xc4, 0x16, 0xcf, 0xc1, 0x82, 0x19, 0xb1, 0x8c, 0x0b, 0xe3, 0x3b, 0x8b, 0xf9, 0x5e, 0x6e, 0x4e, + 0x24, 0xef, 0x02, 0x99, 0xce, 0x66, 0x56, 0x8a, 0x81, 0x2f, 0x6f, 0x2e, 0x4f, 0x67, 0xb3, 0xa1, + 0x66, 0xff, 0xfa, 0x16, 0x6c, 0x8f, 0x5f, 0x03, 0x69, 0xb1, 0x0d, 0x8c, 0x4d, 0x54, 0xaa, 0x58, + 0xc4, 0x96, 0x33, 0x3a, 0x5b, 0x4e, 0xe1, 0x7e, 0xd9, 0x54, 0xee, 0xf7, 0x22, 0x3e, 0x61, 0xec, + 0x41, 0xe5, 0x48, 0x7b, 0xb2, 0xf1, 0x2e, 
0x3b, 0x21, 0xe4, 0x63, 0x8d, 0xfc, 0xec, 0xe0, 0x36, + 0x45, 0x5f, 0xbc, 0xd1, 0xa8, 0xb5, 0x26, 0xab, 0xb5, 0xc6, 0xf8, 0x5b, 0x19, 0xfe, 0xc4, 0x95, + 0x6a, 0x7c, 0xf4, 0x4a, 0xa4, 0x74, 0xbf, 0x45, 0x4f, 0x38, 0x54, 0xa4, 0xdb, 0x4d, 0xbc, 0xbe, + 0x80, 0x4d, 0xb3, 0xbc, 0xd3, 0xd3, 0x80, 0xca, 0x18, 0x8f, 0x0a, 0xc2, 0xfa, 0x08, 0x92, 0xc2, + 0x37, 0x93, 0xf0, 0x1d, 0x5e, 0x7e, 0x20, 0x02, 0x3b, 0x98, 0xf0, 0x7d, 0x68, 0x5f, 0x8a, 0x5a, + 0x03, 0x26, 0x82, 0x08, 0xff, 0x80, 0xbc, 0xc2, 0xac, 0xd2, 0xc6, 0x5f, 0x17, 0xaf, 0x4c, 0x24, + 0xc7, 0xf7, 0x3e, 0x94, 0x54, 0xa9, 0xf1, 0x13, 0x56, 0x52, 0x2a, 0x3c, 0x3b, 0xc7, 0xd1, 0x18, + 0x12, 0x6b, 0x31, 0xdf, 0x5c, 0xe8, 0xe3, 0xe9, 0x6a, 0xad, 0x7e, 0x0f, 0xc8, 0xa9, 0xe3, 0x27, + 0x89, 0xf9, 0x66, 0x6b, 0x20, 0x46, 0xa3, 0x36, 0x8e, 0x61, 0x55, 0x72, 0x09, 0x4d, 0x23, 0x88, + 0x4f, 0x5e, 0xe6, 0x25, 0x4c, 0x3e, 0xbb, 0xc0, 0xe4, 0x8d, 0xdf, 0x28, 0x40, 0x51, 0x3e, 0x7f, + 0x9a, 0xf6, 0x64, 0x67, 0x39, 0xfe, 0x64, 0x67, 0x33, 0xf6, 0x90, 0x1b, 0x4e, 0xbd, 0x38, 0xef, + 0xdf, 0x49, 0x1e, 0xd9, 0x9a, 0xaf, 0x22, 0x76, 0x6c, 0x0b, 0x5f, 0x45, 0x21, 0xee, 0xab, 0x48, + 0x7b, 0xc6, 0x94, 0x8b, 0x9e, 0x0b, 0xcf, 0x98, 0xde, 0x02, 0x2e, 0x47, 0x68, 0xc1, 0x6d, 0x25, + 0x04, 0x88, 0x6b, 0xf8, 0x9a, 0xd8, 0x51, 0x4a, 0x8a, 0x1d, 0xaf, 0x2c, 0x12, 0x7c, 0x04, 0x4b, + 0xfc, 0xc5, 0x1a, 0x71, 0x25, 0x5b, 0x1e, 0x1c, 0x62, 0xac, 0xe4, 0x7f, 0x7e, 0x67, 0xc2, 0x14, + 0xb4, 0xfa, 0x9b, 0x80, 0x95, 0xd8, 0x9b, 0x80, 0xba, 0x0f, 0xa5, 0x1a, 0xf7, 0xa1, 0xdc, 0x83, + 0x86, 0x1a, 0x38, 0xb4, 0x48, 0xba, 0x81, 0xb8, 0x8e, 0x59, 0x97, 0x70, 0xc6, 0x0d, 0x7b, 0x41, + 0x74, 0xf0, 0xd5, 0x63, 0x07, 0x1f, 0xe3, 0x55, 0xad, 0x30, 0xa4, 0xd3, 0x59, 0x28, 0x0f, 0x3e, + 0xed, 0xe5, 0x58, 0x3e, 0xf3, 0xfc, 0xbe, 0x88, 0x9c, 0x5e, 0xbe, 0x3a, 0x76, 0xa0, 0x7e, 0x6a, + 0x3b, 0x93, 0xb9, 0x4f, 0x2d, 0x9f, 0xda, 0x81, 0xe7, 0xe2, 0xe6, 0x8f, 0xce, 0x60, 0xd1, 0xc5, + 0x3d, 0x4e, 0x63, 0x22, 0x89, 0x59, 0x3b, 0xd5, 0x93, 0x78, 0xeb, 0x4a, 0x1f, 
0x09, 0x76, 0x64, + 0x89, 0x8b, 0xd9, 0x3c, 0x56, 0xa5, 0xdb, 0xb3, 0xf6, 0x0e, 0xba, 0x4f, 0xf6, 0x87, 0x8d, 0x0c, + 0x4b, 0x0e, 0x8e, 0xdb, 0xed, 0x4e, 0x67, 0x17, 0x8f, 0x30, 0x80, 0xa5, 0xbd, 0x56, 0xf7, 0x40, + 0x1c, 0x60, 0xf9, 0x46, 0xc1, 0xf8, 0x27, 0x59, 0xa8, 0x68, 0xbd, 0x21, 0x8f, 0xd4, 0x24, 0xf0, + 0xa7, 0x20, 0x6e, 0x2f, 0xf6, 0x78, 0x5b, 0x72, 0x78, 0x6d, 0x16, 0xd4, 0x1b, 0xb1, 0xd9, 0x6b, + 0xdf, 0x88, 0x25, 0x6f, 0xc3, 0xb2, 0xcd, 0x4b, 0x50, 0x83, 0x2e, 0x8c, 0xfb, 0x02, 0x2c, 0xc6, + 0xfc, 0x6d, 0xf1, 0x2c, 0x85, 0x38, 0xa6, 0x18, 0x5d, 0x5e, 0x06, 0x6d, 0xaa, 0x93, 0x0a, 0xe7, + 0xa6, 0x28, 0x46, 0x46, 0x38, 0xe3, 0xd5, 0x81, 0x2f, 0xc6, 0x4b, 0xa2, 0xf9, 0x55, 0x4c, 0x6d, + 0x85, 0x57, 0x4d, 0x95, 0x36, 0x3e, 0x06, 0x88, 0xfa, 0x13, 0x1f, 0xbe, 0x1b, 0xf1, 0xe1, 0xcb, + 0x68, 0xc3, 0x97, 0x35, 0xfe, 0xbe, 0x60, 0x5d, 0x62, 0x2e, 0x94, 0xa9, 0xef, 0x7b, 0x20, 0x8d, + 0x8f, 0x16, 0x06, 0x79, 0xcf, 0x26, 0x34, 0x94, 0xb7, 0x49, 0x57, 0x04, 0xa6, 0xab, 0x10, 0x0b, + 0xac, 0x36, 0xbb, 0xc8, 0x6a, 0xdf, 0x80, 0x2a, 0xbe, 0x73, 0x26, 0x2a, 0x12, 0xec, 0xaa, 0x32, + 0xb5, 0x2f, 0x65, 0xdd, 0x31, 0x1e, 0x9b, 0x4f, 0xf0, 0xd8, 0xbf, 0x91, 0xe1, 0x8f, 0xe2, 0x44, + 0x0d, 0x8d, 0x98, 0xac, 0x2a, 0x33, 0xce, 0x64, 0x05, 0xa9, 0xa9, 0xf0, 0xd7, 0x30, 0xce, 0x6c, + 0x3a, 0xe3, 0x4c, 0x67, 0xc9, 0xb9, 0x54, 0x96, 0x6c, 0x6c, 0x41, 0x73, 0x97, 0xb2, 0xa1, 0x68, + 0x4d, 0x26, 0x89, 0xb1, 0x34, 0x6e, 0xc1, 0xcd, 0x14, 0x9c, 0xb0, 0xda, 0xfc, 0x66, 0x06, 0xd6, + 0x5b, 0xfc, 0x2d, 0x8c, 0x6f, 0xed, 0xba, 0xe7, 0x67, 0x70, 0x53, 0x45, 0x6c, 0x6b, 0xb7, 0xc8, + 0xf4, 0x87, 0x8c, 0x64, 0xb0, 0xb7, 0x76, 0x4f, 0x81, 0x9d, 0x99, 0x46, 0x13, 0x36, 0x92, 0xad, + 0x11, 0x0d, 0xdd, 0x83, 0x95, 0x5d, 0x7a, 0x32, 0x3f, 0x3b, 0xa0, 0x17, 0x51, 0x1b, 0x09, 0xe4, + 0x83, 0x73, 0xef, 0xb9, 0x58, 0x18, 0xf8, 0x1b, 0x43, 0x3a, 0x19, 0x8d, 0x15, 0xcc, 0xe8, 0x48, + 0x5a, 0xfd, 0x11, 0x32, 0x98, 0xd1, 0x91, 0xf1, 0x08, 0x88, 0x5e, 0x8e, 0x98, 0x45, 0xa6, 0x92, + 0xcd, 0x4f, 0xac, 
0xe0, 0x2a, 0x08, 0xe9, 0x54, 0xde, 0x90, 0x84, 0x60, 0x7e, 0x32, 0xe0, 0x10, + 0xe3, 0x1d, 0xa8, 0x1e, 0xd9, 0x57, 0x26, 0xfd, 0x5a, 0x5c, 0x44, 0xdc, 0x84, 0xe2, 0xcc, 0xbe, + 0x62, 0xbc, 0x58, 0x39, 0x00, 0x11, 0x6d, 0xfc, 0xc3, 0x3c, 0x2c, 0x71, 0x4a, 0x72, 0x97, 0xbf, + 0xde, 0xee, 0xb8, 0xc8, 0x0b, 0xe5, 0xa9, 0xa4, 0x81, 0x16, 0x0e, 0xae, 0xec, 0xe2, 0xc1, 0x25, + 0xac, 0x95, 0xf2, 0xa1, 0x35, 0xe9, 0xaa, 0x71, 0xe7, 0x53, 0xf9, 0xba, 0x5a, 0xfc, 0x29, 0x88, + 0x7c, 0xf4, 0xea, 0x3f, 0xbf, 0x06, 0x1f, 0x77, 0xa6, 0x47, 0x8a, 0x1f, 0x6f, 0x9d, 0x3c, 0x8f, + 0xc5, 0x99, 0xa5, 0x83, 0x52, 0xb5, 0xcb, 0xa2, 0xbc, 0x5d, 0x1b, 0xd7, 0x2e, 0x17, 0xb4, 0xc8, + 0xd2, 0xcb, 0xb5, 0x48, 0x6e, 0xc6, 0x7c, 0x81, 0x16, 0x09, 0xaf, 0xa0, 0x45, 0xbe, 0x82, 0x23, + 0xfb, 0x26, 0x94, 0x50, 0xc8, 0xd2, 0x8e, 0x30, 0x26, 0x5c, 0xb1, 0x23, 0xec, 0x13, 0x4d, 0xcf, + 0xe2, 0x51, 0x34, 0xda, 0x19, 0x62, 0xd2, 0xaf, 0x7f, 0x3e, 0x0e, 0xc2, 0xaf, 0xa0, 0x28, 0xa0, + 0x6c, 0x41, 0xbb, 0xf6, 0x54, 0x3e, 0xe6, 0x89, 0xbf, 0xd9, 0xb0, 0xe1, 0x03, 0x7b, 0x5f, 0xcf, + 0x1d, 0x9f, 0x8e, 0xe5, 0x33, 0x5f, 0x0e, 0xee, 0x6f, 0x06, 0x61, 0x1d, 0x64, 0x3a, 0x9f, 0xeb, + 0x3d, 0x77, 0x05, 0xdf, 0x2a, 0x3a, 0xc1, 0x53, 0x96, 0x34, 0x08, 0x34, 0xf0, 0xe9, 0xdf, 0x99, + 0xe7, 0x4b, 0x09, 0xc1, 0xf8, 0xdd, 0x0c, 0x34, 0xc4, 0xee, 0x52, 0x38, 0x5d, 0xe5, 0x2a, 0x5c, + 0x17, 0xf4, 0xf1, 0xe2, 0x47, 0xbb, 0x0c, 0xa8, 0xa1, 0xa5, 0x49, 0x89, 0x0b, 0xdc, 0x52, 0x56, + 0x61, 0xc0, 0x3d, 0x21, 0x32, 0xbc, 0x0e, 0x15, 0x19, 0x70, 0x3e, 0x75, 0x26, 0xf2, 0x03, 0x1f, + 0x3c, 0xe2, 0xfc, 0xd0, 0x99, 0x48, 0x69, 0xc3, 0xb7, 0xc5, 0x6d, 0xef, 0x0c, 0x4a, 0x1b, 0xa6, + 0x1d, 0x52, 0xe3, 0x1f, 0x67, 0x60, 0x45, 0xeb, 0x8a, 0xd8, 0xb7, 0xdf, 0x87, 0xaa, 0x7a, 0x73, + 0x9b, 0x2a, 0x31, 0x77, 0x33, 0xce, 0xa3, 0xa2, 0x6c, 0x95, 0x91, 0x82, 0x04, 0xac, 0x31, 0x63, + 0xfb, 0x8a, 0x47, 0x45, 0xcf, 0xa7, 0x52, 0x93, 0x1c, 0xdb, 0x57, 0x7b, 0x94, 0x0e, 0xe6, 0x53, + 0x72, 0x17, 0xaa, 0xcf, 0x29, 0x7d, 0xa6, 0x08, 0x38, 
0xeb, 0x05, 0x06, 0x13, 0x14, 0x06, 0xd4, + 0xa6, 0x9e, 0x1b, 0x9e, 0x2b, 0x12, 0x21, 0xe2, 0x23, 0x90, 0xd3, 0x18, 0x7f, 0x90, 0x85, 0x55, + 0x6e, 0xcf, 0x14, 0x76, 0x64, 0xc1, 0xba, 0x9a, 0xb0, 0xc4, 0x4d, 0xbb, 0x9c, 0x79, 0xed, 0xdf, + 0x30, 0x45, 0x9a, 0x7c, 0xf4, 0x8a, 0x36, 0x58, 0x79, 0xa1, 0xfc, 0x9a, 0xe1, 0xcf, 0x2d, 0x0e, + 0xff, 0xf5, 0xc3, 0x9b, 0xe6, 0x55, 0x2e, 0xa4, 0x79, 0x95, 0x5f, 0xc5, 0x97, 0xbb, 0x70, 0xf5, + 0xb9, 0xb8, 0xf8, 0x42, 0xe8, 0x23, 0xd8, 0x8c, 0xd1, 0x20, 0xb7, 0x76, 0x4e, 0x1d, 0x2a, 0xdf, + 0x20, 0x5a, 0xd3, 0xa8, 0x07, 0x12, 0xb7, 0x53, 0x84, 0x42, 0x30, 0xf2, 0x66, 0xd4, 0xd8, 0x80, + 0xb5, 0xf8, 0xa8, 0x8a, 0x63, 0xe2, 0xb7, 0x33, 0xd0, 0x14, 0x31, 0x40, 0x8e, 0x7b, 0xb6, 0xef, + 0x04, 0xa1, 0xe7, 0xab, 0xb7, 0xa9, 0x6f, 0x03, 0xf0, 0x8f, 0x8d, 0xa0, 0xe2, 0x2e, 0x1e, 0xcd, + 0x41, 0x08, 0xaa, 0xed, 0x37, 0xa1, 0x44, 0xdd, 0x31, 0x47, 0xf2, 0xd5, 0x50, 0xa4, 0xee, 0x58, + 0x2a, 0xfd, 0x0b, 0xc7, 0x70, 0x2d, 0x2e, 0x60, 0x88, 0xe7, 0x1f, 0xd8, 0xe8, 0xd0, 0x0b, 0x14, + 0x07, 0xf2, 0xea, 0xf9, 0x87, 0x43, 0xfb, 0x12, 0x23, 0x6a, 0x03, 0xe3, 0x2f, 0x67, 0x61, 0x39, + 0x6a, 0x1f, 0x7f, 0x00, 0xe7, 0xc5, 0x4f, 0xf9, 0xdc, 0x15, 0xcb, 0xc1, 0x61, 0xca, 0x92, 0x66, + 0xe5, 0x2d, 0xf1, 0xcd, 0xd9, 0x75, 0x89, 0x01, 0x15, 0x49, 0xe1, 0xcd, 0x43, 0xed, 0x55, 0xd3, + 0x32, 0x27, 0xe9, 0xcf, 0x43, 0xa6, 0xdd, 0x32, 0x35, 0xdf, 0x71, 0x85, 0x7e, 0x59, 0xb0, 0xa7, + 0x61, 0x17, 0xbf, 0x68, 0xc3, 0xc0, 0x2c, 0x1b, 0x9f, 0x48, 0x46, 0xc5, 0xe8, 0x1b, 0x5c, 0xd9, + 0xe1, 0x33, 0x87, 0x8a, 0x8e, 0xae, 0x09, 0xf0, 0x47, 0xf8, 0x95, 0x26, 0xf0, 0x3a, 0x54, 0x78, + 0xe1, 0xd1, 0x4d, 0x77, 0x7c, 0x62, 0x2c, 0xec, 0xba, 0x88, 0x17, 0x16, 0x37, 0x6f, 0x1e, 0xb3, + 0x33, 0x00, 0xaf, 0x0a, 0x43, 0x6c, 0x7e, 0x33, 0x03, 0x37, 0x53, 0xa6, 0x4d, 0xec, 0xf2, 0x36, + 0xac, 0x9c, 0x2a, 0xa4, 0x1c, 0x5d, 0xbe, 0xd5, 0x37, 0x24, 0x5b, 0x8d, 0x8f, 0xa9, 0xd9, 0x38, + 0x8d, 0x03, 0x22, 0x0d, 0x97, 0xcf, 0x60, 0xec, 0x1d, 0x05, 0x14, 0xa7, 0xf8, 0x34, 0x72, 
0xe5, + 0xf2, 0x08, 0xb6, 0x3a, 0x97, 0x8c, 0x63, 0xa8, 0xb0, 0xdc, 0xd1, 0xb3, 0xb9, 0xf4, 0x7c, 0x25, + 0xac, 0xf9, 0x99, 0x57, 0xb2, 0xe6, 0x8f, 0xf9, 0x4d, 0x68, 0x55, 0xd6, 0x4f, 0x53, 0x08, 0x1e, + 0xa0, 0x2c, 0xcf, 0x09, 0x16, 0x21, 0x1f, 0x54, 0x60, 0x20, 0x5e, 0xa8, 0x11, 0xc0, 0xf2, 0xe1, + 0x7c, 0x12, 0x3a, 0x6d, 0x05, 0x22, 0x1f, 0x89, 0x3c, 0x58, 0x8f, 0x1c, 0xb5, 0xd4, 0x8a, 0x40, + 0x55, 0x84, 0x83, 0x35, 0x65, 0x05, 0x59, 0x8b, 0xf5, 0x2d, 0x4f, 0xe3, 0x35, 0x18, 0x37, 0x61, + 0x33, 0x4a, 0xf1, 0x61, 0x93, 0x47, 0xcd, 0xdf, 0xcc, 0xf0, 0xf0, 0x7d, 0x8e, 0x1b, 0xb8, 0xf6, + 0x2c, 0x38, 0xf7, 0x42, 0xd2, 0x81, 0xd5, 0xc0, 0x71, 0xcf, 0x26, 0x54, 0x2f, 0x3e, 0x10, 0x83, + 0xb0, 0x1e, 0x6f, 0x1b, 0xcf, 0x1a, 0x98, 0x2b, 0x3c, 0x47, 0x54, 0x5a, 0x40, 0x76, 0xae, 0x6b, + 0x64, 0xb4, 0x2c, 0x12, 0xa3, 0xb1, 0xd8, 0xf8, 0x2e, 0xd4, 0xe3, 0x15, 0x91, 0x4f, 0xc4, 0x03, + 0x02, 0x51, 0xab, 0x72, 0x89, 0xeb, 0xd3, 0xd1, 0x82, 0xa8, 0x44, 0x63, 0x1f, 0x18, 0x7f, 0x31, + 0x03, 0x4d, 0x93, 0xb2, 0x95, 0xab, 0xb5, 0x52, 0xae, 0x99, 0xef, 0x2f, 0x94, 0x7a, 0x7d, 0x5f, + 0xe5, 0xbb, 0x04, 0xb2, 0x45, 0xef, 0x5d, 0x3b, 0x19, 0xfb, 0x37, 0x16, 0x7a, 0xb4, 0x53, 0x82, + 0x25, 0x4e, 0x62, 0x6c, 0xc2, 0xba, 0x68, 0x8f, 0x6c, 0x4b, 0xe4, 0xaa, 0x8d, 0xd5, 0x18, 0x73, + 0xd5, 0x6e, 0x41, 0x93, 0xdf, 0xf3, 0xd5, 0x3b, 0x21, 0x32, 0xee, 0x02, 0x39, 0xb4, 0x47, 0xb6, + 0xef, 0x79, 0xee, 0x11, 0xf5, 0x45, 0x30, 0x34, 0x4a, 0x98, 0xe8, 0xc9, 0x94, 0xa2, 0x30, 0x4f, + 0xc9, 0xb7, 0x9c, 0x3d, 0x57, 0xc6, 0x7e, 0xf1, 0x94, 0xe1, 0xc3, 0xea, 0x8e, 0xfd, 0x8c, 0xca, + 0x92, 0xe4, 0x10, 0x3d, 0x86, 0xca, 0x4c, 0x15, 0x2a, 0xc7, 0x5d, 0xbe, 0xa7, 0xb2, 0x58, 0xad, + 0xa9, 0x53, 0x33, 0x16, 0xe4, 0x7b, 0x5e, 0x88, 0x6f, 0x17, 0x48, 0x67, 0x98, 0x59, 0x66, 0xa0, + 0xa7, 0xf4, 0xaa, 0x3b, 0x36, 0x1e, 0xc2, 0x5a, 0xbc, 0x4e, 0xc1, 0x5a, 0xb6, 0xa0, 0x34, 0x15, + 0x30, 0xd1, 0x7a, 0x95, 0x66, 0xca, 0x08, 0x53, 0xf9, 0x64, 0x9e, 0xee, 0xae, 0x52, 0xa9, 0x1e, + 0xc3, 0xe6, 0x02, 0x46, 0x14, 
0x78, 0x17, 0xaa, 0x5a, 0x43, 0x78, 0x37, 0xf2, 0x4c, 0x64, 0x15, + 0x2d, 0x09, 0x8c, 0xcf, 0x60, 0x93, 0xeb, 0x63, 0x51, 0x76, 0x39, 0x04, 0x89, 0x5e, 0x64, 0x92, + 0xbd, 0xf8, 0x48, 0xaa, 0x79, 0x7a, 0xd6, 0xe8, 0xaa, 0xc0, 0x18, 0x71, 0x32, 0x7c, 0x47, 0x26, + 0x8d, 0x63, 0xd8, 0x58, 0x1c, 0x3e, 0xd6, 0xfe, 0x9f, 0x69, 0xc8, 0xe5, 0xf0, 0x44, 0x68, 0x35, + 0x3c, 0xff, 0x25, 0xc3, 0xc7, 0x27, 0x86, 0x12, 0xcd, 0x1c, 0x03, 0x99, 0xd2, 0xf0, 0xdc, 0x1b, + 0x5b, 0x8b, 0x35, 0x3f, 0x52, 0xd1, 0x43, 0xa9, 0x79, 0xb7, 0x0f, 0x31, 0xa3, 0x86, 0x11, 0x71, + 0xec, 0xd3, 0x24, 0x7c, 0x6b, 0x04, 0x1b, 0xe9, 0xc4, 0x29, 0x31, 0x37, 0x1f, 0xc6, 0x05, 0xf5, + 0xdb, 0xd7, 0x76, 0x9f, 0x35, 0x4b, 0x97, 0xdb, 0x7f, 0xab, 0x04, 0x45, 0x61, 0x25, 0x21, 0xdb, + 0x90, 0x1f, 0xc9, 0xf8, 0xcd, 0xe8, 0xad, 0x3a, 0x81, 0x95, 0xff, 0xdb, 0x18, 0xc5, 0xc9, 0xe8, + 0xc8, 0x63, 0xa8, 0xc7, 0x43, 0x18, 0x12, 0xef, 0x58, 0xc4, 0x63, 0x0f, 0x6a, 0xa3, 0x84, 0xb3, + 0xba, 0x1c, 0x09, 0x57, 0x5c, 0xe6, 0x2c, 0x9d, 0x6b, 0xd2, 0x97, 0xe7, 0x32, 0x7d, 0x2d, 0x38, + 0xb7, 0xad, 0x87, 0x8f, 0x3e, 0x16, 0x0f, 0x59, 0x54, 0x10, 0x38, 0x38, 0xb7, 0x1f, 0x3e, 0xfa, + 0x38, 0xa9, 0x89, 0x89, 0x67, 0x2c, 0x34, 0x4d, 0x6c, 0x0d, 0x0a, 0xfc, 0xc1, 0x6b, 0x1e, 0x88, + 0xc7, 0x13, 0xe4, 0x01, 0xac, 0x49, 0xc3, 0x9b, 0xb8, 0x32, 0xc1, 0x4f, 0xd1, 0x12, 0xbf, 0xa5, + 0x2a, 0x70, 0x03, 0x44, 0x71, 0x53, 0xdd, 0x06, 0x2c, 0x9d, 0x47, 0x2f, 0x98, 0xd7, 0x4c, 0x91, + 0x32, 0xfe, 0xa0, 0x00, 0x15, 0x6d, 0x50, 0x48, 0x15, 0x4a, 0x66, 0x67, 0xd0, 0x31, 0xbf, 0xe8, + 0xec, 0x36, 0x6e, 0x90, 0x7b, 0xf0, 0x56, 0xb7, 0xd7, 0xee, 0x9b, 0x66, 0xa7, 0x3d, 0xb4, 0xfa, + 0xa6, 0x25, 0x5f, 0x4c, 0x3c, 0x6a, 0x7d, 0x75, 0xd8, 0xe9, 0x0d, 0xad, 0xdd, 0xce, 0xb0, 0xd5, + 0x3d, 0x18, 0x34, 0x32, 0xe4, 0x35, 0x68, 0x46, 0x94, 0x12, 0xdd, 0x3a, 0xec, 0x1f, 0xf7, 0x86, + 0x8d, 0x2c, 0xb9, 0x03, 0xb7, 0xf6, 0xba, 0xbd, 0xd6, 0x81, 0x15, 0xd1, 0xb4, 0x0f, 0x86, 0x5f, + 0x58, 0x9d, 0x5f, 0x3c, 0xea, 0x9a, 0x5f, 0x35, 0x72, 0x69, 0x04, 
0xfb, 0xc3, 0x83, 0xb6, 0x2c, + 0x21, 0x4f, 0x6e, 0xc2, 0x3a, 0x27, 0xe0, 0x59, 0xac, 0x61, 0xbf, 0x6f, 0x0d, 0xfa, 0xfd, 0x5e, + 0xa3, 0x40, 0x56, 0xa0, 0xd6, 0xed, 0x7d, 0xd1, 0x3a, 0xe8, 0xee, 0x5a, 0x66, 0xa7, 0x75, 0x70, + 0xd8, 0x58, 0x22, 0xab, 0xb0, 0x9c, 0xa4, 0x2b, 0xb2, 0x22, 0x24, 0x5d, 0xbf, 0xd7, 0xed, 0xf7, + 0xac, 0x2f, 0x3a, 0xe6, 0xa0, 0xdb, 0xef, 0x35, 0x4a, 0x64, 0x03, 0x48, 0x1c, 0xb5, 0x7f, 0xd8, + 0x6a, 0x37, 0xca, 0x64, 0x1d, 0x56, 0xe2, 0xf0, 0xa7, 0x9d, 0xaf, 0x1a, 0x40, 0x9a, 0xb0, 0xc6, + 0x1b, 0x66, 0xed, 0x74, 0x0e, 0xfa, 0x5f, 0x5a, 0x87, 0xdd, 0x5e, 0xf7, 0xf0, 0xf8, 0xb0, 0x51, + 0xc1, 0x77, 0x6b, 0x3b, 0x1d, 0xab, 0xdb, 0x1b, 0x1c, 0xef, 0xed, 0x75, 0xdb, 0xdd, 0x4e, 0x6f, + 0xd8, 0xa8, 0xf2, 0x9a, 0xd3, 0x3a, 0x5e, 0x63, 0x19, 0xc4, 0xbd, 0x2a, 0x6b, 0xb7, 0x3b, 0x68, + 0xed, 0x1c, 0x74, 0x76, 0x1b, 0x75, 0x72, 0x1b, 0x6e, 0x0e, 0x3b, 0x87, 0x47, 0x7d, 0xb3, 0x65, + 0x7e, 0x25, 0xef, 0x5d, 0x59, 0x7b, 0xad, 0xee, 0xc1, 0xb1, 0xd9, 0x69, 0x2c, 0x93, 0x37, 0xe0, + 0xb6, 0xd9, 0xf9, 0xf1, 0x71, 0xd7, 0xec, 0xec, 0x5a, 0xbd, 0xfe, 0x6e, 0xc7, 0xda, 0xeb, 0xb4, + 0x86, 0xc7, 0x66, 0xc7, 0x3a, 0xec, 0x0e, 0x06, 0xdd, 0xde, 0x93, 0x46, 0x83, 0xbc, 0x05, 0x77, + 0x15, 0x89, 0x2a, 0x20, 0x41, 0xb5, 0xc2, 0xfa, 0x27, 0xa7, 0xb4, 0xd7, 0xf9, 0xc5, 0xa1, 0x75, + 0xd4, 0xe9, 0x98, 0x0d, 0x42, 0xb6, 0x60, 0x23, 0xaa, 0x9e, 0x57, 0x20, 0xea, 0x5e, 0x65, 0xb8, + 0xa3, 0x8e, 0x79, 0xd8, 0xea, 0xb1, 0x09, 0x8e, 0xe1, 0xd6, 0x58, 0xb3, 0x23, 0x5c, 0xb2, 0xd9, + 0xeb, 0x84, 0x40, 0x5d, 0x9b, 0x95, 0xbd, 0x96, 0xd9, 0xd8, 0x20, 0xcb, 0x50, 0x39, 0x3c, 0x3a, + 0xb2, 0x86, 0xdd, 0xc3, 0x4e, 0xff, 0x78, 0xd8, 0xd8, 0x24, 0xeb, 0xd0, 0xe8, 0xf6, 0x86, 0x1d, + 0x93, 0xcd, 0xb5, 0xcc, 0xfa, 0x5f, 0x8b, 0x64, 0x0d, 0x96, 0x65, 0x4b, 0x25, 0xf4, 0x8f, 0x8b, + 0x64, 0x13, 0xc8, 0x71, 0xcf, 0xec, 0xb4, 0x76, 0xd9, 0xc0, 0x29, 0xc4, 0x7f, 0x2b, 0x0a, 0x77, + 0xe6, 0xef, 0xe6, 0x94, 0xb0, 0x17, 0xc5, 0x07, 0xc5, 0x3f, 0xf8, 0x51, 0xd5, 0x3e, 0xd4, 0xf1, + 0xb2, 
0xcf, 0x76, 0x69, 0xaa, 0x79, 0x6e, 0x41, 0x35, 0x5f, 0xb0, 0xfd, 0xd4, 0x74, 0xdd, 0xe1, + 0x4d, 0xa8, 0x4d, 0xf9, 0xc7, 0x3f, 0xc4, 0xfb, 0xf5, 0x20, 0x82, 0xe5, 0x38, 0x90, 0x3f, 0x5e, + 0xbf, 0xf0, 0xdd, 0xaa, 0xc2, 0xe2, 0x77, 0xab, 0xd2, 0xf4, 0xc3, 0xa5, 0x34, 0xfd, 0xf0, 0x3e, + 0xac, 0x70, 0xd6, 0xe4, 0xb8, 0xce, 0x54, 0x5a, 0x5d, 0xb8, 0x16, 0xb1, 0x8c, 0x2c, 0x8a, 0xc3, + 0xa5, 0x3a, 0x2a, 0x55, 0x56, 0xc1, 0x42, 0x8a, 0x42, 0x5b, 0x8d, 0x69, 0xaa, 0x9c, 0x73, 0x28, + 0x4d, 0x55, 0xd5, 0x60, 0x5f, 0x46, 0x35, 0x54, 0xb4, 0x1a, 0x38, 0x1c, 0x6b, 0xb8, 0x0f, 0x2b, + 0xf4, 0x32, 0xf4, 0x6d, 0xcb, 0x9b, 0xd9, 0x5f, 0xcf, 0x31, 0xde, 0xc2, 0x46, 0x1b, 0x50, 0xd5, + 0x5c, 0x46, 0x44, 0x1f, 0xe1, 0xbb, 0x76, 0x68, 0x1b, 0xbf, 0x02, 0xa0, 0x4e, 0xd5, 0x31, 0x63, + 0x80, 0xae, 0x27, 0xaf, 0xdd, 0x55, 0x4d, 0x9e, 0xc0, 0x79, 0x0c, 0x3d, 0xdf, 0x3e, 0xa3, 0x5d, + 0xf9, 0x16, 0x4c, 0x04, 0x20, 0xb7, 0x20, 0xe7, 0xcd, 0x64, 0x28, 0x59, 0x59, 0x3e, 0xc8, 0x3c, + 0x33, 0x19, 0xd4, 0xf8, 0x18, 0xb2, 0xfd, 0xd9, 0xb5, 0xa2, 0x52, 0x13, 0x8a, 0xf2, 0x4b, 0x95, + 0x59, 0x0c, 0x1f, 0x93, 0xc9, 0xfb, 0x7f, 0x1a, 0x2a, 0xda, 0xf7, 0x6a, 0xc8, 0x26, 0xac, 0x7e, + 0xd9, 0x1d, 0xf6, 0x3a, 0x83, 0x81, 0x75, 0x74, 0xbc, 0xf3, 0xb4, 0xf3, 0x95, 0xb5, 0xdf, 0x1a, + 0xec, 0x37, 0x6e, 0x30, 0x5e, 0xd2, 0xeb, 0x0c, 0x86, 0x9d, 0xdd, 0x18, 0x3c, 0x43, 0x5e, 0x87, + 0xad, 0xe3, 0xde, 0xf1, 0xa0, 0xb3, 0x6b, 0xa5, 0xe5, 0xcb, 0xb2, 0xcd, 0x23, 0xf0, 0x29, 0xd9, + 0x73, 0xf7, 0x7f, 0x15, 0xea, 0xf1, 0x97, 0x11, 0x08, 0xc0, 0xd2, 0x41, 0xe7, 0x49, 0xab, 0xfd, + 0x15, 0x7f, 0x70, 0x7b, 0x30, 0x6c, 0x0d, 0xbb, 0x6d, 0x4b, 0x3c, 0xb0, 0xcd, 0x18, 0x55, 0x86, + 0x54, 0xa0, 0xd8, 0xea, 0xb5, 0xf7, 0xfb, 0xe6, 0xa0, 0x91, 0x25, 0xaf, 0xc1, 0xa6, 0xdc, 0x42, + 0xed, 0xfe, 0xe1, 0x61, 0x77, 0x88, 0x3c, 0x7a, 0xf8, 0xd5, 0x11, 0xdb, 0x31, 0xf7, 0x6d, 0x28, + 0x47, 0x6f, 0x83, 0x23, 0xdf, 0xeb, 0x0e, 0xbb, 0xad, 0x61, 0xc4, 0xf4, 0x1b, 0x37, 0x18, 0x5b, + 0x8d, 0xc0, 0xf8, 0xc0, 0x77, 0x23, 0xc3, 
0x2f, 0x8f, 0x4a, 0x20, 0xaf, 0xbd, 0x91, 0x65, 0x7b, + 0x3d, 0x82, 0xee, 0xf4, 0x87, 0xac, 0x0b, 0xbf, 0x06, 0xf5, 0xf8, 0x13, 0xdc, 0xa4, 0x01, 0x55, + 0x56, 0xbf, 0x56, 0x05, 0xc0, 0x12, 0x6f, 0x71, 0x23, 0xc3, 0x19, 0x7b, 0xbb, 0x7f, 0xd8, 0xed, + 0x3d, 0xc1, 0xd3, 0xa0, 0x91, 0x65, 0xa0, 0xfe, 0xf1, 0xf0, 0x49, 0x5f, 0x81, 0x72, 0x2c, 0x07, + 0xef, 0x4e, 0x23, 0x7f, 0xff, 0x6b, 0x58, 0x59, 0x78, 0xac, 0x9b, 0xb5, 0xba, 0x7f, 0x3c, 0x6c, + 0xf7, 0x0f, 0xf5, 0x7a, 0x2a, 0x50, 0x6c, 0x1f, 0xb4, 0xba, 0x87, 0xe8, 0x08, 0xa9, 0x41, 0xf9, + 0xb8, 0x27, 0x93, 0xd9, 0xf8, 0x33, 0xe3, 0x39, 0xc6, 0xa2, 0xf6, 0xba, 0xe6, 0x60, 0x68, 0x0d, + 0x86, 0xad, 0x27, 0x9d, 0x46, 0x9e, 0xe5, 0x95, 0xfc, 0xaa, 0x70, 0xff, 0x33, 0xa8, 0xc7, 0xe3, + 0x9e, 0xe3, 0x0e, 0xac, 0x2d, 0xd8, 0xd8, 0xe9, 0x0c, 0xbf, 0xec, 0x74, 0x7a, 0x38, 0xe5, 0xed, + 0x4e, 0x6f, 0x68, 0xb6, 0x0e, 0xba, 0xc3, 0xaf, 0x1a, 0x99, 0xfb, 0x8f, 0xa1, 0x91, 0x0c, 0x32, + 0x88, 0x45, 0x65, 0xbc, 0x28, 0x7c, 0xe3, 0xfe, 0x7f, 0xcc, 0xc0, 0x5a, 0x9a, 0x7f, 0x8d, 0x2d, + 0x4c, 0xc1, 0x08, 0xd9, 0x71, 0x38, 0xe8, 0xf7, 0xac, 0x5e, 0x1f, 0xdf, 0xdd, 0xdd, 0x82, 0x8d, + 0x04, 0x42, 0xf6, 0x22, 0x43, 0x6e, 0xc1, 0xe6, 0x42, 0x26, 0xcb, 0xec, 0x1f, 0xe3, 0x5c, 0x36, + 0x61, 0x2d, 0x81, 0xec, 0x98, 0x66, 0xdf, 0x6c, 0xe4, 0xc8, 0x7b, 0x70, 0x2f, 0x81, 0x59, 0x14, + 0x02, 0xa4, 0x8c, 0x90, 0x27, 0xef, 0xc0, 0x9b, 0x0b, 0xd4, 0xd1, 0x39, 0x69, 0xed, 0xb4, 0x0e, + 0x58, 0xf7, 0x1a, 0x85, 0xfb, 0x7f, 0x2f, 0x07, 0x10, 0x5d, 0x2c, 0x64, 0xf5, 0xef, 0xb6, 0x86, + 0xad, 0x83, 0x3e, 0xdb, 0x33, 0x66, 0x7f, 0xc8, 0x4a, 0x37, 0x3b, 0x3f, 0x6e, 0xdc, 0x48, 0xc5, + 0xf4, 0x8f, 0x58, 0x87, 0x36, 0x61, 0x95, 0xaf, 0xbf, 0x03, 0xd6, 0x0d, 0xb6, 0x5c, 0xf0, 0x09, + 0x67, 0x94, 0x34, 0x8e, 0x8f, 0xf6, 0xcc, 0x7e, 0x6f, 0x68, 0x0d, 0xf6, 0x8f, 0x87, 0xbb, 0xf8, + 0x00, 0x74, 0xdb, 0xec, 0x1e, 0xf1, 0x32, 0xf3, 0x2f, 0x22, 0x60, 0x45, 0x17, 0xd8, 0x06, 0x7f, + 0xd2, 0x1f, 0x0c, 0xba, 0x47, 0xd6, 0x8f, 0x8f, 0x3b, 0x66, 0xb7, 0x33, 0xc0, 
0x8c, 0x4b, 0x29, + 0x70, 0x46, 0x5f, 0x64, 0x6b, 0x76, 0x78, 0xf0, 0x85, 0x10, 0x20, 0x18, 0x69, 0x29, 0x0e, 0x62, + 0x54, 0x65, 0x36, 0x3b, 0xec, 0x04, 0x4e, 0x29, 0x19, 0xae, 0xc1, 0xb1, 0x7c, 0x15, 0x26, 0x5b, + 0x2c, 0xec, 0x7c, 0xcc, 0x56, 0x4d, 0x47, 0xb1, 0x5c, 0x28, 0x76, 0x28, 0x21, 0x6d, 0x77, 0xd7, + 0xc4, 0x0c, 0xf5, 0x05, 0x28, 0xa3, 0x5d, 0x66, 0x8b, 0x90, 0x1d, 0xd1, 0x8c, 0xa4, 0x21, 0x13, + 0x0c, 0xb3, 0xf2, 0xf0, 0x9f, 0xbf, 0x01, 0x65, 0x75, 0xc1, 0x80, 0xfc, 0x08, 0x6a, 0xb1, 0x1b, + 0xdf, 0x44, 0x9a, 0xf0, 0xd3, 0x2e, 0x88, 0x6f, 0xbd, 0x96, 0x8e, 0x14, 0xca, 0xc9, 0xa1, 0x66, + 0x0d, 0xe0, 0x85, 0xbd, 0x96, 0xd4, 0xd0, 0x63, 0xa5, 0xdd, 0xbe, 0x06, 0x2b, 0x8a, 0x7b, 0x8a, + 0xaf, 0x49, 0xeb, 0x9f, 0x3d, 0x26, 0xb7, 0xa3, 0xa7, 0x7d, 0x53, 0x3e, 0x87, 0xbc, 0x75, 0x73, + 0xf1, 0x03, 0xc5, 0xf2, 0x8b, 0xc6, 0xbb, 0x50, 0xd1, 0xbe, 0xe6, 0x47, 0x6e, 0x5e, 0xfb, 0xe5, + 0xc1, 0xad, 0xad, 0x34, 0x94, 0x68, 0xd2, 0xe7, 0x50, 0x56, 0x5f, 0x76, 0x23, 0x9b, 0xda, 0x57, + 0xf9, 0xf4, 0xef, 0xd3, 0x6d, 0x35, 0x17, 0x11, 0x22, 0xff, 0x2e, 0x54, 0xb4, 0x0f, 0xb4, 0xa9, + 0x56, 0x2c, 0x7e, 0x04, 0x4e, 0xb5, 0x22, 0xed, 0x7b, 0x6e, 0x07, 0xb0, 0x2e, 0x6c, 0x0e, 0x27, + 0xf4, 0x9b, 0x0c, 0x4f, 0xca, 0xf7, 0x9b, 0x1f, 0x64, 0xc8, 0x63, 0x28, 0xc9, 0x4f, 0xf1, 0x91, + 0x8d, 0xf4, 0x0f, 0x0d, 0x6e, 0x6d, 0x2e, 0xc0, 0x45, 0x53, 0x5a, 0x00, 0xd1, 0xa7, 0xdf, 0x88, + 0xec, 0xf8, 0xc2, 0xa7, 0xe4, 0xd4, 0xcc, 0xa4, 0x7c, 0x27, 0x6e, 0x17, 0x2a, 0xda, 0x57, 0xde, + 0xd4, 0x98, 0x2c, 0x7e, 0x21, 0x4e, 0x8d, 0x49, 0xda, 0x47, 0xe1, 0x7e, 0x04, 0xb5, 0xd8, 0xe7, + 0xda, 0xd4, 0x3a, 0x4e, 0xfb, 0x18, 0x9c, 0x5a, 0xc7, 0xe9, 0x5f, 0x78, 0xdb, 0x85, 0x8a, 0xf6, + 0x09, 0x35, 0xd5, 0xa2, 0xc5, 0xef, 0xb8, 0xa9, 0x16, 0xa5, 0x7c, 0x71, 0x8d, 0xed, 0x86, 0xf8, + 0xf7, 0xd3, 0xd4, 0x6e, 0x48, 0xfd, 0x10, 0x9b, 0xda, 0x0d, 0xe9, 0x1f, 0x5d, 0x63, 0x4b, 0x4f, + 0x3d, 0x23, 0x4f, 0x36, 0x63, 0xaa, 0x7e, 0xf4, 0x1e, 0xbd, 0x5a, 0x7a, 0x8b, 0x2f, 0xce, 0x3f, + 0x81, 0x55, 0xb5, 
0x68, 0xd4, 0x23, 0xf0, 0x81, 0x6a, 0x53, 0xea, 0x53, 0xf3, 0x5b, 0x8d, 0x24, + 0xf6, 0x41, 0x86, 0x7c, 0x0a, 0x45, 0xf1, 0xb2, 0x36, 0x59, 0x4f, 0xbe, 0xb4, 0xcd, 0x1b, 0xb1, + 0x91, 0xfe, 0x00, 0x37, 0x39, 0xc2, 0x0d, 0xad, 0x3f, 0x7d, 0xad, 0xaf, 0xd8, 0x94, 0xd7, 0xb2, + 0xb7, 0x5e, 0xbf, 0x0e, 0x1d, 0x95, 0x98, 0x7c, 0xae, 0xfd, 0xf6, 0x75, 0x2f, 0xb1, 0xc4, 0x4b, + 0xbc, 0xee, 0xc9, 0xb8, 0x27, 0x50, 0xd5, 0xbf, 0xde, 0x43, 0xf4, 0x7d, 0x98, 0x2c, 0xeb, 0x56, + 0x2a, 0x4e, 0x14, 0xf4, 0x05, 0x6c, 0xa8, 0xf1, 0xd6, 0x9f, 0x05, 0x09, 0xc8, 0x9d, 0x94, 0xc7, + 0x42, 0x62, 0xa3, 0x7e, 0xf3, 0xda, 0xd7, 0x44, 0x1e, 0x64, 0x90, 0xc9, 0xc6, 0x3e, 0xb8, 0x11, + 0x31, 0xd9, 0xb4, 0xef, 0x8c, 0x44, 0x4c, 0x36, 0xfd, 0x2b, 0x1d, 0x2d, 0x58, 0xd6, 0x9e, 0x35, + 0x19, 0x5c, 0xb9, 0x23, 0xb5, 0xde, 0x17, 0xdf, 0x24, 0xde, 0x4a, 0xb3, 0x7c, 0x93, 0x36, 0x54, + 0xf4, 0x97, 0x51, 0x5e, 0x90, 0x7d, 0x53, 0x43, 0xe9, 0xcf, 0xce, 0x3e, 0xc8, 0x90, 0x03, 0x68, + 0x24, 0xdf, 0x31, 0x54, 0x5b, 0x38, 0xed, 0xed, 0xc7, 0xad, 0x04, 0x32, 0xf6, 0xfa, 0x21, 0x5b, + 0x17, 0xb1, 0xef, 0x04, 0x7b, 0x7e, 0xf2, 0x28, 0x8a, 0x7f, 0x3f, 0x58, 0x95, 0x96, 0xf6, 0xe5, + 0xe8, 0x7b, 0x99, 0x07, 0x19, 0xb2, 0x07, 0xd5, 0xd8, 0x33, 0x5e, 0xb1, 0xbb, 0x2e, 0x89, 0x6e, + 0x36, 0x75, 0x5c, 0xa2, 0x9f, 0x87, 0x50, 0x8f, 0x87, 0x68, 0xa8, 0x86, 0xa5, 0xc6, 0x91, 0xa8, + 0xe9, 0x4b, 0x8f, 0xeb, 0x20, 0x3f, 0xe0, 0x5f, 0xc1, 0x97, 0xa1, 0x7c, 0x64, 0xf1, 0xab, 0xe9, + 0x6a, 0xce, 0xf4, 0x6f, 0x8c, 0x1b, 0xb9, 0xbf, 0x90, 0xcd, 0x60, 0xbf, 0xbe, 0xcf, 0xbf, 0x41, + 0x2b, 0xa3, 0xb9, 0xd8, 0xfc, 0xbf, 0x6a, 0x21, 0x64, 0x8f, 0x57, 0x2e, 0xbe, 0x00, 0x1e, 0x71, + 0xee, 0x85, 0xaf, 0x82, 0xbf, 0xa4, 0x0d, 0x2d, 0xde, 0x06, 0x91, 0x27, 0xb6, 0x06, 0x5f, 0xb1, + 0x2c, 0xf2, 0x09, 0x40, 0x14, 0x22, 0x4b, 0x12, 0x81, 0x9a, 0x6a, 0x43, 0xa5, 0x44, 0xd1, 0x76, + 0xf8, 0x7e, 0x57, 0x91, 0xa2, 0xfa, 0x91, 0x1c, 0x0f, 0x5a, 0x8d, 0x1d, 0xc9, 0xc9, 0x62, 0x3e, + 0x84, 0xda, 0x81, 0xe7, 0x3d, 0x9b, 0xcf, 0xd4, 0x3d, 
0x8b, 0x78, 0x18, 0xd3, 0xbe, 0x1d, 0x9c, + 0x6f, 0x25, 0x9a, 0x45, 0x5a, 0xb0, 0xa2, 0x58, 0x44, 0x14, 0xaa, 0x1a, 0x27, 0x8a, 0x31, 0x86, + 0x44, 0x01, 0x0f, 0x32, 0xe4, 0x21, 0x54, 0x77, 0xe9, 0x08, 0x9f, 0xd9, 0xc0, 0xa0, 0x99, 0xd5, + 0x58, 0x00, 0x06, 0x8f, 0xb6, 0xd9, 0xaa, 0xc5, 0x80, 0x92, 0xc5, 0x45, 0x81, 0x5b, 0xfa, 0x99, + 0x11, 0x8f, 0x7e, 0x8a, 0xb1, 0xb8, 0x85, 0xe0, 0xad, 0x2f, 0x60, 0x65, 0x21, 0x34, 0x4a, 0x71, + 0xb7, 0xeb, 0x02, 0xaa, 0xb6, 0xee, 0x5e, 0x4f, 0x20, 0xca, 0xfd, 0x21, 0xd4, 0xf8, 0x2b, 0xc4, + 0x27, 0x94, 0x5f, 0x93, 0x4d, 0xbc, 0x31, 0xa5, 0xdf, 0xc1, 0x4d, 0xb2, 0x24, 0x9e, 0xe1, 0x09, + 0x7e, 0x9b, 0x44, 0xbb, 0x84, 0xaa, 0xe6, 0x75, 0xf1, 0x62, 0xac, 0x9a, 0xd7, 0xb4, 0xfb, 0xae, + 0x9f, 0x41, 0xe5, 0x09, 0x0d, 0xe5, 0xb5, 0x4e, 0x25, 0x1f, 0x25, 0xee, 0x79, 0x6e, 0xa5, 0x5c, + 0xc6, 0x25, 0x1f, 0x63, 0x56, 0xf5, 0x44, 0xc1, 0x86, 0x56, 0x8b, 0x9e, 0x75, 0x39, 0x01, 0x67, + 0xd2, 0x87, 0xf6, 0x50, 0x89, 0x6a, 0xf8, 0xe2, 0xc3, 0x34, 0xaa, 0xe1, 0x69, 0xef, 0x9a, 0xfc, + 0x80, 0x8f, 0x80, 0x76, 0x91, 0x34, 0x12, 0xc1, 0x92, 0x77, 0x4e, 0x55, 0xf3, 0x75, 0xf2, 0x47, + 0x00, 0x83, 0xd0, 0x9b, 0xed, 0xda, 0x74, 0xea, 0xb9, 0x11, 0x4f, 0x88, 0xae, 0x30, 0x46, 0x1b, + 0x51, 0xbb, 0xc7, 0x48, 0xbe, 0xd4, 0x64, 0xd3, 0xd8, 0x94, 0xc8, 0x69, 0xbf, 0xf6, 0x96, 0xa3, + 0xea, 0x4e, 0xca, 0x4d, 0x47, 0x64, 0x12, 0x10, 0x45, 0x9e, 0x29, 0x49, 0x73, 0x21, 0xa8, 0x4d, + 0xed, 0xf5, 0x94, 0x30, 0xb5, 0xcf, 0xa1, 0x1c, 0x85, 0xec, 0x6c, 0x46, 0xaf, 0x26, 0xc5, 0x02, + 0x7c, 0x14, 0xf7, 0x5e, 0x0c, 0x97, 0xe9, 0xc1, 0x2a, 0x6f, 0x8e, 0x3a, 0xfe, 0xf0, 0xa2, 0x9d, + 0xfa, 0xb4, 0xce, 0x62, 0x9c, 0x8a, 0xda, 0x3f, 0x69, 0xd1, 0x16, 0x6c, 0xff, 0x2c, 0x78, 0xed, + 0xd5, 0xfe, 0xb9, 0x2e, 0x0c, 0x43, 0xed, 0x9f, 0xeb, 0x1d, 0xfe, 0x3d, 0x58, 0x4d, 0xf1, 0xbf, + 0x93, 0x37, 0xa4, 0x62, 0x73, 0xad, 0x6f, 0x7e, 0x2b, 0xd5, 0x4f, 0x4b, 0x86, 0xb0, 0xc9, 0xf3, + 0xb4, 0x26, 0x93, 0x84, 0xbb, 0xf7, 0x75, 0x2d, 0x43, 0x8a, 0x0b, 0x3b, 0x26, 0xca, 0x24, 
0xdc, + 0xd8, 0x3d, 0x68, 0x24, 0x3d, 0xa5, 0xe4, 0x7a, 0xf2, 0xad, 0x3b, 0x31, 0x91, 0x7d, 0xd1, 0xbb, + 0x4a, 0xbe, 0x50, 0xfe, 0xda, 0x44, 0x1b, 0xef, 0x44, 0x5f, 0x84, 0x4b, 0xf5, 0x2e, 0x2b, 0x6d, + 0x20, 0xd5, 0xdd, 0x4b, 0x7e, 0x11, 0x36, 0x93, 0x2b, 0x5a, 0x96, 0x7c, 0x37, 0x6d, 0xb8, 0xae, + 0x15, 0xe5, 0xe2, 0x1d, 0x7a, 0x90, 0x61, 0x8c, 0x58, 0xf7, 0xaa, 0xaa, 0x85, 0x94, 0xe2, 0xde, + 0x55, 0x0b, 0x29, 0xd5, 0x0d, 0x7b, 0x04, 0xcb, 0x09, 0x87, 0xaa, 0x12, 0x83, 0xd3, 0x5d, 0xb0, + 0x4a, 0x0c, 0xbe, 0xce, 0x0f, 0x3b, 0x80, 0x46, 0xd2, 0x55, 0xaa, 0xe6, 0xfa, 0x1a, 0xf7, 0xeb, + 0xd6, 0x9d, 0x6b, 0xf1, 0xf1, 0x66, 0x6a, 0x4e, 0xc5, 0x58, 0x33, 0x17, 0x5d, 0xa1, 0xb1, 0x66, + 0xa6, 0xb8, 0x34, 0x77, 0xde, 0xf9, 0xa5, 0xef, 0x9c, 0x39, 0xe1, 0xf9, 0xfc, 0x64, 0x7b, 0xe4, + 0x4d, 0xdf, 0x9f, 0x48, 0xab, 0x86, 0xb8, 0x77, 0xfe, 0xfe, 0xc4, 0x1d, 0xbf, 0x8f, 0x05, 0x9c, + 0x2c, 0xcd, 0x7c, 0x2f, 0xf4, 0x3e, 0xfc, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x34, 0x23, 0x20, + 0x4e, 0x8f, 0x8b, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -12946,8 +13420,10 @@ type LightningClient interface { // lncli: `abandonchannel` //AbandonChannel removes all channel state from the database except for a //close summary. This method can be used to get rid of permanently unusable - //channels due to bugs fixed in newer versions of lnd. Only available - //when in debug builds of lnd. + //channels due to bugs fixed in newer versions of lnd. This method can also be + //used to remove externally funded channels where the funding transaction was + //never broadcast. Only available for non-externally funded channels in dev + //build. AbandonChannel(ctx context.Context, in *AbandonChannelRequest, opts ...grpc.CallOption) (*AbandonChannelResponse, error) // lncli: `sendpayment` //Deprecated, use routerrpc.SendPaymentV2. SendPayment dispatches a @@ -13130,6 +13606,17 @@ type LightningClient interface { //write permissions. 
No first-party caveats are added since this can be done //offline. BakeMacaroon(ctx context.Context, in *BakeMacaroonRequest, opts ...grpc.CallOption) (*BakeMacaroonResponse, error) + // lncli: `listmacaroonids` + //ListMacaroonIDs returns all root key IDs that are in use. + ListMacaroonIDs(ctx context.Context, in *ListMacaroonIDsRequest, opts ...grpc.CallOption) (*ListMacaroonIDsResponse, error) + // lncli: `deletemacaroonid` + //DeleteMacaroonID deletes the specified macaroon ID and invalidates all + //macaroons derived from that ID. + DeleteMacaroonID(ctx context.Context, in *DeleteMacaroonIDRequest, opts ...grpc.CallOption) (*DeleteMacaroonIDResponse, error) + // lncli: `listpermissions` + //ListPermissions lists all RPC method URIs and their required macaroon + //permissions to access them. + ListPermissions(ctx context.Context, in *ListPermissionsRequest, opts ...grpc.CallOption) (*ListPermissionsResponse, error) } type lightningClient struct { @@ -13896,6 +14383,33 @@ func (c *lightningClient) BakeMacaroon(ctx context.Context, in *BakeMacaroonRequ return out, nil } +func (c *lightningClient) ListMacaroonIDs(ctx context.Context, in *ListMacaroonIDsRequest, opts ...grpc.CallOption) (*ListMacaroonIDsResponse, error) { + out := new(ListMacaroonIDsResponse) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListMacaroonIDs", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *lightningClient) DeleteMacaroonID(ctx context.Context, in *DeleteMacaroonIDRequest, opts ...grpc.CallOption) (*DeleteMacaroonIDResponse, error) { + out := new(DeleteMacaroonIDResponse) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/DeleteMacaroonID", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *lightningClient) ListPermissions(ctx context.Context, in *ListPermissionsRequest, opts ...grpc.CallOption) (*ListPermissionsResponse, error) { + out := new(ListPermissionsResponse) + err := c.cc.Invoke(ctx, "/lnrpc.Lightning/ListPermissions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // LightningServer is the server API for Lightning service. type LightningServer interface { // lncli: `walletbalance` @@ -14051,8 +14565,10 @@ type LightningServer interface { // lncli: `abandonchannel` //AbandonChannel removes all channel state from the database except for a //close summary. This method can be used to get rid of permanently unusable - //channels due to bugs fixed in newer versions of lnd. Only available - //when in debug builds of lnd. + //channels due to bugs fixed in newer versions of lnd. This method can also be + //used to remove externally funded channels where the funding transaction was + //never broadcast. Only available for non-externally funded channels in dev + //build. AbandonChannel(context.Context, *AbandonChannelRequest) (*AbandonChannelResponse, error) // lncli: `sendpayment` //Deprecated, use routerrpc.SendPaymentV2. SendPayment dispatches a @@ -14235,6 +14751,17 @@ type LightningServer interface { //write permissions. No first-party caveats are added since this can be done //offline. BakeMacaroon(context.Context, *BakeMacaroonRequest) (*BakeMacaroonResponse, error) + // lncli: `listmacaroonids` + //ListMacaroonIDs returns all root key IDs that are in use. + ListMacaroonIDs(context.Context, *ListMacaroonIDsRequest) (*ListMacaroonIDsResponse, error) + // lncli: `deletemacaroonid` + //DeleteMacaroonID deletes the specified macaroon ID and invalidates all + //macaroons derived from that ID. 
+ DeleteMacaroonID(context.Context, *DeleteMacaroonIDRequest) (*DeleteMacaroonIDResponse, error) + // lncli: `listpermissions` + //ListPermissions lists all RPC method URIs and their required macaroon + //permissions to access them. + ListPermissions(context.Context, *ListPermissionsRequest) (*ListPermissionsResponse, error) } // UnimplementedLightningServer can be embedded to have forward compatible implementations. @@ -14409,6 +14936,15 @@ func (*UnimplementedLightningServer) SubscribeChannelBackups(req *ChannelBackupS func (*UnimplementedLightningServer) BakeMacaroon(ctx context.Context, req *BakeMacaroonRequest) (*BakeMacaroonResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method BakeMacaroon not implemented") } +func (*UnimplementedLightningServer) ListMacaroonIDs(ctx context.Context, req *ListMacaroonIDsRequest) (*ListMacaroonIDsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListMacaroonIDs not implemented") +} +func (*UnimplementedLightningServer) DeleteMacaroonID(ctx context.Context, req *DeleteMacaroonIDRequest) (*DeleteMacaroonIDResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteMacaroonID not implemented") +} +func (*UnimplementedLightningServer) ListPermissions(ctx context.Context, req *ListPermissionsRequest) (*ListPermissionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListPermissions not implemented") +} func RegisterLightningServer(s *grpc.Server, srv LightningServer) { s.RegisterService(&_Lightning_serviceDesc, srv) @@ -15470,6 +16006,60 @@ func _Lightning_BakeMacaroon_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _Lightning_ListMacaroonIDs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListMacaroonIDsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor 
== nil { + return srv.(LightningServer).ListMacaroonIDs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lnrpc.Lightning/ListMacaroonIDs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).ListMacaroonIDs(ctx, req.(*ListMacaroonIDsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lightning_DeleteMacaroonID_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteMacaroonIDRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LightningServer).DeleteMacaroonID(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lnrpc.Lightning/DeleteMacaroonID", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).DeleteMacaroonID(ctx, req.(*DeleteMacaroonIDRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Lightning_ListPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LightningServer).ListPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/lnrpc.Lightning/ListPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LightningServer).ListPermissions(ctx, req.(*ListPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _Lightning_serviceDesc = grpc.ServiceDesc{ ServiceName: "lnrpc.Lightning", HandlerType: (*LightningServer)(nil), @@ -15654,6 +16244,18 @@ var _Lightning_serviceDesc = grpc.ServiceDesc{ MethodName: "BakeMacaroon", Handler: _Lightning_BakeMacaroon_Handler, }, + { + 
MethodName: "ListMacaroonIDs", + Handler: _Lightning_ListMacaroonIDs_Handler, + }, + { + MethodName: "DeleteMacaroonID", + Handler: _Lightning_DeleteMacaroonID_Handler, + }, + { + MethodName: "ListPermissions", + Handler: _Lightning_ListPermissions_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/lnrpc/rpc.pb.gw.go b/lnrpc/rpc.pb.gw.go index a9fb54317..e3f63d797 100644 --- a/lnrpc/rpc.pb.gw.go +++ b/lnrpc/rpc.pb.gw.go @@ -1913,6 +1913,96 @@ func local_request_Lightning_BakeMacaroon_0(ctx context.Context, marshaler runti } +func request_Lightning_ListMacaroonIDs_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListMacaroonIDsRequest + var metadata runtime.ServerMetadata + + msg, err := client.ListMacaroonIDs(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Lightning_ListMacaroonIDs_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListMacaroonIDsRequest + var metadata runtime.ServerMetadata + + msg, err := server.ListMacaroonIDs(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Lightning_DeleteMacaroonID_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteMacaroonIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["root_key_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "root_key_id") + } + + protoReq.RootKeyId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "root_key_id", err) + } + + msg, err := client.DeleteMacaroonID(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Lightning_DeleteMacaroonID_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq DeleteMacaroonIDRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["root_key_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "root_key_id") + } + + protoReq.RootKeyId, err = runtime.Uint64(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "root_key_id", err) + } + + msg, err := server.DeleteMacaroonID(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Lightning_ListPermissions_0(ctx context.Context, marshaler runtime.Marshaler, client LightningClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListPermissionsRequest + var metadata runtime.ServerMetadata + + msg, err := client.ListPermissions(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Lightning_ListPermissions_0(ctx context.Context, marshaler runtime.Marshaler, server LightningServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ListPermissionsRequest + var metadata runtime.ServerMetadata + + msg, err := server.ListPermissions(ctx, &protoReq) + return msg, metadata, err + +} + // RegisterLightningHandlerServer registers the http handlers for service Lightning to "mux". 
// UnaryRPC :call LightningServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -2874,6 +2964,66 @@ func RegisterLightningHandlerServer(ctx context.Context, mux *runtime.ServeMux, }) + mux.Handle("GET", pattern_Lightning_ListMacaroonIDs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Lightning_ListMacaroonIDs_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_ListMacaroonIDs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("DELETE", pattern_Lightning_DeleteMacaroonID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Lightning_DeleteMacaroonID_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_DeleteMacaroonID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Lightning_ListPermissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Lightning_ListPermissions_0(rctx, inboundMarshaler, server, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_ListPermissions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + return nil } @@ -3975,6 +4125,66 @@ func RegisterLightningHandlerClient(ctx context.Context, mux *runtime.ServeMux, }) + mux.Handle("GET", pattern_Lightning_ListMacaroonIDs_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lightning_ListMacaroonIDs_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_ListMacaroonIDs_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("DELETE", pattern_Lightning_DeleteMacaroonID_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lightning_DeleteMacaroonID_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_DeleteMacaroonID_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Lightning_ListPermissions_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Lightning_ListPermissions_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Lightning_ListPermissions_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + return nil } @@ -4084,6 +4294,12 @@ var ( pattern_Lightning_SubscribeChannelBackups_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"v1", "channels", "backup", "subscribe"}, "", runtime.AssumeColonVerbOpt(true))) pattern_Lightning_BakeMacaroon_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "macaroon"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Lightning_ListMacaroonIDs_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "macaroon", "ids"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Lightning_DeleteMacaroonID_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 1, 0, 4, 1, 5, 2}, []string{"v1", "macaroon", "root_key_id"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Lightning_ListPermissions_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "macaroon", "permissions"}, "", runtime.AssumeColonVerbOpt(true))) ) var ( @@ -4192,4 +4408,10 @@ var ( forward_Lightning_SubscribeChannelBackups_0 = runtime.ForwardResponseStream forward_Lightning_BakeMacaroon_0 = runtime.ForwardResponseMessage + + forward_Lightning_ListMacaroonIDs_0 = runtime.ForwardResponseMessage + + forward_Lightning_DeleteMacaroonID_0 = runtime.ForwardResponseMessage + + forward_Lightning_ListPermissions_0 = runtime.ForwardResponseMessage ) diff --git a/lnrpc/rpc.proto b/lnrpc/rpc.proto index ee5504d76..d550a5318 100644 --- a/lnrpc/rpc.proto +++ b/lnrpc/rpc.proto @@ -235,8 +235,10 @@ service Lightning { /* lncli: `abandonchannel` AbandonChannel removes all channel state from the database except for a close summary. This method can be used to get rid of permanently unusable - channels due to bugs fixed in newer versions of lnd. Only available - when in debug builds of lnd. + channels due to bugs fixed in newer versions of lnd. 
This method can also be + used to remove externally funded channels where the funding transaction was + never broadcast. Only available for non-externally funded channels in dev + build. */ rpc AbandonChannel (AbandonChannelRequest) returns (AbandonChannelResponse); @@ -491,6 +493,26 @@ service Lightning { offline. */ rpc BakeMacaroon (BakeMacaroonRequest) returns (BakeMacaroonResponse); + + /* lncli: `listmacaroonids` + ListMacaroonIDs returns all root key IDs that are in use. + */ + rpc ListMacaroonIDs (ListMacaroonIDsRequest) + returns (ListMacaroonIDsResponse); + + /* lncli: `deletemacaroonid` + DeleteMacaroonID deletes the specified macaroon ID and invalidates all + macaroons derived from that ID. + */ + rpc DeleteMacaroonID (DeleteMacaroonIDRequest) + returns (DeleteMacaroonIDResponse); + + /* lncli: `listpermissions` + ListPermissions lists all RPC method URIs and their required macaroon + permissions to access them. + */ + rpc ListPermissions (ListPermissionsRequest) + returns (ListPermissionsResponse); } message Utxo { @@ -947,6 +969,12 @@ message ConnectPeerRequest { /* If set, the daemon will attempt to persistently connect to the target * peer. Otherwise, the call will be synchronous. */ bool perm = 2; + + /* + The connection timeout value (in seconds) for this request. It won't affect + other requests. + */ + uint64 timeout = 3; } message ConnectPeerResponse { } @@ -1393,6 +1421,20 @@ message Peer { spamming us with errors at no cost. */ repeated TimestampedError errors = 12; + + /* + The number of times we have recorded this peer going offline or coming + online, recorded across restarts. Note that this value is decreased over + time if the peer has not recently flapped, so that we can forgive peers + with historically high flap counts. + */ + int32 flap_count = 13; + + /* + The timestamp of the last flap we observed for this peer. If this value is + zero, we have not observed any flaps for this peer. 
+ */ + int64 last_flap_ns = 14; } message TimestampedError { @@ -1672,6 +1714,12 @@ message OpenChannelRequest { the channel. It only applies to the remote party. */ uint64 remote_max_value_in_flight_msat = 15; + + /* + The maximum number of concurrent HTLCs we will allow the remote party to add + to the commitment transaction. + */ + uint32 remote_max_htlcs = 16; } message OpenStatusUpdate { oneof update { @@ -1817,12 +1865,19 @@ message FundingPsbtFinalize { /* The funded PSBT that contains all witness data to send the exact channel capacity amount to the PK script returned in the open channel message in a - previous step. + previous step. Cannot be set at the same time as final_raw_tx. */ bytes signed_psbt = 1; // The pending channel ID of the channel to get the PSBT for. bytes pending_chan_id = 2; + + /* + As an alternative to the signed PSBT with all witness data, the final raw + wire format transaction can also be specified directly. Cannot be set at the + same time as signed_psbt. + */ + bytes final_raw_tx = 3; } message FundingTransitionMsg { @@ -3039,6 +3094,8 @@ message DeleteAllPaymentsResponse { message AbandonChannelRequest { ChannelPoint channel_point = 1; + + bool pending_funding_shim_only = 2; } message AbandonChannelResponse { @@ -3327,12 +3384,46 @@ message MacaroonPermission { message BakeMacaroonRequest { // The list of permissions the new macaroon should grant. repeated MacaroonPermission permissions = 1; + + // The root key ID used to create the macaroon, must be a positive integer. + uint64 root_key_id = 2; } message BakeMacaroonResponse { // The hex encoded macaroon, serialized in binary format. string macaroon = 1; } +message ListMacaroonIDsRequest { +} +message ListMacaroonIDsResponse { + // The list of root key IDs that are in use. + repeated uint64 root_key_ids = 1; +} + +message DeleteMacaroonIDRequest { + // The root key ID to be removed. 
+ uint64 root_key_id = 1; +} +message DeleteMacaroonIDResponse { + // A boolean indicates that the deletion is successful. + bool deleted = 1; +} + +message MacaroonPermissionList { + // A list of macaroon permissions. + repeated MacaroonPermission permissions = 1; +} + +message ListPermissionsRequest { +} +message ListPermissionsResponse { + /* + A map between all RPC method URIs and their required macaroon permissions to + access them. + */ + map method_permissions = 1; +} + message Failure { enum FailureCode { /* @@ -3495,3 +3586,14 @@ message ChannelUpdate { */ bytes extra_opaque_data = 12; } + +message MacaroonId { + bytes nonce = 1; + bytes storageId = 2; + repeated Op ops = 3; +} + +message Op { + string entity = 1; + repeated string actions = 2; +} diff --git a/lnrpc/rpc.swagger.json b/lnrpc/rpc.swagger.json index 501d4396e..4c66faab1 100644 --- a/lnrpc/rpc.swagger.json +++ b/lnrpc/rpc.swagger.json @@ -151,7 +151,7 @@ }, "/v1/channels/abandon/{channel_point.funding_txid_str}/{channel_point.output_index}": { "delete": { - "summary": "lncli: `abandonchannel`\nAbandonChannel removes all channel state from the database except for a\nclose summary. This method can be used to get rid of permanently unusable\nchannels due to bugs fixed in newer versions of lnd. Only available\nwhen in debug builds of lnd.", + "summary": "lncli: `abandonchannel`\nAbandonChannel removes all channel state from the database except for a\nclose summary. This method can be used to get rid of permanently unusable\nchannels due to bugs fixed in newer versions of lnd. This method can also be\nused to remove externally funded channels where the funding transaction was\nnever broadcast. 
Only available for non-externally funded channels in dev\nbuild.", "operationId": "AbandonChannel", "responses": { "200": { @@ -190,6 +190,13 @@ "required": false, "type": "string", "format": "byte" + }, + { + "name": "pending_funding_shim_only", + "in": "query", + "required": false, + "type": "boolean", + "format": "boolean" } ], "tags": [ @@ -1426,6 +1433,85 @@ ] } }, + "/v1/macaroon/ids": { + "get": { + "summary": "lncli: `listmacaroonids`\nListMacaroonIDs returns all root key IDs that are in use.", + "operationId": "ListMacaroonIDs", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/lnrpcListMacaroonIDsResponse" + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "tags": [ + "Lightning" + ] + } + }, + "/v1/macaroon/permissions": { + "get": { + "summary": "lncli: `listpermissions`\nListPermissions lists all RPC method URIs and their required macaroon\npermissions to access them.", + "operationId": "ListPermissions", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/lnrpcListPermissionsResponse" + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "tags": [ + "Lightning" + ] + } + }, + "/v1/macaroon/{root_key_id}": { + "delete": { + "summary": "lncli: `deletemacaroonid`\nDeleteMacaroonID deletes the specified macaroon ID and invalidates all\nmacaroons derived from that ID.", + "operationId": "DeleteMacaroonID", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/lnrpcDeleteMacaroonIDResponse" + } + }, + "default": { + "description": "An unexpected error response", + "schema": { + "$ref": "#/definitions/runtimeError" + } + } + }, + "parameters": [ + { + "name": "root_key_id", + "description": "The root key ID to be 
removed.", + "in": "path", + "required": true, + "type": "string", + "format": "uint64" + } + ], + "tags": [ + "Lightning" + ] + } + }, "/v1/newaddress": { "get": { "summary": "lncli: `newaddress`\nNewAddress creates a new address under control of the local wallet.", @@ -2399,6 +2485,11 @@ "$ref": "#/definitions/lnrpcMacaroonPermission" }, "description": "The list of permissions the new macaroon should grant." + }, + "root_key_id": { + "type": "string", + "format": "uint64", + "description": "The root key ID used to create the macaroon, must be a positive integer." } } }, @@ -3132,6 +3223,11 @@ "type": "boolean", "format": "boolean", "description": "If set, the daemon will attempt to persistently connect to the target\npeer. Otherwise, the call will be synchronous." + }, + "timeout": { + "type": "string", + "format": "uint64", + "description": "The connection timeout value (in seconds) for this request. It won't affect\nother requests." } } }, @@ -3161,6 +3257,16 @@ "lnrpcDeleteAllPaymentsResponse": { "type": "object" }, + "lnrpcDeleteMacaroonIDResponse": { + "type": "object", + "properties": { + "deleted": { + "type": "boolean", + "format": "boolean", + "description": "A boolean indicates that the deletion is successful." + } + } + }, "lnrpcDisconnectPeerResponse": { "type": "object" }, @@ -3436,12 +3542,17 @@ "signed_psbt": { "type": "string", "format": "byte", - "description": "The funded PSBT that contains all witness data to send the exact channel\ncapacity amount to the PK script returned in the open channel message in a\nprevious step." + "description": "The funded PSBT that contains all witness data to send the exact channel\ncapacity amount to the PK script returned in the open channel message in a\nprevious step. Cannot be set at the same time as final_raw_tx." }, "pending_chan_id": { "type": "string", "format": "byte", "description": "The pending channel ID of the channel to get the PSBT for." 
+ }, + "final_raw_tx": { + "type": "string", + "format": "byte", + "description": "As an alternative to the signed PSBT with all witness data, the final raw\nwire format transaction can also be specified directly. Cannot be set at the\nsame time as signed_psbt." } } }, @@ -4095,6 +4206,19 @@ } } }, + "lnrpcListMacaroonIDsResponse": { + "type": "object", + "properties": { + "root_key_ids": { + "type": "array", + "items": { + "type": "string", + "format": "uint64" + }, + "description": "The list of root key IDs that are in use." + } + } + }, "lnrpcListPaymentsResponse": { "type": "object", "properties": { @@ -4129,6 +4253,18 @@ } } }, + "lnrpcListPermissionsResponse": { + "type": "object", + "properties": { + "method_permissions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/lnrpcMacaroonPermissionList" + }, + "description": "A map between all RPC method URIs and their required macaroon permissions to\naccess them." + } + } + }, "lnrpcListUnspentResponse": { "type": "object", "properties": { @@ -4169,6 +4305,18 @@ } } }, + "lnrpcMacaroonPermissionList": { + "type": "object", + "properties": { + "permissions": { + "type": "array", + "items": { + "$ref": "#/definitions/lnrpcMacaroonPermission" + }, + "description": "A list of macaroon permissions." + } + } + }, "lnrpcMultiChanBackup": { "type": "object", "properties": { @@ -4410,6 +4558,11 @@ "type": "string", "format": "uint64", "description": "The maximum amount of coins in millisatoshi that can be pending within\nthe channel. It only applies to the remote party." + }, + "remote_max_htlcs": { + "type": "integer", + "format": "int64", + "description": "The maximum number of concurrent HTLCs we will allow the remote party to add\nto the commitment transaction." } } }, @@ -4657,6 +4810,16 @@ "$ref": "#/definitions/lnrpcTimestampedError" }, "description": "The latest errors received from our peer with timestamps, limited to the 10\nmost recent errors. 
These errors are tracked across peer connections, but\nare not persisted across lnd restarts. Note that these errors are only\nstored for peers that we have channels open with, to prevent peers from\nspamming us with errors at no cost." + }, + "flap_count": { + "type": "integer", + "format": "int32", + "description": "The number of times we have recorded this peer going offline or coming\nonline, recorded across restarts. Note that this value is decreased over\ntime if the peer has not recently flapped, so that we can forgive peers\nwith historically high flap counts." + }, + "last_flap_ns": { + "type": "string", + "format": "int64", + "description": "The timestamp of the last flap we observed for this peer. If this value is\nzero, we have not observed any flaps for this peer." } } }, diff --git a/lnrpc/signrpc/signer.pb.go b/lnrpc/signrpc/signer.pb.go index 8e78c8a9e..75e7ff7da 100644 --- a/lnrpc/signrpc/signer.pb.go +++ b/lnrpc/signrpc/signer.pb.go @@ -673,12 +673,18 @@ type SharedKeyRequest struct { // The ephemeral public key to use for the DH key derivation. EphemeralPubkey []byte `protobuf:"bytes,1,opt,name=ephemeral_pubkey,json=ephemeralPubkey,proto3" json:"ephemeral_pubkey,omitempty"` // - //The optional key locator of the local key that should be used. If this - //parameter is not set then the node's identity private key will be used. - KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + //Deprecated. The optional key locator of the local key that should be used. + //If this parameter is not set then the node's identity private key will be + //used. + KeyLoc *KeyLocator `protobuf:"bytes,2,opt,name=key_loc,json=keyLoc,proto3" json:"key_loc,omitempty"` // Deprecated: Do not use. + // + //A key descriptor describes the key used for performing ECDH. 
Either a key + //locator or a raw public key is expected, if neither is supplied, defaults to + //the node's identity private key. + KeyDesc *KeyDescriptor `protobuf:"bytes,3,opt,name=key_desc,json=keyDesc,proto3" json:"key_desc,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *SharedKeyRequest) Reset() { *m = SharedKeyRequest{} } @@ -713,6 +719,7 @@ func (m *SharedKeyRequest) GetEphemeralPubkey() []byte { return nil } +// Deprecated: Do not use. func (m *SharedKeyRequest) GetKeyLoc() *KeyLocator { if m != nil { return m.KeyLoc @@ -720,6 +727,13 @@ func (m *SharedKeyRequest) GetKeyLoc() *KeyLocator { return nil } +func (m *SharedKeyRequest) GetKeyDesc() *KeyDescriptor { + if m != nil { + return m.KeyDesc + } + return nil +} + type SharedKeyResponse struct { // The shared public key, hashed with sha256. SharedKey []byte `protobuf:"bytes,1,opt,name=shared_key,json=sharedKey,proto3" json:"shared_key,omitempty"` @@ -780,55 +794,56 @@ func init() { func init() { proto.RegisterFile("signrpc/signer.proto", fileDescriptor_4ecd772f6c7ffacf) } var fileDescriptor_4ecd772f6c7ffacf = []byte{ - // 756 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x8f, 0xdb, 0x44, - 0x14, 0xd5, 0x26, 0x6c, 0x92, 0xbd, 0x4e, 0x76, 0xb3, 0xc3, 0xaa, 0xb8, 0x0b, 0x88, 0x60, 0xa9, - 0x28, 0x95, 0x20, 0x11, 0x01, 0x21, 0xc1, 0x13, 0x2a, 0xd5, 0xaa, 0x55, 0x8a, 0x5a, 0x39, 0x2b, - 0x1e, 0xfa, 0x62, 0x39, 0xce, 0xad, 0x33, 0xb2, 0x63, 0xcf, 0xce, 0x8c, 0xeb, 0xf8, 0x77, 0xf0, - 0xd7, 0xf8, 0x41, 0x68, 0x3e, 0xe2, 0xd8, 0x29, 0x54, 0xea, 0xd3, 0xfa, 0x9e, 0xb9, 0x73, 0xee, - 0xd9, 0x73, 0xae, 0x63, 0xb8, 0x11, 0x34, 0xce, 0x38, 0x8b, 0xe6, 0xea, 0x2f, 0xf2, 0x19, 0xe3, - 0xb9, 0xcc, 0x49, 0xdf, 0xa2, 0xde, 0x0b, 0x80, 0x25, 0x56, 0xaf, 0xf2, 0x28, 0x94, 0x39, 0x27, - 0x5f, 0x03, 0x24, 0x58, 0x05, 0xef, 0xc2, 0x1d, 0x4d, 0x2b, 0xf7, 0x6c, 0x72, 
0x36, 0x3d, 0xf7, - 0x2f, 0x12, 0xac, 0xee, 0x34, 0x40, 0xbe, 0x04, 0x55, 0x04, 0x34, 0xdb, 0xe0, 0xde, 0xed, 0xe8, - 0xd3, 0x41, 0x82, 0xd5, 0x4b, 0x55, 0x7b, 0x21, 0x8c, 0x96, 0x58, 0x3d, 0x47, 0x11, 0x71, 0xca, - 0x14, 0x99, 0x07, 0x23, 0x1e, 0x96, 0x81, 0xba, 0xb1, 0xae, 0x24, 0x0a, 0xcd, 0x37, 0xf4, 0x1d, - 0x1e, 0x96, 0x4b, 0xac, 0x9e, 0x29, 0x88, 0x7c, 0x0f, 0x7d, 0x75, 0x9e, 0xe6, 0x91, 0xe6, 0x73, - 0x16, 0x9f, 0xcf, 0xac, 0xb2, 0xd9, 0x51, 0x96, 0xdf, 0x4b, 0xf4, 0xb3, 0xf7, 0x1b, 0x9c, 0xdf, - 0xef, 0x5f, 0x17, 0x92, 0xdc, 0xc0, 0xf9, 0xfb, 0x30, 0x2d, 0x50, 0x53, 0x76, 0x7d, 0x53, 0x28, - 0x79, 0x2c, 0x09, 0xcc, 0x7c, 0x4d, 0x37, 0xf4, 0x07, 0x2c, 0x59, 0xe9, 0xda, 0xfb, 0xbb, 0x03, - 0x97, 0x2b, 0x1a, 0x67, 0x0d, 0x81, 0x3f, 0x82, 0x52, 0x1f, 0x6c, 0x50, 0x44, 0x9a, 0xc8, 0x59, - 0x3c, 0x6a, 0x4e, 0x3f, 0x76, 0xfa, 0x4a, 0xa4, 0x2a, 0xc9, 0xb7, 0x30, 0x14, 0x34, 0x8b, 0x53, - 0x0c, 0x64, 0x89, 0x61, 0x62, 0xa7, 0x38, 0x06, 0xbb, 0x57, 0x90, 0x6a, 0xd9, 0xe4, 0xc5, 0xba, - 0x6e, 0xe9, 0x9a, 0x16, 0x83, 0x99, 0x96, 0x27, 0x70, 0x59, 0x52, 0x99, 0xa1, 0x10, 0x07, 0xb5, - 0x9f, 0xe9, 0xa6, 0x91, 0x45, 0x8d, 0x64, 0xf2, 0x1d, 0xf4, 0xf2, 0x42, 0xb2, 0x42, 0xba, 0xe7, - 0x5a, 0xdd, 0x65, 0xad, 0x4e, 0xbb, 0xe0, 0xdb, 0x53, 0xe2, 0x82, 0x8a, 0x73, 0x1b, 0x8a, 0xad, - 0xdb, 0x9f, 0x9c, 0x4d, 0x47, 0xfe, 0xa1, 0x24, 0xdf, 0x80, 0x43, 0x33, 0x56, 0x48, 0x1b, 0xd9, - 0x40, 0x47, 0x06, 0x1a, 0x32, 0xa1, 0x45, 0xd0, 0x57, 0xa6, 0xf8, 0xf8, 0x40, 0x26, 0x30, 0x54, - 0x71, 0xc9, 0x7d, 0x2b, 0x2d, 0xe0, 0x61, 0x79, 0xbf, 0x37, 0x61, 0xfd, 0x02, 0xa0, 0x04, 0x68, - 0xc3, 0x84, 0xdb, 0x99, 0x74, 0xa7, 0xce, 0xe2, 0x8b, 0x5a, 0x53, 0xdb, 0x5c, 0xff, 0x42, 0xd8, - 0x5a, 0x78, 0x4f, 0x60, 0x60, 0x86, 0x08, 0x46, 0x1e, 0xc3, 0x40, 0x4d, 0x11, 0x34, 0x56, 0x13, - 0xba, 0xd3, 0xa1, 0xdf, 0xe7, 0x61, 0xb9, 0xa2, 0xb1, 0xf0, 0xee, 0xc0, 0x79, 0xa9, 0x94, 0xd9, - 0xff, 0xde, 0x85, 0xbe, 0xb5, 0xe3, 0xd0, 0x68, 0x4b, 0xb5, 0xa5, 0x82, 0xc6, 0xed, 0xa0, 0xd5, - 0x38, 0x9b, 0xf4, 
0x2b, 0xb8, 0x6a, 0xf0, 0xe8, 0xa9, 0xbf, 0xc2, 0xc8, 0xf8, 0x60, 0xee, 0x18, - 0x46, 0x67, 0x71, 0x53, 0x8b, 0x6f, 0x5e, 0x18, 0xd2, 0x63, 0x21, 0xbc, 0x37, 0x66, 0x6d, 0xfe, - 0x44, 0x21, 0xc2, 0x18, 0x95, 0x51, 0x63, 0xe8, 0xee, 0x44, 0x6c, 0xfd, 0x51, 0x8f, 0x9f, 0xb8, - 0xc5, 0x73, 0xb8, 0x6a, 0x31, 0x0a, 0x46, 0xbe, 0x02, 0x6d, 0x57, 0x28, 0x0b, 0x8e, 0x96, 0xf8, - 0x08, 0x78, 0x6f, 0x61, 0xfc, 0x17, 0x72, 0xfa, 0xae, 0xfa, 0xa8, 0x88, 0x16, 0x47, 0xe7, 0x84, - 0x83, 0x3c, 0x82, 0x1e, 0x2b, 0xd6, 0x09, 0x56, 0x76, 0x1f, 0x6d, 0xe5, 0x3d, 0x85, 0xeb, 0x13, - 0x6e, 0xc1, 0xec, 0xeb, 0x45, 0x37, 0x9a, 0x7e, 0xe0, 0x9b, 0xc2, 0x4b, 0x60, 0xbc, 0xda, 0x86, - 0x1c, 0x37, 0x4b, 0xac, 0x7c, 0x7c, 0x28, 0x50, 0x48, 0xf2, 0x14, 0xc6, 0xc8, 0xb6, 0xb8, 0x43, - 0x1e, 0xa6, 0x81, 0x1d, 0x60, 0x34, 0x5d, 0xd5, 0xf8, 0x1b, 0x0d, 0x7f, 0xa2, 0x49, 0x0b, 0xb8, - 0x6e, 0x0c, 0x13, 0x2c, 0xcf, 0x04, 0xea, 0xe0, 0x35, 0x18, 0x1c, 0xe7, 0x5c, 0x88, 0x43, 0xdb, - 0xe2, 0x9f, 0x0e, 0xf4, 0x56, 0xfa, 0x57, 0x8e, 0xfc, 0x0c, 0x23, 0xf5, 0xf4, 0x5a, 0xbf, 0x20, - 0x7e, 0x58, 0x92, 0x71, 0x6b, 0x4f, 0x7d, 0x7c, 0xb8, 0xbd, 0x3e, 0x41, 0x04, 0x23, 0xbf, 0x03, - 0xf9, 0x23, 0xdf, 0xb1, 0x42, 0x62, 0x73, 0x11, 0x3f, 0xbc, 0xea, 0xfe, 0xe7, 0xde, 0x18, 0x06, - 0xa7, 0x91, 0x2d, 0x69, 0xbf, 0x1d, 0xc7, 0xf8, 0x1a, 0x0c, 0xa7, 0xab, 0x70, 0x07, 0xa3, 0x56, - 0x20, 0xe4, 0x71, 0xdd, 0x7a, 0xba, 0x04, 0xb7, 0xb7, 0xff, 0x77, 0x24, 0x18, 0x79, 0x01, 0x57, - 0xcf, 0x91, 0xd3, 0xf7, 0x58, 0xdb, 0xd8, 0x60, 0x3a, 0xcd, 0xb1, 0xc1, 0xf4, 0x81, 0xeb, 0xcf, - 0xe6, 0x6f, 0x7f, 0x88, 0xa9, 0xdc, 0x16, 0xeb, 0x59, 0x94, 0xef, 0xe6, 0x29, 0x8d, 0xb7, 0x32, - 0xa3, 0x59, 0x9c, 0xa1, 0x2c, 0x73, 0x9e, 0xcc, 0xd3, 0x6c, 0x33, 0x4f, 0xeb, 0x2f, 0x0c, 0x67, - 0xd1, 0xba, 0xa7, 0xbf, 0x31, 0x3f, 0xfd, 0x1b, 0x00, 0x00, 0xff, 0xff, 0x73, 0xb0, 0xe9, 0x51, - 0x7b, 0x06, 0x00, 0x00, + // 775 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xed, 
0x8e, 0xdb, 0x44, + 0x14, 0x55, 0x12, 0x36, 0xc9, 0x5e, 0x27, 0xbb, 0xd9, 0x61, 0x55, 0xdc, 0x05, 0xc4, 0x62, 0xa9, + 0x68, 0x2b, 0x41, 0x02, 0x01, 0x21, 0xc1, 0x2f, 0xb4, 0x54, 0xab, 0x56, 0x29, 0x6a, 0xe5, 0xac, + 0xf8, 0xd1, 0x3f, 0x96, 0xe3, 0xdc, 0x3a, 0x23, 0x3b, 0xf6, 0xec, 0xcc, 0xb8, 0x8e, 0x9f, 0x83, + 0x37, 0xe0, 0x99, 0x78, 0x20, 0x34, 0x1f, 0x71, 0xec, 0x14, 0x50, 0xf9, 0xb5, 0xbe, 0xc7, 0x77, + 0xce, 0x3d, 0x7b, 0xce, 0x9d, 0x18, 0x2e, 0x05, 0x8d, 0x33, 0xce, 0xa2, 0x99, 0xfa, 0x8b, 0x7c, + 0xca, 0x78, 0x2e, 0x73, 0x32, 0xb0, 0xa8, 0xf7, 0x1c, 0x60, 0x81, 0xd5, 0xcb, 0x3c, 0x0a, 0x65, + 0xce, 0xc9, 0xe7, 0x00, 0x09, 0x56, 0xc1, 0xdb, 0x70, 0x4b, 0xd3, 0xca, 0xed, 0x5c, 0x77, 0x6e, + 0x4e, 0xfc, 0xd3, 0x04, 0xab, 0x3b, 0x0d, 0x90, 0x4f, 0x41, 0x15, 0x01, 0xcd, 0xd6, 0xb8, 0x73, + 0xbb, 0xfa, 0xed, 0x30, 0xc1, 0xea, 0x85, 0xaa, 0xbd, 0x10, 0xc6, 0x0b, 0xac, 0x9e, 0xa1, 0x88, + 0x38, 0x65, 0x8a, 0xcc, 0x83, 0x31, 0x0f, 0xcb, 0x40, 0x9d, 0x58, 0x55, 0x12, 0x85, 0xe6, 0x1b, + 0xf9, 0x0e, 0x0f, 0xcb, 0x05, 0x56, 0xb7, 0x0a, 0x22, 0x5f, 0xc3, 0x40, 0xbd, 0x4f, 0xf3, 0x48, + 0xf3, 0x39, 0xf3, 0x8f, 0xa7, 0x56, 0xd9, 0xf4, 0x20, 0xcb, 0xef, 0x27, 0xfa, 0xd9, 0xfb, 0x19, + 0x4e, 0xee, 0x77, 0xaf, 0x0a, 0x49, 0x2e, 0xe1, 0xe4, 0x5d, 0x98, 0x16, 0xa8, 0x29, 0x7b, 0xbe, + 0x29, 0x94, 0x3c, 0x96, 0x04, 0x66, 0xbe, 0xa6, 0x1b, 0xf9, 0x43, 0x96, 0x2c, 0x75, 0xed, 0xfd, + 0xd1, 0x85, 0xb3, 0x25, 0x8d, 0xb3, 0x86, 0xc0, 0xef, 0x40, 0xa9, 0x0f, 0xd6, 0x28, 0x22, 0x4d, + 0xe4, 0xcc, 0x1f, 0x35, 0xa7, 0x1f, 0x3a, 0x7d, 0x25, 0x52, 0x95, 0xe4, 0x4b, 0x18, 0x09, 0x9a, + 0xc5, 0x29, 0x06, 0xb2, 0xc4, 0x30, 0xb1, 0x53, 0x1c, 0x83, 0xdd, 0x2b, 0x48, 0xb5, 0xac, 0xf3, + 0x62, 0x55, 0xb7, 0xf4, 0x4c, 0x8b, 0xc1, 0x4c, 0xcb, 0x13, 0x38, 0x2b, 0xa9, 0xcc, 0x50, 0x88, + 0xbd, 0xda, 0x8f, 0x74, 0xd3, 0xd8, 0xa2, 0x46, 0x32, 0xf9, 0x0a, 0xfa, 0x79, 0x21, 0x59, 0x21, + 0xdd, 0x13, 0xad, 0xee, 0xac, 0x56, 0xa7, 0x5d, 0xf0, 0xed, 0x5b, 0xe2, 0x82, 0x8a, 0x73, 0x13, + 0x8a, 0x8d, 0x3b, 
0xb8, 0xee, 0xdc, 0x8c, 0xfd, 0x7d, 0x49, 0xbe, 0x00, 0x87, 0x66, 0xac, 0x90, + 0x36, 0xb2, 0xa1, 0x8e, 0x0c, 0x34, 0x64, 0x42, 0x8b, 0x60, 0xa0, 0x4c, 0xf1, 0xf1, 0x81, 0x5c, + 0xc3, 0x48, 0xc5, 0x25, 0x77, 0xad, 0xb4, 0x80, 0x87, 0xe5, 0xfd, 0xce, 0x84, 0xf5, 0x23, 0x80, + 0x12, 0xa0, 0x0d, 0x13, 0x6e, 0xf7, 0xba, 0x77, 0xe3, 0xcc, 0x3f, 0xa9, 0x35, 0xb5, 0xcd, 0xf5, + 0x4f, 0x85, 0xad, 0x85, 0xf7, 0x04, 0x86, 0x66, 0x88, 0x60, 0xe4, 0x31, 0x0c, 0xd5, 0x14, 0x41, + 0x63, 0x35, 0xa1, 0x77, 0x33, 0xf2, 0x07, 0x3c, 0x2c, 0x97, 0x34, 0x16, 0xde, 0x1d, 0x38, 0x2f, + 0x94, 0x32, 0xfb, 0xdf, 0xbb, 0x30, 0xb0, 0x76, 0xec, 0x1b, 0x6d, 0xa9, 0xb6, 0x54, 0xd0, 0xb8, + 0x1d, 0xb4, 0x1a, 0x67, 0x93, 0x7e, 0x09, 0xe7, 0x0d, 0x1e, 0x3d, 0xf5, 0x27, 0x18, 0x1b, 0x1f, + 0xcc, 0x19, 0xc3, 0xe8, 0xcc, 0x2f, 0x6b, 0xf1, 0xcd, 0x03, 0x23, 0x7a, 0x28, 0x84, 0xf7, 0xda, + 0xac, 0xcd, 0x6f, 0x28, 0x44, 0x18, 0xa3, 0x32, 0x6a, 0x02, 0xbd, 0xad, 0x88, 0xad, 0x3f, 0xea, + 0xf1, 0x7f, 0x6e, 0xf1, 0x0c, 0xce, 0x5b, 0x8c, 0x82, 0x91, 0xcf, 0x40, 0xdb, 0x15, 0xca, 0x82, + 0xa3, 0x25, 0x3e, 0x00, 0xde, 0x1b, 0x98, 0xfc, 0x8e, 0x9c, 0xbe, 0xad, 0xfe, 0x53, 0x44, 0x8b, + 0xa3, 0x7b, 0xc4, 0x41, 0x1e, 0x41, 0x9f, 0x15, 0xab, 0x04, 0x2b, 0xbb, 0x8f, 0xb6, 0xf2, 0x9e, + 0xc2, 0xc5, 0x11, 0xb7, 0x60, 0xf6, 0x7a, 0xd1, 0xb5, 0xa6, 0x1f, 0xfa, 0xa6, 0xf0, 0xfe, 0xec, + 0xc0, 0x64, 0xb9, 0x09, 0x39, 0xae, 0x17, 0x58, 0xf9, 0xf8, 0x50, 0xa0, 0x90, 0xe4, 0x29, 0x4c, + 0x90, 0x6d, 0x70, 0x8b, 0x3c, 0x4c, 0x03, 0x3b, 0xc1, 0x88, 0x3a, 0xaf, 0xf1, 0xd7, 0x1a, 0x26, + 0xdf, 0x7e, 0x88, 0x4b, 0xb7, 0x5d, 0xb7, 0xb3, 0x77, 0xaa, 0x75, 0x41, 0x7b, 0x1f, 0x74, 0x41, + 0xbd, 0x39, 0x5c, 0x34, 0x34, 0x0a, 0x96, 0x67, 0x02, 0xf5, 0xc2, 0x68, 0x30, 0x38, 0xc8, 0x3b, + 0x15, 0xfb, 0xb6, 0xf9, 0x5f, 0x5d, 0xe8, 0x2f, 0xf5, 0xaf, 0x23, 0xf9, 0x01, 0xc6, 0xea, 0xe9, + 0x95, 0xbe, 0x58, 0x7e, 0x58, 0x92, 0x49, 0x6b, 0xbf, 0x7d, 0x7c, 0xb8, 0xba, 0x38, 0x42, 0x04, + 0x23, 0xbf, 0x00, 0xf9, 0x35, 0xdf, 0xb2, 0x42, 0x62, 
0x73, 0x81, 0xdf, 0x3f, 0xea, 0xfe, 0xe3, + 0xbe, 0x19, 0x06, 0xa7, 0xb1, 0x13, 0xa4, 0x7d, 0xab, 0x0e, 0xb1, 0x37, 0x18, 0x8e, 0x57, 0xe8, + 0x0e, 0xc6, 0xad, 0x20, 0xc9, 0xe3, 0xba, 0xf5, 0x78, 0x79, 0xae, 0xae, 0xfe, 0xed, 0x95, 0x60, + 0xe4, 0x39, 0x9c, 0x3f, 0x43, 0x4e, 0xdf, 0x61, 0x6d, 0x63, 0x83, 0xe9, 0x38, 0xfe, 0x06, 0xd3, + 0x7b, 0xae, 0xdf, 0xce, 0xde, 0x7c, 0x13, 0x53, 0xb9, 0x29, 0x56, 0xd3, 0x28, 0xdf, 0xce, 0x52, + 0x1a, 0x6f, 0x64, 0x46, 0xb3, 0x38, 0x43, 0x59, 0xe6, 0x3c, 0x99, 0xa5, 0xd9, 0x7a, 0x96, 0xd6, + 0x5f, 0x26, 0xce, 0xa2, 0x55, 0x5f, 0x7f, 0x9b, 0xbe, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0x7d, + 0x28, 0x4a, 0xad, 0xb3, 0x06, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -882,8 +897,9 @@ type SignerClient interface { // //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key //derivation between the ephemeral public key in the request and the node's - //key specified in the key_loc parameter (or the node's identity private key - //if no key locator is specified): + //key specified in the key_desc parameter. Either a key locator or a raw + //public key is expected in the key_desc, if neither is supplied, defaults to + //the node's identity private key: //P_shared = privKeyNode * ephemeralPubkey //The resulting shared public key is serialized in the compressed format and //hashed with sha256, resulting in the final key length of 256bit. @@ -984,8 +1000,9 @@ type SignerServer interface { // //DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key //derivation between the ephemeral public key in the request and the node's - //key specified in the key_loc parameter (or the node's identity private key - //if no key locator is specified): + //key specified in the key_desc parameter. 
Either a key locator or a raw + //public key is expected in the key_desc, if neither is supplied, defaults to + //the node's identity private key: //P_shared = privKeyNode * ephemeralPubkey //The resulting shared public key is serialized in the compressed format and //hashed with sha256, resulting in the final key length of 256bit. diff --git a/lnrpc/signrpc/signer.proto b/lnrpc/signrpc/signer.proto index 003d1e7e9..d0afc33be 100644 --- a/lnrpc/signrpc/signer.proto +++ b/lnrpc/signrpc/signer.proto @@ -54,8 +54,9 @@ service Signer { /* DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key derivation between the ephemeral public key in the request and the node's - key specified in the key_loc parameter (or the node's identity private key - if no key locator is specified): + key specified in the key_desc parameter. Either a key locator or a raw + public key is expected in the key_desc, if neither is supplied, defaults to + the node's identity private key: P_shared = privKeyNode * ephemeralPubkey The resulting shared public key is serialized in the compressed format and hashed with sha256, resulting in the final key length of 256bit. @@ -220,10 +221,18 @@ message SharedKeyRequest { bytes ephemeral_pubkey = 1; /* - The optional key locator of the local key that should be used. If this - parameter is not set then the node's identity private key will be used. + Deprecated. The optional key locator of the local key that should be used. + If this parameter is not set then the node's identity private key will be + used. */ - KeyLocator key_loc = 2; + KeyLocator key_loc = 2 [deprecated = true]; + + /* + A key descriptor describes the key used for performing ECDH. Either a key + locator or a raw public key is expected, if neither is supplied, defaults to + the node's identity private key. 
+ */ + KeyDescriptor key_desc = 3; } message SharedKeyResponse { diff --git a/lnrpc/signrpc/signer.swagger.json b/lnrpc/signrpc/signer.swagger.json index 594a9b82d..e539204f9 100644 --- a/lnrpc/signrpc/signer.swagger.json +++ b/lnrpc/signrpc/signer.swagger.json @@ -47,7 +47,7 @@ }, "/v2/signer/sharedkey": { "post": { - "summary": "DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key\nderivation between the ephemeral public key in the request and the node's\nkey specified in the key_loc parameter (or the node's identity private key\nif no key locator is specified):\nP_shared = privKeyNode * ephemeralPubkey\nThe resulting shared public key is serialized in the compressed format and\nhashed with sha256, resulting in the final key length of 256bit.", + "summary": "DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key\nderivation between the ephemeral public key in the request and the node's\nkey specified in the key_desc parameter. Either a key locator or a raw\npublic key is expected in the key_desc, if neither is supplied, defaults to\nthe node's identity private key:\nP_shared = privKeyNode * ephemeralPubkey\nThe resulting shared public key is serialized in the compressed format and\nhashed with sha256, resulting in the final key length of 256bit.", "operationId": "DeriveSharedKey", "responses": { "200": { @@ -284,7 +284,11 @@ }, "key_loc": { "$ref": "#/definitions/signrpcKeyLocator", - "description": "The optional key locator of the local key that should be used. If this\nparameter is not set then the node's identity private key will be used." + "description": "Deprecated. The optional key locator of the local key that should be used.\nIf this parameter is not set then the node's identity private key will be\nused." + }, + "key_desc": { + "$ref": "#/definitions/signrpcKeyDescriptor", + "description": "A key descriptor describes the key used for performing ECDH. 
Either a key\nlocator or a raw public key is expected, if neither is supplied, defaults to\nthe node's identity private key." } } }, diff --git a/lnrpc/signrpc/signer_server.go b/lnrpc/signrpc/signer_server.go index 33651d7b3..41faba586 100644 --- a/lnrpc/signrpc/signer_server.go +++ b/lnrpc/signrpc/signer_server.go @@ -19,6 +19,7 @@ import ( "github.com/lightningnetwork/lnd/keychain" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnwire" + "github.com/lightningnetwork/lnd/macaroons" "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" ) @@ -111,8 +112,8 @@ func New(cfg *Config) (*Server, lnrpc.MacaroonPerms, error) { // At this point, we know that the signer macaroon doesn't yet, // exist, so we need to create it with the help of the main // macaroon service. - signerMac, err := cfg.MacService.Oven.NewMacaroon( - context.Background(), bakery.LatestVersion, nil, + signerMac, err := cfg.MacService.NewMacaroon( + context.Background(), macaroons.DefaultRootKeyID, macaroonOps..., ) if err != nil { @@ -253,27 +254,11 @@ func (s *Server) SignOutputRaw(ctx context.Context, in *SignReq) (*SignResp, err // If this method doesn't return nil, then we know that user is // attempting to include a raw serialized pub key. if keyDesc.GetRawKeyBytes() != nil { - rawKeyBytes := keyDesc.GetRawKeyBytes() - - switch { - // If the user provided a raw key, but it's of the - // wrong length, then we'll return with an error. - case len(rawKeyBytes) != 0 && len(rawKeyBytes) != 33: - - return nil, fmt.Errorf("pubkey must be " + - "serialized in compressed format if " + - "specified") - - // If a proper raw key was provided, then we'll attempt - // to decode and parse it. 
- case len(rawKeyBytes) != 0 && len(rawKeyBytes) == 33: - targetPubKey, err = btcec.ParsePubKey( - rawKeyBytes, btcec.S256(), - ) - if err != nil { - return nil, fmt.Errorf("unable to "+ - "parse pubkey: %v", err) - } + targetPubKey, err = parseRawKeyBytes( + keyDesc.GetRawKeyBytes(), + ) + if err != nil { + return nil, err } } @@ -506,38 +491,82 @@ func (s *Server) VerifyMessage(ctx context.Context, // DeriveSharedKey returns a shared secret key by performing Diffie-Hellman key // derivation between the ephemeral public key in the request and the node's -// key specified in the key_loc parameter (or the node's identity private key -// if no key locator is specified): -// P_shared = privKeyNode * ephemeralPubkey +// key specified in the key_desc parameter. Either a key locator or a raw public +// key is expected in the key_desc, if neither is supplied, defaults to the +// node's identity private key. The old key_loc parameter in the request +// shouldn't be used anymore. // The resulting shared public key is serialized in the compressed format and // hashed with sha256, resulting in the final key length of 256bit. func (s *Server) DeriveSharedKey(_ context.Context, in *SharedKeyRequest) ( *SharedKeyResponse, error) { - if len(in.EphemeralPubkey) != 33 { - return nil, fmt.Errorf("ephemeral pubkey must be " + - "serialized in compressed format") - } - ephemeralPubkey, err := btcec.ParsePubKey( - in.EphemeralPubkey, btcec.S256(), - ) + // Check that EphemeralPubkey is valid. + ephemeralPubkey, err := parseRawKeyBytes(in.EphemeralPubkey) if err != nil { - return nil, fmt.Errorf("unable to parse pubkey: %v", err) + return nil, fmt.Errorf("error in ephemeral pubkey: %v", err) + } + if ephemeralPubkey == nil { + return nil, fmt.Errorf("must provide ephemeral pubkey") } - // By default, use the node identity private key. - locator := keychain.KeyLocator{ - Family: keychain.KeyFamilyNodeKey, - Index: 0, + // Check for backward compatibility. 
The caller either specifies the old + // key_loc field, or the new key_desc field, but not both. + if in.KeyDesc != nil && in.KeyLoc != nil { + return nil, fmt.Errorf("use either key_desc or key_loc") } - if in.KeyLoc != nil { - locator.Family = keychain.KeyFamily(in.KeyLoc.KeyFamily) - locator.Index = uint32(in.KeyLoc.KeyIndex) + + // When key_desc is used, the key_desc.key_loc is expected as the caller + // needs to specify the KeyFamily. + if in.KeyDesc != nil && in.KeyDesc.KeyLoc == nil { + return nil, fmt.Errorf("when setting key_desc the field " + + "key_desc.key_loc must also be set") + } + + // We extract two params, rawKeyBytes and keyLoc. Notice their initial + // values will be overwritten if not using the deprecated RPC param. + var rawKeyBytes []byte + keyLoc := in.KeyLoc + if in.KeyDesc != nil { + keyLoc = in.KeyDesc.GetKeyLoc() + rawKeyBytes = in.KeyDesc.GetRawKeyBytes() + } + + // When no keyLoc is supplied, defaults to the node's identity private + // key. + if keyLoc == nil { + keyLoc = &KeyLocator{ + KeyFamily: int32(keychain.KeyFamilyNodeKey), + KeyIndex: 0, + } + } + + // Check the caller is using either the key index or the raw public key + // to perform the ECDH, we can't have both. + if rawKeyBytes != nil && keyLoc.KeyIndex != 0 { + return nil, fmt.Errorf("use either raw_key_bytes or key_index") + } + + // Check the raw public key is valid. Notice that if the rawKeyBytes is + // empty, the parseRawKeyBytes won't return an error, a nil + // *btcec.PublicKey is returned instead. + pk, err := parseRawKeyBytes(rawKeyBytes) + if err != nil { + return nil, fmt.Errorf("error in raw pubkey: %v", err) + } + + // Create a key descriptor. When the KeyIndex is not specified, it uses + // the empty value 0, and when the raw public key is not specified, the + // pk is nil. 
+ keyDescriptor := keychain.KeyDescriptor{ + KeyLocator: keychain.KeyLocator{ + Family: keychain.KeyFamily(keyLoc.KeyFamily), + Index: uint32(keyLoc.KeyIndex), + }, + PubKey: pk, } // Derive the shared key using ECDH and hashing the serialized // compressed shared point. - keyDescriptor := keychain.KeyDescriptor{KeyLocator: locator} sharedKeyHash, err := s.cfg.KeyRing.ECDH(keyDescriptor, ephemeralPubkey) if err != nil { err := fmt.Errorf("unable to derive shared key: %v", err) @@ -547,3 +576,29 @@ func (s *Server) DeriveSharedKey(_ context.Context, in *SharedKeyRequest) ( return &SharedKeyResponse{SharedKey: sharedKeyHash[:]}, nil } + +// parseRawKeyBytes checks that the provided raw public key is valid and returns +// the public key. A nil public key is returned if the length of the rawKeyBytes +// is zero. +func parseRawKeyBytes(rawKeyBytes []byte) (*btcec.PublicKey, error) { + switch { + + case len(rawKeyBytes) == 33: + // If a proper raw key was provided, then we'll attempt + // to decode and parse it. + return btcec.ParsePubKey( + rawKeyBytes, btcec.S256(), + ) + + case len(rawKeyBytes) == 0: + // No key is provided, return nil. + return nil, nil + + default: + // If the user provided a raw key, but it's of the + // wrong length, then we'll return with an error. 
+ return nil, fmt.Errorf("pubkey must be " + + "serialized in compressed format if " + + "specified") + } +} diff --git a/lnrpc/walletrpc/walletkit_server.go b/lnrpc/walletrpc/walletkit_server.go index d93dd04e7..0453ba6a4 100644 --- a/lnrpc/walletrpc/walletkit_server.go +++ b/lnrpc/walletrpc/walletkit_server.go @@ -25,6 +25,7 @@ import ( "github.com/lightningnetwork/lnd/lnwallet" "github.com/lightningnetwork/lnd/lnwallet/btcwallet" "github.com/lightningnetwork/lnd/lnwallet/chainfee" + "github.com/lightningnetwork/lnd/macaroons" "github.com/lightningnetwork/lnd/sweep" "google.golang.org/grpc" "gopkg.in/macaroon-bakery.v2/bakery" @@ -156,8 +157,9 @@ func New(cfg *Config) (*WalletKit, lnrpc.MacaroonPerms, error) { // At this point, we know that the wallet kit macaroon doesn't // yet, exist, so we need to create it with the help of the // main macaroon service. - walletKitMac, err := cfg.MacService.Oven.NewMacaroon( - context.Background(), bakery.LatestVersion, nil, + walletKitMac, err := cfg.MacService.NewMacaroon( + context.Background(), + macaroons.DefaultRootKeyID, macaroonOps..., ) if err != nil { diff --git a/lnrpc/websocket_proxy.go b/lnrpc/websocket_proxy.go index 921b21710..3cb701be0 100644 --- a/lnrpc/websocket_proxy.go +++ b/lnrpc/websocket_proxy.go @@ -21,6 +21,16 @@ const ( // This is necessary because the WebSocket API specifies that a // handshake request must always be done through a GET request. MethodOverrideParam = "method" + + // HeaderWebSocketProtocol is the name of the WebSocket protocol + // exchange header field that we use to transport additional header + // fields. + HeaderWebSocketProtocol = "Sec-Websocket-Protocol" + + // WebSocketProtocolDelimiter is the delimiter we use between the + // additional header field and its value. We use the plus symbol because + // the default delimiters aren't allowed in the protocol names. 
+ WebSocketProtocolDelimiter = "+" ) var ( @@ -32,6 +42,13 @@ var ( "Referer": true, "Grpc-Metadata-Macaroon": true, } + + // defaultProtocolsToAllow are additional header fields that we allow + // to be transported inside of the Sec-Websocket-Protocol field to be + // forwarded to the backend. + defaultProtocolsToAllow = map[string]bool{ + "Grpc-Metadata-Macaroon": true, + } ) // NewWebSocketProxy attempts to expose the underlying handler as a response- @@ -101,13 +118,13 @@ func (p *WebsocketProxy) upgradeToWebSocketProxy(w http.ResponseWriter, p.logger.Errorf("WS: error preparing request:", err) return } - for header := range r.Header { - headerName := textproto.CanonicalMIMEHeaderKey(header) - forward, ok := defaultHeadersToForward[headerName] - if ok && forward { - request.Header.Set(headerName, r.Header.Get(header)) - } - } + + // Allow certain headers to be forwarded, either from source headers + // or the special Sec-Websocket-Protocol header field. + forwardHeaders(r.Header, request.Header) + + // Also allow the target request method to be overwritten, as all + // WebSocket establishment calls MUST be GET requests. if m := r.URL.Query().Get(MethodOverrideParam); m != "" { request.Method = m } @@ -182,6 +199,38 @@ func (p *WebsocketProxy) upgradeToWebSocketProxy(w http.ResponseWriter, } } +// forwardHeaders forwards certain allowed header fields from the source request +// to the target request. Because browsers are limited in what header fields +// they can send on the WebSocket setup call, we also allow additional fields to +// be transported in the special Sec-Websocket-Protocol field. +func forwardHeaders(source, target http.Header) { + // Forward allowed header fields directly. 
+ for header := range source { + headerName := textproto.CanonicalMIMEHeaderKey(header) + forward, ok := defaultHeadersToForward[headerName] + if ok && forward { + target.Set(headerName, source.Get(header)) + } + } + + // Browser aren't allowed to set custom header fields on WebSocket + // requests. We need to allow them to submit the macaroon as a WS + // protocol, which is the only allowed header. Set any "protocols" we + // declare valid as header fields on the forwarded request. + protocol := source.Get(HeaderWebSocketProtocol) + for key := range defaultProtocolsToAllow { + if strings.HasPrefix(protocol, key) { + // The format is "+". We know the + // protocol string starts with the name so we only need + // to set the value. + values := strings.Split( + protocol, WebSocketProtocolDelimiter, + ) + target.Set(key, values[1]) + } + } +} + // newRequestForwardingReader creates a new request forwarding pipe. func newRequestForwardingReader() *requestForwardingReader { r, w := io.Pipe() diff --git a/lntest/btcd.go b/lntest/btcd.go index 3c50e551e..43713d9f8 100644 --- a/lntest/btcd.go +++ b/lntest/btcd.go @@ -1,4 +1,4 @@ -// +build btcd +// +build !bitcoind,!neutrino package lntest @@ -81,6 +81,7 @@ func NewBackend(miner string, netParams *chaincfg.Params) ( "--debuglevel=debug", "--logdir=" + logDir, "--connect=" + miner, + "--nowinservice", } chainBackend, err := rpctest.New(netParams, nil, args) if err != nil { diff --git a/lntest/fee_service.go b/lntest/fee_service.go new file mode 100644 index 000000000..68e7d435a --- /dev/null +++ b/lntest/fee_service.go @@ -0,0 +1,106 @@ +package lntest + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "sync" + + "github.com/lightningnetwork/lnd/lnwallet/chainfee" +) + +const ( + // feeServiceTarget is the confirmation target for which a fee estimate + // is returned. Requests for higher confirmation targets will fall back + // to this. 
+ feeServiceTarget = 2 + + // feeServicePort is the tcp port on which the service runs. + feeServicePort = 16534 +) + +// feeService runs a web service that provides fee estimation information. +type feeService struct { + feeEstimates + + srv *http.Server + wg sync.WaitGroup + + url string + + lock sync.Mutex +} + +// feeEstimates contains the current fee estimates. +type feeEstimates struct { + Fees map[uint32]uint32 `json:"fee_by_block_target"` +} + +// startFeeService spins up a go-routine to serve fee estimates. +func startFeeService() *feeService { + f := feeService{ + url: fmt.Sprintf( + "http://localhost:%v/fee-estimates.json", feeServicePort, + ), + } + + // Initialize default fee estimate. + f.Fees = map[uint32]uint32{feeServiceTarget: 50000} + + listenAddr := fmt.Sprintf(":%v", feeServicePort) + f.srv = &http.Server{ + Addr: listenAddr, + } + + http.HandleFunc("/fee-estimates.json", f.handleRequest) + + f.wg.Add(1) + go func() { + defer f.wg.Done() + + if err := f.srv.ListenAndServe(); err != http.ErrServerClosed { + fmt.Printf("error: cannot start fee api: %v", err) + } + }() + + return &f +} + +// handleRequest handles a client request for fee estimates. +func (f *feeService) handleRequest(w http.ResponseWriter, r *http.Request) { + f.lock.Lock() + defer f.lock.Unlock() + + bytes, err := json.Marshal(f.feeEstimates) + if err != nil { + fmt.Printf("error: cannot serialize "+ + "estimates: %v", err) + + return + } + + _, err = io.WriteString(w, string(bytes)) + if err != nil { + fmt.Printf("error: cannot send estimates: %v", + err) + } +} + +// stop stops the web server. +func (f *feeService) stop() { + if err := f.srv.Shutdown(context.Background()); err != nil { + fmt.Printf("error: cannot stop fee api: %v", err) + } + + f.wg.Wait() +} + +// setFee changes the current fee estimate for the fixed confirmation target. 
+func (f *feeService) setFee(fee chainfee.SatPerKWeight) { + f.lock.Lock() + defer f.lock.Unlock() + + f.Fees[feeServiceTarget] = uint32(fee.FeePerKVByte()) +} diff --git a/lntest/fee_service_test.go b/lntest/fee_service_test.go new file mode 100644 index 000000000..c7ad38c4c --- /dev/null +++ b/lntest/fee_service_test.go @@ -0,0 +1,39 @@ +package lntest + +import ( + "io/ioutil" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +// TestFeeService tests the itest fee estimating web service. +func TestFeeService(t *testing.T) { + service := startFeeService() + defer service.stop() + + service.setFee(5000) + + // Wait for service to start accepting connections. + var resp *http.Response + require.Eventually( + t, + func() bool { + var err error + resp, err = http.Get(service.url) // nolint:bodyclose + return err == nil + }, + 10*time.Second, time.Second, + ) + + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + require.NoError(t, err) + + require.Equal( + t, "{\"fee_by_block_target\":{\"2\":20000}}", string(body), + ) +} diff --git a/lntest/harness.go b/lntest/harness.go index aa20c3b9c..aa56af721 100644 --- a/lntest/harness.go +++ b/lntest/harness.go @@ -22,6 +22,7 @@ import ( "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/lightningnetwork/lnd/lnwallet/chainfee" "github.com/lightningnetwork/lnd/lnwire" "google.golang.org/grpc/grpclog" ) @@ -63,6 +64,10 @@ type NetworkHarness struct { // to main process. lndErrorChan chan error + // feeService is a web service that provides external fee estimates to + // lnd. 
+ feeService *feeService + quit chan struct{} mtx sync.Mutex @@ -75,6 +80,8 @@ type NetworkHarness struct { func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) ( *NetworkHarness, error) { + feeService := startFeeService() + n := NetworkHarness{ activeNodes: make(map[int]*HarnessNode), nodesByPub: make(map[string]*HarnessNode), @@ -84,6 +91,7 @@ func NewNetworkHarness(r *rpctest.Harness, b BackendConfig, lndBinary string) ( netParams: r.ActiveNet, Miner: r, BackendCfg: b, + feeService: feeService, quit: make(chan struct{}), lndBinary: lndBinary, } @@ -251,6 +259,8 @@ func (n *NetworkHarness) TearDownAll() error { close(n.lndErrorChan) close(n.quit) + n.feeService.stop() + return nil } @@ -358,6 +368,7 @@ func (n *NetworkHarness) newNode(name string, extraArgs []string, BackendCfg: n.BackendCfg, NetParams: n.netParams, ExtraArgs: extraArgs, + FeeURL: n.feeService.url, }) if err != nil { return nil, err @@ -452,7 +463,6 @@ func (n *NetworkHarness) EnsureConnected(ctx context.Context, a, b *HarnessNode) err := n.connect(ctx, req, a) switch { - // Request was successful, wait for both to display the // connection. case err == nil: @@ -855,6 +865,11 @@ type OpenChannelParams struct { // MinHtlc is the htlc_minimum_msat value set when opening the channel. MinHtlc lnwire.MilliSatoshi + // RemoteMaxHtlcs is the remote_max_htlcs value set when opening the + // channel, restricting the number of concurrent HTLCs the remote party + // can add to a commitment. + RemoteMaxHtlcs uint16 + // FundingShim is an optional funding shim that the caller can specify // in order to modify the channel funding workflow. FundingShim *lnrpc.FundingShim @@ -874,7 +889,7 @@ func (n *NetworkHarness) OpenChannel(ctx context.Context, // prevents any funding workflows from being kicked off if the chain // isn't yet synced. 
if err := srcNode.WaitForBlockchainSync(ctx); err != nil { - return nil, fmt.Errorf("enable to sync srcNode chain: %v", err) + return nil, fmt.Errorf("unable to sync srcNode chain: %v", err) } if err := destNode.WaitForBlockchainSync(ctx); err != nil { return nil, fmt.Errorf("unable to sync destNode chain: %v", err) @@ -893,6 +908,7 @@ func (n *NetworkHarness) OpenChannel(ctx context.Context, MinConfs: minConfs, SpendUnconfirmed: p.SpendUnconfirmed, MinHtlcMsat: int64(p.MinHtlc), + RemoteMaxHtlcs: uint32(p.RemoteMaxHtlcs), FundingShim: p.FundingShim, } @@ -1204,9 +1220,14 @@ func (n *NetworkHarness) WaitForChannelClose(ctx context.Context, } // AssertChannelExists asserts that an active channel identified by the -// specified channel point exists from the point-of-view of the node. +// specified channel point exists from the point-of-view of the node. It takes +// an optional set of check functions which can be used to make further +// assertions using channel's values. These functions are responsible for +// failing the test themselves if they do not pass. +// nolint: interfacer func (n *NetworkHarness) AssertChannelExists(ctx context.Context, - node *HarnessNode, chanPoint *wire.OutPoint) error { + node *HarnessNode, chanPoint *wire.OutPoint, + checks ...func(*lnrpc.Channel)) error { req := &lnrpc.ListChannelsRequest{} @@ -1218,12 +1239,20 @@ func (n *NetworkHarness) AssertChannelExists(ctx context.Context, for _, channel := range resp.Channels { if channel.ChannelPoint == chanPoint.String() { - if channel.Active { - return nil + // First check whether our channel is active, + // failing early if it is not. + if !channel.Active { + return fmt.Errorf("channel %s inactive", + chanPoint) } - return fmt.Errorf("channel %s inactive", - chanPoint) + // Apply any additional checks that we would + // like to verify. 
+ for _, check := range checks { + check(channel) + } + + return nil } } @@ -1385,6 +1414,10 @@ func (n *NetworkHarness) sendCoins(ctx context.Context, amt btcutil.Amount, return target.WaitForBalance(expectedBalance, true) } +func (n *NetworkHarness) SetFeeEstimate(fee chainfee.SatPerKWeight) { + n.feeService.setFee(fee) +} + // CopyFile copies the file src to dest. func CopyFile(dest, src string) error { s, err := os.Open(src) diff --git a/lntest/itest/lnd_channel_backup_test.go b/lntest/itest/lnd_channel_backup_test.go new file mode 100644 index 000000000..5a8bf87dc --- /dev/null +++ b/lntest/itest/lnd_channel_backup_test.go @@ -0,0 +1,1074 @@ +package itest + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/btcsuite/btcd/wire" + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd/chanbackup" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/lntest/wait" + "github.com/stretchr/testify/require" +) + +// testChannelBackupRestore tests that we're able to recover from, and initiate +// the DLP protocol via: the RPC restore command, restoring on unlock, and +// restoring from initial wallet creation. We'll also alternate between +// restoring form the on disk file, and restoring from the exported RPC command +// as well. +func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { + password := []byte("El Psy Kongroo") + + ctxb := context.Background() + + var testCases = []chanRestoreTestCase{ + // Restore from backups obtained via the RPC interface. Dave + // was the initiator, of the non-advertised channel. 
+ { + name: "restore from RPC backup", + channelsUpdated: false, + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // For this restoration method, we'll grab the + // current multi-channel backup from the old + // node, and use it to restore a new node + // within the closure. + req := &lnrpc.ChanBackupExportRequest{} + chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, req, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + + multi := chanBackup.MultiChanBackup.MultiChanBackup + + // In our nodeRestorer function, we'll restore + // the node from seed, then manually recover + // the channel backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Restore the backup from the on-disk file, using the RPC + // interface. + { + name: "restore from backup file", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. + multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Now that we have Dave's backup file, we'll + // create a new nodeRestorer that will restore + // using the on-disk channels.backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Restore the backup as part of node initialization with the + // prior mnemonic and new backup seed. + { + name: "restore during creation", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // First, fetch the current backup state as is, + // to obtain our latest Multi. 
+ chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, &lnrpc.ChanBackupExportRequest{}, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + backupSnapshot := &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: chanBackup.MultiChanBackup, + } + + // Create a new nodeRestorer that will restore + // the node using the Multi backup we just + // obtained above. + return func() (*lntest.HarnessNode, error) { + return net.RestoreNodeWithSeed( + "dave", nil, password, + mnemonic, 1000, backupSnapshot, + ) + }, nil + }, + }, + + // Restore the backup once the node has already been + // re-created, using the Unlock call. + { + name: "restore during unlock", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // First, fetch the current backup state as is, + // to obtain our latest Multi. + chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, &lnrpc.ChanBackupExportRequest{}, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + backupSnapshot := &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: chanBackup.MultiChanBackup, + } + + // Create a new nodeRestorer that will restore + // the node with its seed, but no channel + // backup, shutdown this initialized node, then + // restart it again using Unlock. + return func() (*lntest.HarnessNode, error) { + newNode, err := net.RestoreNodeWithSeed( + "dave", nil, password, + mnemonic, 1000, nil, + ) + if err != nil { + return nil, err + } + + err = net.RestartNode( + newNode, nil, backupSnapshot, + ) + if err != nil { + return nil, err + } + + return newNode, nil + }, nil + }, + }, + + // Restore the backup from the on-disk file a second time to + // make sure imports can be canceled and later resumed. 
+ { + name: "restore from backup file twice", + initiator: true, + private: false, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. + multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Now that we have Dave's backup file, we'll + // create a new nodeRestorer that will restore + // using the on-disk channels.backup. + backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ + MultiChanBackup: multi, + } + + ctxb := context.Background() + + return func() (*lntest.HarnessNode, error) { + newNode, err := net.RestoreNodeWithSeed( + "dave", nil, password, mnemonic, + 1000, nil, + ) + if err != nil { + return nil, fmt.Errorf("unable to "+ + "restore node: %v", err) + } + + _, err = newNode.RestoreChannelBackups( + ctxb, + &lnrpc.RestoreChanBackupRequest{ + Backup: backup, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable "+ + "to restore backups: %v", + err) + } + + _, err = newNode.RestoreChannelBackups( + ctxb, + &lnrpc.RestoreChanBackupRequest{ + Backup: backup, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable "+ + "to restore backups the"+ + "second time: %v", + err) + } + + return newNode, nil + }, nil + }, + }, + + // Use the channel backup file that contains an unconfirmed + // channel and make sure recovery works as well. + { + name: "restore unconfirmed channel file", + channelsUpdated: false, + initiator: true, + private: false, + unconfirmed: true, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. 
+ multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Let's assume time passes, the channel + // confirms in the meantime but for some reason + // the backup we made while it was still + // unconfirmed is the only backup we have. We + // should still be able to restore it. To + // simulate time passing, we mine some blocks + // to get the channel confirmed _after_ we saved + // the backup. + mineBlocks(t, net, 6, 1) + + // In our nodeRestorer function, we'll restore + // the node from seed, then manually recover + // the channel backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Create a backup using RPC that contains an unconfirmed + // channel and make sure recovery works as well. + { + name: "restore unconfirmed channel RPC", + channelsUpdated: false, + initiator: true, + private: false, + unconfirmed: true, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // For this restoration method, we'll grab the + // current multi-channel backup from the old + // node. The channel should be included, even if + // it is not confirmed yet. + req := &lnrpc.ChanBackupExportRequest{} + chanBackup, err := oldNode.ExportAllChannelBackups( + ctxb, req, + ) + if err != nil { + return nil, fmt.Errorf("unable to obtain "+ + "channel backup: %v", err) + } + chanPoints := chanBackup.MultiChanBackup.ChanPoints + if len(chanPoints) == 0 { + return nil, fmt.Errorf("unconfirmed " + + "channel not included in backup") + } + + // Let's assume time passes, the channel + // confirms in the meantime but for some reason + // the backup we made while it was still + // unconfirmed is the only backup we have. We + // should still be able to restore it. To + // simulate time passing, we mine some blocks + // to get the channel confirmed _after_ we saved + // the backup. 
+ mineBlocks(t, net, 6, 1) + + // In our nodeRestorer function, we'll restore + // the node from seed, then manually recover + // the channel backup. + multi := chanBackup.MultiChanBackup.MultiChanBackup + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + + // Restore the backup from the on-disk file, using the RPC + // interface, for anchor commitment channels. + { + name: "restore from backup file anchors", + initiator: true, + private: false, + anchorCommit: true, + restoreMethod: func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) { + + // Read the entire Multi backup stored within + // this node's channels.backup file. + multi, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return nil, err + } + + // Now that we have Dave's backup file, we'll + // create a new nodeRestorer that will restore + // using the on-disk channels.backup. + return chanRestoreViaRPC( + net, password, mnemonic, multi, + ) + }, + }, + } + + // TODO(roasbeef): online vs offline close? + + // TODO(roasbeef): need to re-trigger the on-disk file once the node + // ann is updated? + + for _, testCase := range testCases { + testCase := testCase + success := t.t.Run(testCase.name, func(t *testing.T) { + h := newHarnessTest(t, net) + + // Start each test with the default static fee estimate. + net.SetFeeEstimate(12500) + + testChanRestoreScenario(h, net, &testCase, password) + }) + if !success { + break + } + } +} + +// testChannelBackupUpdates tests that both the streaming channel update RPC, +// and the on-disk channels.backup are updated each time a channel is +// opened/closed. +func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // First, we'll make a temp directory that we'll use to store our + // backup file, so we can check in on it during the test easily. 
+ backupDir, err := ioutil.TempDir("", "") + if err != nil { + t.Fatalf("unable to create backup dir: %v", err) + } + defer os.RemoveAll(backupDir) + + // First, we'll create a new node, Carol. We'll also create a temporary + // file that Carol will use to store her channel backups. + backupFilePath := filepath.Join( + backupDir, chanbackup.DefaultBackupFileName, + ) + carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath) + carol, err := net.NewNode("carol", []string{carolArgs}) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + // Next, we'll register for streaming notifications for changes to the + // backup file. + backupStream, err := carol.SubscribeChannelBackups( + ctxb, &lnrpc.ChannelBackupSubscription{}, + ) + if err != nil { + t.Fatalf("unable to create backup stream: %v", err) + } + + // We'll use this goroutine to proxy any updates to a channel we can + // easily use below. + var wg sync.WaitGroup + backupUpdates := make(chan *lnrpc.ChanBackupSnapshot) + streamErr := make(chan error) + streamQuit := make(chan struct{}) + + wg.Add(1) + go func() { + defer wg.Done() + for { + snapshot, err := backupStream.Recv() + if err != nil { + select { + case streamErr <- err: + case <-streamQuit: + return + } + } + + select { + case backupUpdates <- snapshot: + case <-streamQuit: + return + } + } + }() + defer close(streamQuit) + + // With Carol up, we'll now connect her to Alice, and open a channel + // between them. + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { + t.Fatalf("unable to connect carol to alice: %v", err) + } + + // Next, we'll open two channels between Alice and Carol back to back. 
+ var chanPoints []*lnrpc.ChannelPoint + numChans := 2 + chanAmt := btcutil.Amount(1000000) + for i := 0; i < numChans; i++ { + ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + chanPoint := openChannelAndAssert( + ctxt, t, net, net.Alice, carol, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + + chanPoints = append(chanPoints, chanPoint) + } + + // Using this helper function, we'll maintain a pointer to the latest + // channel backup so we can compare it to the on disk state. + var currentBackup *lnrpc.ChanBackupSnapshot + assertBackupNtfns := func(numNtfns int) { + for i := 0; i < numNtfns; i++ { + select { + case err := <-streamErr: + t.Fatalf("error with backup stream: %v", err) + + case currentBackup = <-backupUpdates: + + case <-time.After(time.Second * 5): + t.Fatalf("didn't receive channel backup "+ + "notification %v", i+1) + } + } + } + + // assertBackupFileState is a helper function that we'll use to compare + // the on disk back up file to our currentBackup pointer above. + assertBackupFileState := func() { + err := wait.NoError(func() error { + packedBackup, err := ioutil.ReadFile(backupFilePath) + if err != nil { + return fmt.Errorf("unable to read backup "+ + "file: %v", err) + } + + // As each back up file will be encrypted with a fresh + // nonce, we can't compare them directly, so instead + // we'll compare the length which is a proxy for the + // number of channels that the multi-backup contains. + rawBackup := currentBackup.MultiChanBackup.MultiChanBackup + if len(rawBackup) != len(packedBackup) { + return fmt.Errorf("backup files don't match: "+ + "expected %x got %x", rawBackup, packedBackup) + } + + // Additionally, we'll assert that both backups up + // returned are valid. 
+ for i, backup := range [][]byte{rawBackup, packedBackup} { + snapshot := &lnrpc.ChanBackupSnapshot{ + MultiChanBackup: &lnrpc.MultiChanBackup{ + MultiChanBackup: backup, + }, + } + _, err := carol.VerifyChanBackup(ctxb, snapshot) + if err != nil { + return fmt.Errorf("unable to verify "+ + "backup #%d: %v", i, err) + } + } + + return nil + }, time.Second*15) + if err != nil { + t.Fatalf("backup state invalid: %v", err) + } + } + + // As these two channels were just opened, we should've got two times + // the pending and open notifications for channel backups. + assertBackupNtfns(2 * 2) + + // The on disk file should also exactly match the latest backup that we + // have. + assertBackupFileState() + + // Next, we'll close the channels one by one. After each channel + // closure, we should get a notification, and the on-disk state should + // match this state as well. + for i := 0; i < numChans; i++ { + // To ensure force closes also trigger an update, we'll force + // close half of the channels. + forceClose := i%2 == 0 + + chanPoint := chanPoints[i] + + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert( + ctxt, t, net, net.Alice, chanPoint, forceClose, + ) + + // We should get a single notification after closing, and the + // on-disk state should match this latest notifications. + assertBackupNtfns(1) + assertBackupFileState() + + // If we force closed the channel, then we'll mine enough + // blocks to ensure all outputs have been swept. + if forceClose { + cleanupForceClose(t, net, net.Alice, chanPoint) + } + } +} + +// testExportChannelBackup tests that we're able to properly export either a +// targeted channel's backup, or export backups of all the currents open +// channels. +func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // First, we'll create our primary test node: Carol. 
We'll use Carol to + // open channels and also export backups that we'll examine throughout + // the test. + carol, err := net.NewNode("carol", nil) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + // With Carol up, we'll now connect her to Alice, and open a channel + // between them. + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { + t.Fatalf("unable to connect carol to alice: %v", err) + } + + // Next, we'll open two channels between Alice and Carol back to back. + var chanPoints []*lnrpc.ChannelPoint + numChans := 2 + chanAmt := btcutil.Amount(1000000) + for i := 0; i < numChans; i++ { + ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) + chanPoint := openChannelAndAssert( + ctxt, t, net, net.Alice, carol, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + + chanPoints = append(chanPoints, chanPoint) + } + + // Now that the channels are open, we should be able to fetch the + // backups of each of the channels. + for _, chanPoint := range chanPoints { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + req := &lnrpc.ExportChannelBackupRequest{ + ChanPoint: chanPoint, + } + chanBackup, err := carol.ExportChannelBackup(ctxt, req) + if err != nil { + t.Fatalf("unable to fetch backup for channel %v: %v", + chanPoint, err) + } + + // The returned backup should be full populated. Since it's + // encrypted, we can't assert any more than that atm. + if len(chanBackup.ChanBackup) == 0 { + t.Fatalf("obtained empty backup for channel: %v", chanPoint) + } + + // The specified chanPoint in the response should match our + // requested chanPoint. + if chanBackup.ChanPoint.String() != chanPoint.String() { + t.Fatalf("chanPoint mismatched: expected %v, got %v", + chanPoint.String(), + chanBackup.ChanPoint.String()) + } + } + + // Before we proceed, we'll make two utility methods we'll use below + // for our primary assertions. 
+ assertNumSingleBackups := func(numSingles int) { + err := wait.NoError(func() error { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + req := &lnrpc.ChanBackupExportRequest{} + chanSnapshot, err := carol.ExportAllChannelBackups( + ctxt, req, + ) + if err != nil { + return fmt.Errorf("unable to export channel "+ + "backup: %v", err) + } + + if chanSnapshot.SingleChanBackups == nil { + return fmt.Errorf("single chan backups not " + + "populated") + } + + backups := chanSnapshot.SingleChanBackups.ChanBackups + if len(backups) != numSingles { + return fmt.Errorf("expected %v singles, "+ + "got %v", len(backups), numSingles) + } + + return nil + }, defaultTimeout) + if err != nil { + t.Fatalf(err.Error()) + } + } + assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + req := &lnrpc.ChanBackupExportRequest{} + chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req) + if err != nil { + t.Fatalf("unable to export channel backup: %v", err) + } + + return func(found bool, chanPoints map[wire.OutPoint]struct{}) { + switch { + case found && chanSnapshot.MultiChanBackup == nil: + t.Fatalf("multi-backup not present") + + case !found && chanSnapshot.MultiChanBackup != nil && + (len(chanSnapshot.MultiChanBackup.MultiChanBackup) != + chanbackup.NilMultiSizePacked): + + t.Fatalf("found multi-backup when non should " + + "be found") + } + + if !found { + return + } + + backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints + if len(chanPoints) != len(backedUpChans) { + t.Fatalf("expected %v chans got %v", len(chanPoints), + len(backedUpChans)) + } + + for _, chanPoint := range backedUpChans { + wirePoint := rpcPointToWirePoint(t, chanPoint) + if _, ok := chanPoints[wirePoint]; !ok { + t.Fatalf("unexpected backup: %v", wirePoint) + } + } + } + } + + chans := make(map[wire.OutPoint]struct{}) + for _, chanPoint := range chanPoints { + chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{} + } + 
+ // We should have exactly two single channel backups contained, and we + // should also have a multi-channel backup. + assertNumSingleBackups(2) + assertMultiBackupFound()(true, chans) + + // We'll now close each channel on by one. After we close a channel, we + // shouldn't be able to find that channel as a backup still. We should + // also have one less single written to disk. + for i, chanPoint := range chanPoints { + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert( + ctxt, t, net, net.Alice, chanPoint, false, + ) + + assertNumSingleBackups(len(chanPoints) - i - 1) + + delete(chans, rpcPointToWirePoint(t, chanPoint)) + assertMultiBackupFound()(true, chans) + } + + // At this point we shouldn't have any single or multi-chan backups at + // all. + assertNumSingleBackups(0) + assertMultiBackupFound()(false, nil) +} + +// nodeRestorer is a function closure that allows each chanRestoreTestCase to +// control exactly *how* the prior node is restored. This might be using an +// backup obtained over RPC, or the file system, etc. +type nodeRestorer func() (*lntest.HarnessNode, error) + +// chanRestoreTestCase describes a test case for an end to end SCB restoration +// work flow. One node will start from scratch using an existing SCB. At the +// end of the est, both nodes should be made whole via the DLP protocol. +type chanRestoreTestCase struct { + // name is the name of the target test case. + name string + + // channelsUpdated is false then this means that no updates + // have taken place within the channel before restore. + // Otherwise, HTLCs will be settled between the two parties + // before restoration modifying the balance beyond the initial + // allocation. + channelsUpdated bool + + // initiator signals if Dave should be the one that opens the + // channel to Alice, or if it should be the other way around. + initiator bool + + // private signals if the channel from Dave to Carol should be + // private or not. 
+ private bool + + // unconfirmed signals if the channel from Dave to Carol should be + // confirmed or not. + unconfirmed bool + + // anchorCommit is true, then the new anchor commitment type will be + // used for the channels created in the test. + anchorCommit bool + + // restoreMethod takes an old node, then returns a function + // closure that'll return the same node, but with its state + // restored via a custom method. We use this to abstract away + // _how_ a node is restored from our assertions once the node + // has been fully restored itself. + restoreMethod func(oldNode *lntest.HarnessNode, + backupFilePath string, + mnemonic []string) (nodeRestorer, error) +} + +// testChanRestoreScenario executes a chanRestoreTestCase from end to end, +// ensuring that after Dave restores his channel state according to the +// testCase, the DLP protocol is executed properly and both nodes are made +// whole. +func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, + testCase *chanRestoreTestCase, password []byte) { + + const ( + chanAmt = btcutil.Amount(10000000) + pushAmt = btcutil.Amount(5000000) + ) + + ctxb := context.Background() + + var nodeArgs []string + if testCase.anchorCommit { + nodeArgs = commitTypeAnchors.Args() + } + + // First, we'll create a brand new node we'll use within the test. If + // we have a custom backup file specified, then we'll also create that + // for use. + dave, mnemonic, err := net.NewNodeWithSeed( + "dave", nodeArgs, password, + ) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + // Defer to a closure instead of to shutdownAndAssert due to the value + // of 'dave' changing throughout the test. 
+ defer func() { + shutdownAndAssert(net, t, dave) + }() + carol, err := net.NewNode("carol", nodeArgs) + if err != nil { + t.Fatalf("unable to make new node: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + // Now that our new nodes are created, we'll give them some coins for + // channel opening and anchor sweeping. + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) + if err != nil { + t.Fatalf("unable to send coins to dave: %v", err) + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, dave) + if err != nil { + t.Fatalf("unable to send coins to dave: %v", err) + } + + var from, to *lntest.HarnessNode + if testCase.initiator { + from, to = dave, carol + } else { + from, to = carol, dave + } + + // Next, we'll connect Dave to Carol, and open a new channel to her + // with a portion pushed. + if err := net.ConnectNodes(ctxt, dave, carol); err != nil { + t.Fatalf("unable to connect dave to carol: %v", err) + } + + // We will either open a confirmed or unconfirmed channel, depending on + // the requirements of the test case. + switch { + case testCase.unconfirmed: + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + _, err := net.OpenPendingChannel( + ctxt, from, to, chanAmt, pushAmt, + ) + if err != nil { + t.Fatalf("couldn't open pending channel: %v", err) + } + + // Give the pubsub some time to update the channel backup. 
+ err = wait.NoError(func() error { + fi, err := os.Stat(dave.ChanBackupPath()) + if err != nil { + return err + } + if fi.Size() <= chanbackup.NilMultiSizePacked { + return fmt.Errorf("backup file empty") + } + return nil + }, defaultTimeout) + if err != nil { + t.Fatalf("channel backup not updated in time: %v", err) + } + + default: + ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) + chanPoint := openChannelAndAssert( + ctxt, t, net, from, to, + lntest.OpenChannelParams{ + Amt: chanAmt, + PushAmt: pushAmt, + Private: testCase.private, + }, + ) + + // Wait for both sides to see the opened channel. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("dave didn't report channel: %v", err) + } + err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) + if err != nil { + t.Fatalf("carol didn't report channel: %v", err) + } + } + + // If both parties should start with existing channel updates, then + // we'll send+settle an HTLC between 'from' and 'to' now. + if testCase.channelsUpdated { + invoice := &lnrpc.Invoice{ + Memo: "testing", + Value: 10000, + } + invoiceResp, err := to.AddInvoice(ctxt, invoice) + if err != nil { + t.Fatalf("unable to add invoice: %v", err) + } + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + err = completePaymentRequests( + ctxt, from, from.RouterClient, + []string{invoiceResp.PaymentRequest}, true, + ) + if err != nil { + t.Fatalf("unable to complete payments: %v", err) + } + } + + // Before we start the recovery, we'll record the balances of both + // Carol and Dave to ensure they both sweep their coins at the end. 
+ balReq := &lnrpc.WalletBalanceRequest{} + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + carolBalResp, err := carol.WalletBalance(ctxt, balReq) + if err != nil { + t.Fatalf("unable to get carol's balance: %v", err) + } + carolStartingBalance := carolBalResp.ConfirmedBalance + + daveBalance, err := dave.WalletBalance(ctxt, balReq) + if err != nil { + t.Fatalf("unable to get carol's balance: %v", err) + } + daveStartingBalance := daveBalance.ConfirmedBalance + + // At this point, we'll now execute the restore method to give us the + // new node we should attempt our assertions against. + backupFilePath := dave.ChanBackupPath() + restoredNodeFunc, err := testCase.restoreMethod( + dave, backupFilePath, mnemonic, + ) + if err != nil { + t.Fatalf("unable to prep node restoration: %v", err) + } + + // Now that we're able to make our restored now, we'll shutdown the old + // Dave node as we'll be storing it shortly below. + shutdownAndAssert(net, t, dave) + + // To make sure the channel state is advanced correctly if the channel + // peer is not online at first, we also shutdown Carol. + restartCarol, err := net.SuspendNode(carol) + require.NoError(t.t, err) + + // Next, we'll make a new Dave and start the bulk of our recovery + // workflow. + dave, err = restoredNodeFunc() + if err != nil { + t.Fatalf("unable to restore node: %v", err) + } + + // First ensure that the on-chain balance is restored. + err = wait.NoError(func() error { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + balReq := &lnrpc.WalletBalanceRequest{} + daveBalResp, err := dave.WalletBalance(ctxt, balReq) + if err != nil { + return err + } + + daveBal := daveBalResp.ConfirmedBalance + if daveBal <= 0 { + return fmt.Errorf("expected positive balance, had %v", + daveBal) + } + + return nil + }, defaultTimeout) + if err != nil { + t.Fatalf("On-chain balance not restored: %v", err) + } + + // We now check that the restored channel is in the proper state. 
It + // should not yet be force closing as no connection with the remote + // peer was established yet. We should also not be able to close the + // channel. + assertNumPendingChannels(t, dave, 1, 0) + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + pendingChanResp, err := dave.PendingChannels( + ctxt, &lnrpc.PendingChannelsRequest{}, + ) + require.NoError(t.t, err) + + // We also want to make sure we cannot force close in this state. That + // would get the state machine in a weird state. + chanPointParts := strings.Split( + pendingChanResp.WaitingCloseChannels[0].Channel.ChannelPoint, + ":", + ) + chanPointIndex, _ := strconv.ParseUint(chanPointParts[1], 10, 32) + resp, err := dave.CloseChannel(ctxt, &lnrpc.CloseChannelRequest{ + ChannelPoint: &lnrpc.ChannelPoint{ + FundingTxid: &lnrpc.ChannelPoint_FundingTxidStr{ + FundingTxidStr: chanPointParts[0], + }, + OutputIndex: uint32(chanPointIndex), + }, + Force: true, + }) + + // We don't get an error directly but only when reading the first + // message of the stream. + require.NoError(t.t, err) + _, err = resp.Recv() + require.Error(t.t, err) + require.Contains(t.t, err.Error(), "cannot close channel with state: ") + require.Contains(t.t, err.Error(), "ChanStatusRestored") + + // Now that we have ensured that the channels restored by the backup are + // in the correct state even without the remote peer telling us so, + // let's start up Carol again. + err = restartCarol() + require.NoError(t.t, err) + + // Now that we have our new node up, we expect that it'll re-connect to + // Carol automatically based on the restored backup. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = net.EnsureConnected(ctxt, dave, carol) + if err != nil { + t.Fatalf("node didn't connect after recovery: %v", err) + } + + // TODO(roasbeef): move dave restarts? + + // Now we'll assert that both sides properly execute the DLP protocol. 
+ // We grab their balances now to ensure that they're made whole at the + // end of the protocol. + assertDLPExecuted( + net, t, carol, carolStartingBalance, dave, daveStartingBalance, + testCase.anchorCommit, + ) +} + +// chanRestoreViaRPC is a helper test method that returns a nodeRestorer +// instance which will restore the target node from a password+seed, then +// trigger a SCB restore using the RPC interface. +func chanRestoreViaRPC(net *lntest.NetworkHarness, + password []byte, mnemonic []string, + multi []byte) (nodeRestorer, error) { + + backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ + MultiChanBackup: multi, + } + + ctxb := context.Background() + + return func() (*lntest.HarnessNode, error) { + newNode, err := net.RestoreNodeWithSeed( + "dave", nil, password, mnemonic, 1000, nil, + ) + if err != nil { + return nil, fmt.Errorf("unable to "+ + "restore node: %v", err) + } + + _, err = newNode.RestoreChannelBackups( + ctxb, &lnrpc.RestoreChanBackupRequest{ + Backup: backup, + }, + ) + if err != nil { + return nil, fmt.Errorf("unable "+ + "to restore backups: %v", err) + } + + return newNode, nil + }, nil +} diff --git a/lntest/itest/lnd_forward_interceptor_test.go b/lntest/itest/lnd_forward_interceptor_test.go index 0a72d4778..6c54756f8 100644 --- a/lntest/itest/lnd_forward_interceptor_test.go +++ b/lntest/itest/lnd_forward_interceptor_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -122,7 +120,7 @@ func testForwardInterceptor(net *lntest.NetworkHarness, t *harnessTest) { t.t.Errorf("expected payment to fail, instead got %v", attempt.Status) } - // For settle and resume we make sure the payment is successfull. + // For settle and resume we make sure the payment is successful. case routerrpc.ResolveHoldForwardAction_SETTLE: fallthrough @@ -166,7 +164,7 @@ func testForwardInterceptor(net *lntest.NetworkHarness, t *harnessTest) { } // For all other packets we resolve according to the test case. 
- interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{ + _ = interceptor.Send(&routerrpc.ForwardHtlcInterceptResponse{ IncomingCircuitKey: request.IncomingCircuitKey, Action: testCase.interceptorAction, Preimage: testCase.invoice.RPreimage, diff --git a/lntest/itest/lnd_macaroons_test.go b/lntest/itest/lnd_macaroons_test.go new file mode 100644 index 000000000..93ef10b00 --- /dev/null +++ b/lntest/itest/lnd_macaroons_test.go @@ -0,0 +1,514 @@ +package itest + +import ( + "context" + "encoding/hex" + "sort" + "strconv" + "testing" + + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/lightningnetwork/lnd/macaroons" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "gopkg.in/macaroon.v2" +) + +// testMacaroonAuthentication makes sure that if macaroon authentication is +// enabled on the gRPC interface, no requests with missing or invalid +// macaroons are allowed. Further, the specific access rights (read/write, +// entity based) and first-party caveats are tested as well. +func testMacaroonAuthentication(net *lntest.NetworkHarness, t *harnessTest) { + var ( + infoReq = &lnrpc.GetInfoRequest{} + newAddrReq = &lnrpc.NewAddressRequest{ + Type: AddrTypeWitnessPubkeyHash, + } + testNode = net.Alice + ) + + testCases := []struct { + name string + run func(ctxt context.Context, t *testing.T) + }{{ + // First test: Make sure we get an error if we use no macaroons + // but try to connect to a node that has macaroon authentication + // enabled. + name: "no macaroon", + run: func(ctxt context.Context, t *testing.T) { + conn, err := testNode.ConnectRPC(false) + require.NoError(t, err) + defer func() { _ = conn.Close() }() + client := lnrpc.NewLightningClient(conn) + _, err = client.GetInfo(ctxt, infoReq) + require.Error(t, err) + require.Contains(t, err.Error(), "expected 1 macaroon") + }, + }, { + // Second test: Ensure that an invalid macaroon also triggers an + // error. 
+ name: "invalid macaroon", + run: func(ctxt context.Context, t *testing.T) { + invalidMac, _ := macaroon.New( + []byte("dummy_root_key"), []byte("0"), "itest", + macaroon.LatestVersion, + ) + cleanup, client := macaroonClient( + t, testNode, invalidMac, + ) + defer cleanup() + _, err := client.GetInfo(ctxt, infoReq) + require.Error(t, err) + require.Contains(t, err.Error(), "cannot get macaroon") + }, + }, { + // Third test: Try to access a write method with read-only + // macaroon. + name: "read only macaroon", + run: func(ctxt context.Context, t *testing.T) { + readonlyMac, err := testNode.ReadMacaroon( + testNode.ReadMacPath(), defaultTimeout, + ) + require.NoError(t, err) + cleanup, client := macaroonClient( + t, testNode, readonlyMac, + ) + defer cleanup() + _, err = client.NewAddress(ctxt, newAddrReq) + require.Error(t, err) + require.Contains(t, err.Error(), "permission denied") + }, + }, { + // Fourth test: Check first-party caveat with timeout that + // expired 30 seconds ago. + name: "expired macaroon", + run: func(ctxt context.Context, t *testing.T) { + readonlyMac, err := testNode.ReadMacaroon( + testNode.ReadMacPath(), defaultTimeout, + ) + require.NoError(t, err) + timeoutMac, err := macaroons.AddConstraints( + readonlyMac, macaroons.TimeoutConstraint(-30), + ) + require.NoError(t, err) + cleanup, client := macaroonClient( + t, testNode, timeoutMac, + ) + defer cleanup() + _, err = client.GetInfo(ctxt, infoReq) + require.Error(t, err) + require.Contains(t, err.Error(), "macaroon has expired") + }, + }, { + // Fifth test: Check first-party caveat with invalid IP address. 
+ name: "invalid IP macaroon", + run: func(ctxt context.Context, t *testing.T) { + readonlyMac, err := testNode.ReadMacaroon( + testNode.ReadMacPath(), defaultTimeout, + ) + require.NoError(t, err) + invalidIPAddrMac, err := macaroons.AddConstraints( + readonlyMac, macaroons.IPLockConstraint( + "1.1.1.1", + ), + ) + require.NoError(t, err) + cleanup, client := macaroonClient( + t, testNode, invalidIPAddrMac, + ) + defer cleanup() + _, err = client.GetInfo(ctxt, infoReq) + require.Error(t, err) + require.Contains(t, err.Error(), "different IP address") + }, + }, { + // Sixth test: Make sure that if we do everything correct and + // send the admin macaroon with first-party caveats that we can + // satisfy, we get a correct answer. + name: "correct macaroon", + run: func(ctxt context.Context, t *testing.T) { + adminMac, err := testNode.ReadMacaroon( + testNode.AdminMacPath(), defaultTimeout, + ) + require.NoError(t, err) + adminMac, err = macaroons.AddConstraints( + adminMac, macaroons.TimeoutConstraint(30), + macaroons.IPLockConstraint("127.0.0.1"), + ) + require.NoError(t, err) + cleanup, client := macaroonClient(t, testNode, adminMac) + defer cleanup() + res, err := client.NewAddress(ctxt, newAddrReq) + require.NoError(t, err, "get new address") + assert.Contains(t, res.Address, "bcrt1") + }, + }, { + // Seventh test: Bake a macaroon that can only access exactly + // two RPCs and make sure it works as expected. + name: "custom URI permissions", + run: func(ctxt context.Context, t *testing.T) { + entity := macaroons.PermissionEntityCustomURI + req := &lnrpc.BakeMacaroonRequest{ + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: entity, + Action: "/lnrpc.Lightning/GetInfo", + }, { + Entity: entity, + Action: "/lnrpc.Lightning/List" + + "Permissions", + }}, + } + bakeRes, err := testNode.BakeMacaroon(ctxt, req) + require.NoError(t, err) + + // Create a connection that uses the custom macaroon. 
+ customMacBytes, err := hex.DecodeString( + bakeRes.Macaroon, + ) + require.NoError(t, err) + customMac := &macaroon.Macaroon{} + err = customMac.UnmarshalBinary(customMacBytes) + require.NoError(t, err) + cleanup, client := macaroonClient( + t, testNode, customMac, + ) + defer cleanup() + + // Call GetInfo which should succeed. + _, err = client.GetInfo(ctxt, infoReq) + require.NoError(t, err) + + // Call ListPermissions which should also succeed. + permReq := &lnrpc.ListPermissionsRequest{} + permRes, err := client.ListPermissions(ctxt, permReq) + require.NoError(t, err) + require.Greater( + t, len(permRes.MethodPermissions), 10, + "permissions", + ) + + // Try NewAddress which should be denied. + _, err = client.NewAddress(ctxt, newAddrReq) + require.Error(t, err) + require.Contains(t, err.Error(), "permission denied") + }, + }} + + for _, tc := range testCases { + tc := tc + t.t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctxt, cancel := context.WithTimeout( + context.Background(), defaultTimeout, + ) + defer cancel() + + tc.run(ctxt, t) + }) + } +} + +// testBakeMacaroon checks that when creating macaroons, the permissions param +// in the request must be set correctly, and the baked macaroon has the intended +// permissions. +func testBakeMacaroon(net *lntest.NetworkHarness, t *harnessTest) { + var testNode = net.Alice + + testCases := []struct { + name string + run func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) + }{{ + // First test: when the permission list is empty in the request, + // an error should be returned. 
+ name: "no permission list", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + req := &lnrpc.BakeMacaroonRequest{} + _, err := adminClient.BakeMacaroon(ctxt, req) + require.Error(t, err) + assert.Contains( + t, err.Error(), "permission list cannot be "+ + "empty", + ) + }, + }, { + // Second test: when the action in the permission list is not + // valid, an error should be returned. + name: "invalid permission list", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + req := &lnrpc.BakeMacaroonRequest{ + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "macaroon", + Action: "invalid123", + }}, + } + _, err := adminClient.BakeMacaroon(ctxt, req) + require.Error(t, err) + assert.Contains( + t, err.Error(), "invalid permission action", + ) + }, + }, { + // Third test: when the entity in the permission list is not + // valid, an error should be returned. + name: "invalid permission entity", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + req := &lnrpc.BakeMacaroonRequest{ + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "invalid123", + Action: "read", + }}, + } + _, err := adminClient.BakeMacaroon(ctxt, req) + require.Error(t, err) + assert.Contains( + t, err.Error(), "invalid permission entity", + ) + }, + }, { + // Fourth test: check that when no root key ID is specified, the + // default root keyID is used. 
+ name: "default root key ID", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + req := &lnrpc.BakeMacaroonRequest{ + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "macaroon", + Action: "read", + }}, + } + _, err := adminClient.BakeMacaroon(ctxt, req) + require.NoError(t, err) + + listReq := &lnrpc.ListMacaroonIDsRequest{} + resp, err := adminClient.ListMacaroonIDs(ctxt, listReq) + require.NoError(t, err) + require.Equal(t, resp.RootKeyIds[0], uint64(0)) + }, + }, { + // Fifth test: create a macaroon use a non-default root key ID. + name: "custom root key ID", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + rootKeyID := uint64(4200) + req := &lnrpc.BakeMacaroonRequest{ + RootKeyId: rootKeyID, + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "macaroon", + Action: "read", + }}, + } + _, err := adminClient.BakeMacaroon(ctxt, req) + require.NoError(t, err) + + listReq := &lnrpc.ListMacaroonIDsRequest{} + resp, err := adminClient.ListMacaroonIDs(ctxt, listReq) + require.NoError(t, err) + + // the ListMacaroonIDs should give a list of two IDs, + // the default ID 0, and the newly created ID. The + // returned response is sorted to guarantee the order so + // that we can compare them one by one. + sort.Slice(resp.RootKeyIds, func(i, j int) bool { + return resp.RootKeyIds[i] < resp.RootKeyIds[j] + }) + require.Equal(t, resp.RootKeyIds[0], uint64(0)) + require.Equal(t, resp.RootKeyIds[1], rootKeyID) + }, + }, { + // Sixth test: check the baked macaroon has the intended + // permissions. It should succeed in reading, and fail to write + // a macaroon. 
+ name: "custom macaroon permissions", + run: func(ctxt context.Context, t *testing.T, + adminClient lnrpc.LightningClient) { + + rootKeyID := uint64(4200) + req := &lnrpc.BakeMacaroonRequest{ + RootKeyId: rootKeyID, + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "macaroon", + Action: "read", + }}, + } + bakeResp, err := adminClient.BakeMacaroon(ctxt, req) + require.NoError(t, err) + + newMac, err := readMacaroonFromHex(bakeResp.Macaroon) + require.NoError(t, err) + cleanup, readOnlyClient := macaroonClient( + t, testNode, newMac, + ) + defer cleanup() + + // BakeMacaroon requires a write permission, so this + // call should return an error. + _, err = readOnlyClient.BakeMacaroon(ctxt, req) + require.Error(t, err) + require.Contains(t, err.Error(), "permission denied") + + // ListMacaroon requires a read permission, so this call + // should succeed. + listReq := &lnrpc.ListMacaroonIDsRequest{} + _, err = readOnlyClient.ListMacaroonIDs(ctxt, listReq) + require.NoError(t, err) + + // Current macaroon can only work on entity macaroon, so + // a GetInfo request will fail. + infoReq := &lnrpc.GetInfoRequest{} + _, err = readOnlyClient.GetInfo(ctxt, infoReq) + require.Error(t, err) + require.Contains(t, err.Error(), "permission denied") + }, + }} + + for _, tc := range testCases { + tc := tc + t.t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctxt, cancel := context.WithTimeout( + context.Background(), defaultTimeout, + ) + defer cancel() + + adminMac, err := testNode.ReadMacaroon( + testNode.AdminMacPath(), defaultTimeout, + ) + require.NoError(t, err) + cleanup, client := macaroonClient(t, testNode, adminMac) + defer cleanup() + + tc.run(ctxt, t, client) + }) + } +} + +// testDeleteMacaroonID checks that when deleting a macaroon ID, it removes the +// specified ID and invalidates all macaroons derived from the key with that ID. +// Also, it checks that deleting the reserved macaroon ID, DefaultRootKeyID, is +// forbidden. 
+func testDeleteMacaroonID(net *lntest.NetworkHarness, t *harnessTest) { + var ( + ctxb = context.Background() + testNode = net.Alice + ) + ctxt, cancel := context.WithTimeout(ctxb, defaultTimeout) + defer cancel() + + // Use admin macaroon to create a connection. + adminMac, err := testNode.ReadMacaroon( + testNode.AdminMacPath(), defaultTimeout, + ) + require.NoError(t.t, err) + cleanup, client := macaroonClient(t.t, testNode, adminMac) + defer cleanup() + + // Record the number of macaroon IDs before creation. + listReq := &lnrpc.ListMacaroonIDsRequest{} + listResp, err := client.ListMacaroonIDs(ctxt, listReq) + require.NoError(t.t, err) + numMacIDs := len(listResp.RootKeyIds) + + // Create macaroons for testing. + rootKeyIDs := []uint64{1, 2, 3} + macList := make([]string, 0, len(rootKeyIDs)) + for _, id := range rootKeyIDs { + req := &lnrpc.BakeMacaroonRequest{ + RootKeyId: id, + Permissions: []*lnrpc.MacaroonPermission{{ + Entity: "macaroon", + Action: "read", + }}, + } + resp, err := client.BakeMacaroon(ctxt, req) + require.NoError(t.t, err) + macList = append(macList, resp.Macaroon) + } + + // Check that the creation is successful. + listReq = &lnrpc.ListMacaroonIDsRequest{} + listResp, err = client.ListMacaroonIDs(ctxt, listReq) + require.NoError(t.t, err) + + // The number of macaroon IDs should be increased by len(rootKeyIDs). + require.Equal(t.t, numMacIDs+len(rootKeyIDs), len(listResp.RootKeyIds)) + + // First test: check deleting the DefaultRootKeyID returns an error. + defaultID, _ := strconv.ParseUint( + string(macaroons.DefaultRootKeyID), 10, 64, + ) + req := &lnrpc.DeleteMacaroonIDRequest{ + RootKeyId: defaultID, + } + _, err = client.DeleteMacaroonID(ctxt, req) + require.Error(t.t, err) + require.Contains( + t.t, err.Error(), macaroons.ErrDeletionForbidden.Error(), + ) + + // Second test: check deleting the customized ID returns success. 
+ req = &lnrpc.DeleteMacaroonIDRequest{ + RootKeyId: rootKeyIDs[0], + } + resp, err := client.DeleteMacaroonID(ctxt, req) + require.NoError(t.t, err) + require.True(t.t, resp.Deleted) + + // Check that the deletion is successful. + listReq = &lnrpc.ListMacaroonIDsRequest{} + listResp, err = client.ListMacaroonIDs(ctxt, listReq) + require.NoError(t.t, err) + + // The number of macaroon IDs should be decreased by 1. + require.Equal(t.t, numMacIDs+len(rootKeyIDs)-1, len(listResp.RootKeyIds)) + + // Check that the deleted macaroon can no longer access macaroon:read. + deletedMac, err := readMacaroonFromHex(macList[0]) + require.NoError(t.t, err) + cleanup, client = macaroonClient(t.t, testNode, deletedMac) + defer cleanup() + + // Because the macaroon is deleted, it will be treated as an invalid one. + listReq = &lnrpc.ListMacaroonIDsRequest{} + _, err = client.ListMacaroonIDs(ctxt, listReq) + require.Error(t.t, err) + require.Contains(t.t, err.Error(), "cannot get macaroon") +} + +// readMacaroonFromHex loads a macaroon from a hex string. 
+func readMacaroonFromHex(macHex string) (*macaroon.Macaroon, error) { + macBytes, err := hex.DecodeString(macHex) + if err != nil { + return nil, err + } + + mac := &macaroon.Macaroon{} + if err := mac.UnmarshalBinary(macBytes); err != nil { + return nil, err + } + return mac, nil +} + +func macaroonClient(t *testing.T, testNode *lntest.HarnessNode, + mac *macaroon.Macaroon) (func(), lnrpc.LightningClient) { + + conn, err := testNode.ConnectRPCWithMacaroon(mac) + require.NoError(t, err, "connect to alice") + + cleanup := func() { + err := conn.Close() + require.NoError(t, err, "close") + } + return cleanup, lnrpc.NewLightningClient(conn) +} diff --git a/lntest/itest/lnd_max_channel_size_test.go b/lntest/itest/lnd_max_channel_size_test.go new file mode 100644 index 000000000..9d0745b90 --- /dev/null +++ b/lntest/itest/lnd_max_channel_size_test.go @@ -0,0 +1,120 @@ +// +build rpctest + +package itest + +import ( + "context" + "fmt" + "strings" + + "github.com/btcsuite/btcutil" + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lntest" +) + +// testMaxChannelSize tests that lnd handles --maxchansize parameter +// correctly. Wumbo nodes should enforce a default soft limit of 10 BTC by +// default. This limit can be adjusted with --maxchansize config option +func testMaxChannelSize(net *lntest.NetworkHarness, t *harnessTest) { + // We'll make two new nodes, both wumbo but with the default + // limit on maximum channel size (10 BTC) + wumboNode, err := net.NewNode( + "wumbo", []string{"--protocol.wumbo-channels"}, + ) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, wumboNode) + + wumboNode2, err := net.NewNode( + "wumbo2", []string{"--protocol.wumbo-channels"}, + ) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, wumboNode2) + + // We'll send 11 BTC to the wumbo node so it can test the wumbo soft limit. 
+ ctxb := context.Background() + err = net.SendCoins(ctxb, 11*btcutil.SatoshiPerBitcoin, wumboNode) + if err != nil { + t.Fatalf("unable to send coins to wumbo node: %v", err) + } + + // Next we'll connect both nodes, then attempt to make a wumbo channel + // funding request, which should fail as it exceeds the default wumbo + // soft limit of 10 BTC. + err = net.EnsureConnected(ctxb, wumboNode, wumboNode2) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + + chanAmt := lnd.MaxBtcFundingAmountWumbo + 1 + _, err = net.OpenChannel( + ctxb, wumboNode, wumboNode2, lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + if err == nil { + t.Fatalf("expected channel funding to fail as it exceeds 10 BTC limit") + } + + // The test should show failure due to the channel exceeding our max size. + if !strings.Contains(err.Error(), "exceeds maximum chan size") { + t.Fatalf("channel should be rejected due to size, instead "+ + "error was: %v", err) + } + + // Next we'll create a non-wumbo node to verify that it enforces the + // BOLT-02 channel size limit and rejects our funding request. + miniNode, err := net.NewNode("mini", nil) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, miniNode) + + err = net.EnsureConnected(ctxb, wumboNode, miniNode) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + + _, err = net.OpenChannel( + ctxb, wumboNode, miniNode, lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + if err == nil { + t.Fatalf("expected channel funding to fail as it exceeds 0.16 BTC limit") + } + + // The test should show failure due to the channel exceeding our max size. + if !strings.Contains(err.Error(), "exceeds maximum chan size") { + t.Fatalf("channel should be rejected due to size, instead "+ + "error was: %v", err) + } + + // We'll now make another wumbo node with appropriate maximum channel size + // to accept our wumbo channel funding. 
+ wumboNode3, err := net.NewNode( + "wumbo3", []string{"--protocol.wumbo-channels", + fmt.Sprintf("--maxchansize=%v", int64(lnd.MaxBtcFundingAmountWumbo+1))}, + ) + if err != nil { + t.Fatalf("unable to create new node: %v", err) + } + defer shutdownAndAssert(net, t, wumboNode3) + + // Creating a wumbo channel between these two nodes should succeed. + err = net.EnsureConnected(ctxb, wumboNode, wumboNode3) + if err != nil { + t.Fatalf("unable to connect peers: %v", err) + } + chanPoint := openChannelAndAssert( + ctxb, t, net, wumboNode, wumboNode3, + lntest.OpenChannelParams{ + Amt: chanAmt, + }, + ) + closeChannelAndAssert(ctxb, t, net, wumboNode, chanPoint, false) + +} diff --git a/lntest/itest/lnd_mpp_test.go b/lntest/itest/lnd_mpp_test.go index 7a30a3780..e213d9818 100644 --- a/lntest/itest/lnd_mpp_test.go +++ b/lntest/itest/lnd_mpp_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -25,7 +23,7 @@ func testSendToRouteMultiPath(net *lntest.NetworkHarness, t *harnessTest) { ctx := newMppTestContext(t, net) defer ctx.shutdownNodes() - // To ensure the payment goes through seperate paths, we'll set a + // To ensure the payment goes through separate paths, we'll set a // channel size that can only carry one shard at a time. We'll divide // the payment into 3 shards. 
const ( diff --git a/lntest/itest/lnd_multi-hop-error-propagation.go b/lntest/itest/lnd_multi-hop-error-propagation_test.go similarity index 99% rename from lntest/itest/lnd_multi-hop-error-propagation.go rename to lntest/itest/lnd_multi-hop-error-propagation_test.go index 244ec205f..e631dd92d 100644 --- a/lntest/itest/lnd_multi-hop-error-propagation.go +++ b/lntest/itest/lnd_multi-hop-error-propagation_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( diff --git a/lntest/itest/lnd_multi-hop-payments.go b/lntest/itest/lnd_multi-hop-payments_test.go similarity index 93% rename from lntest/itest/lnd_multi-hop-payments.go rename to lntest/itest/lnd_multi-hop-payments_test.go index a7dfee5a5..ed8f25d80 100644 --- a/lntest/itest/lnd_multi-hop-payments.go +++ b/lntest/itest/lnd_multi-hop-payments_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -170,14 +168,19 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { // Set the fee policies of the Alice -> Bob and the Dave -> Alice // channel edges to relatively large non default values. This makes it // possible to pick up more subtle fee calculation errors. 
- maxHtlc := uint64(calculateMaxHtlc(chanAmt)) + maxHtlc := calculateMaxHtlc(chanAmt) + const aliceBaseFeeSat = 1 + const aliceFeeRatePPM = 100000 updateChannelPolicy( - t, net.Alice, chanPointAlice, 1000, 100000, - lnd.DefaultBitcoinTimeLockDelta, maxHtlc, carol, + t, net.Alice, chanPointAlice, aliceBaseFeeSat*1000, + aliceFeeRatePPM, lnd.DefaultBitcoinTimeLockDelta, maxHtlc, + carol, ) + const daveBaseFeeSat = 5 + const daveFeeRatePPM = 150000 updateChannelPolicy( - t, dave, chanPointDave, 5000, 150000, + t, dave, chanPointDave, daveBaseFeeSat*1000, daveFeeRatePPM, lnd.DefaultBitcoinTimeLockDelta, maxHtlc, carol, ) @@ -224,11 +227,6 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to send payments: %v", err) } - // When asserting the amount of satoshis moved, we'll factor in the - // default base fee, as we didn't modify the fee structure when - // creating the seed nodes in the network. - const baseFee = 1 - // At this point all the channels within our proto network should be // shifted by 5k satoshis in the direction of Bob, the sink within the // payment flow generated above. The order of asserts corresponds to @@ -237,7 +235,7 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { // Alice, David, Carol. // The final node bob expects to get paid five times 1000 sat. - expectedAmountPaidAtoB := int64(5 * 1000) + expectedAmountPaidAtoB := int64(numPayments * paymentAmt) assertAmountPaid(t, "Alice(local) => Bob(remote)", net.Bob, aliceFundPoint, int64(0), expectedAmountPaidAtoB) @@ -246,7 +244,9 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { // To forward a payment of 1000 sat, Alice is charging a fee of // 1 sat + 10% = 101 sat. - const expectedFeeAlice = 5 * 101 + const aliceFeePerPayment = aliceBaseFeeSat + + (paymentAmt * aliceFeeRatePPM / 1_000_000) + const expectedFeeAlice = numPayments * aliceFeePerPayment // Dave needs to pay what Alice pays plus Alice's fee. 
expectedAmountPaidDtoA := expectedAmountPaidAtoB + expectedFeeAlice @@ -258,7 +258,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { // To forward a payment of 1101 sat, Dave is charging a fee of // 5 sat + 15% = 170.15 sat. This is rounded down in rpcserver to 170. - const expectedFeeDave = 5 * 170 + const davePaymentAmt = paymentAmt + aliceFeePerPayment + const daveFeePerPayment = daveBaseFeeSat + + (davePaymentAmt * daveFeeRatePPM / 1_000_000) + const expectedFeeDave = numPayments * daveFeePerPayment // Carol needs to pay what Dave pays plus Dave's fee. expectedAmountPaidCtoD := expectedAmountPaidDtoA + expectedFeeDave @@ -303,9 +306,10 @@ func testMultiHopPayments(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to query for fee report: %v", err) } - if len(fwdingHistory.ForwardingEvents) != 5 { + if len(fwdingHistory.ForwardingEvents) != numPayments { t.Fatalf("wrong number of forwarding event: expected %v, "+ - "got %v", 5, len(fwdingHistory.ForwardingEvents)) + "got %v", numPayments, + len(fwdingHistory.ForwardingEvents)) } expectedForwardingFee := uint64(expectedFeeDave / numPayments) for _, event := range fwdingHistory.ForwardingEvents { diff --git a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go index 0bab9b41b..fcb3d0d03 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_chain_claim_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -96,6 +94,10 @@ func testMultiHopHtlcLocalChainClaim(net *lntest.NetworkHarness, t *harnessTest, // hop logic. waitForInvoiceAccepted(t, carol, payHash) + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // At this point, Bob decides that he wants to exit the channel // immediately, so he force closes his commitment transaction. 
ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) diff --git a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go index 03320f08c..8e55a7445 100644 --- a/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_local_timeout_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -95,16 +93,16 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, dustPayHash, payHash) - if predErr != nil { - return false - } - - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) } + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // We'll now mine enough blocks to trigger Bob's broadcast of his // commitment transaction due to the fact that the HTLC is about to // timeout. 
With the default outgoing broadcast delta of zero, this will @@ -150,11 +148,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) @@ -238,10 +232,7 @@ func testMultiHopHtlcLocalTimeout(net *lntest.NetworkHarness, t *harnessTest, nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("alice's channel still has active htlc's: %v", predErr) diff --git a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go index b157f4f0a..1e0228441 100644 --- a/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_receiver_chain_claim_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -117,6 +115,10 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, t.Fatalf("settle invoice: %v", err) } + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // Now we'll mine enough blocks to prompt carol to actually go to the // chain in order to sweep her HTLC since the value is high enough. 
// TODO(roasbeef): modify once go to chain policy changes @@ -134,7 +136,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, if c == commitTypeAnchors { expectedTxes = 2 } - txes, err := getNTxsFromMempool( + _, err = getNTxsFromMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { @@ -176,7 +178,7 @@ func testMultiHopReceiverChainClaim(net *lntest.NetworkHarness, t *harnessTest, if c == commitTypeAnchors { expectedTxes = 3 } - txes, err = getNTxsFromMempool(net.Miner.Node, + txes, err := getNTxsFromMempool(net.Miner.Node, expectedTxes, minerMempoolTimeout) if err != nil { t.Fatalf("transactions not found in mempool: %v", err) diff --git a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go index 5ea1c4a1a..574ecf71b 100644 --- a/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go +++ b/lntest/itest/lnd_multi-hop_htlc_remote_chain_claim_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -96,6 +94,10 @@ func testMultiHopHtlcRemoteChainClaim(net *lntest.NetworkHarness, t *harnessTest // hop logic. waitForInvoiceAccepted(t, carol, payHash) + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // Next, Alice decides that she wants to exit the channel, so she'll // immediately force close the channel by broadcast her commitment // transaction. 
diff --git a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go index 576295ec0..8eb5b97f4 100644 --- a/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_local_force_close_on_chain_htlc_timeout_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -69,16 +67,16 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", err) } + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // Now that all parties have the HTLC locked in, we'll immediately // force close the Bob -> Carol channel. This should trigger contract // resolution mode for both of them. 
@@ -199,10 +197,7 @@ func testMultiHopLocalForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("alice's channel still has active htlc's: %v", predErr) diff --git a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go index 9d30a2dc3..19d921650 100644 --- a/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go +++ b/lntest/itest/lnd_multi-hop_remote_force_close_on_chain_htlc_timeout_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -70,16 +68,16 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, nodes := []*lntest.HarnessNode{alice, bob, carol} err = wait.Predicate(func() bool { predErr = assertActiveHtlcs(nodes, payHash) - if predErr != nil { - return false - } - - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) } + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // At this point, we'll now instruct Carol to force close the // transaction. This will let us exercise that Bob is able to sweep the // expired HTLC on Carol's version of the commitment transaction. 
If @@ -208,10 +206,7 @@ func testMultiHopRemoteForceCloseOnChainHtlcTimeout(net *lntest.NetworkHarness, nodes = []*lntest.HarnessNode{alice} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, 0) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("alice's channel still has active htlc's: %v", predErr) diff --git a/lntest/itest/lnd_multi-hop_test.go b/lntest/itest/lnd_multi-hop_test.go index ec73e1877..3707ae111 100644 --- a/lntest/itest/lnd_multi-hop_test.go +++ b/lntest/itest/lnd_multi-hop_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -73,6 +71,7 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { for _, commitType := range commitTypes { testName := fmt.Sprintf("committype=%v", commitType.String()) + commitType := commitType success := t.t.Run(testName, func(t *testing.T) { ht := newHarnessTest(t, net) @@ -101,6 +100,10 @@ func testMultiHopHtlcClaims(net *lntest.NetworkHarness, t *harnessTest) { success := ht.t.Run(subTest.name, func(t *testing.T) { ht := newHarnessTest(t, net) + // Start each test with the default + // static fee estimate. + net.SetFeeEstimate(12500) + subTest.test(net, ht, alice, bob, commitType) }) if !success { diff --git a/lntest/itest/lnd_network_test.go b/lntest/itest/lnd_network_test.go new file mode 100644 index 000000000..a1d69f53e --- /dev/null +++ b/lntest/itest/lnd_network_test.go @@ -0,0 +1,123 @@ +package itest + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/lightningnetwork/lnd" + "github.com/lightningnetwork/lnd/lnrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/stretchr/testify/require" +) + +// testNetworkConnectionTimeout checks that the connectiontimeout is taking +// effect. It creates a node with a small connection timeout value, and connects +// it to a non-routable IP address. 
+func testNetworkConnectionTimeout(net *lntest.NetworkHarness, t *harnessTest) { + var ( + ctxt, _ = context.WithTimeout( + context.Background(), defaultTimeout, + ) + // testPub is a random public key for testing only. + testPub = "0332bda7da70fefe4b6ab92f53b3c4f4ee7999" + + "f312284a8e89c8670bb3f67dbee2" + // testHost is a non-routable IP address. It's used to cause a + // connection timeout. + testHost = "10.255.255.255" + ) + + // First, test the global timeout settings. + // Create Carol with a connection timeout of 1 millisecond. + carol, err := net.NewNode("Carol", []string{"--connectiontimeout=1ms"}) + if err != nil { + t.Fatalf("unable to create new node carol: %v", err) + } + defer shutdownAndAssert(net, t, carol) + + // Try to connect Carol to a non-routable IP address, which should give + // us a timeout error. + req := &lnrpc.ConnectPeerRequest{ + Addr: &lnrpc.LightningAddress{ + Pubkey: testPub, + Host: testHost, + }, + } + assertTimeoutError(ctxt, t, carol, req) + + // Second, test timeout on the connect peer request. + // Create Dave with the default timeout setting. + dave, err := net.NewNode("Dave", nil) + if err != nil { + t.Fatalf("unable to create new node dave: %v", err) + } + defer shutdownAndAssert(net, t, dave) + + // Try to connect Dave to a non-routable IP address, using a timeout + // value of 1ms, which should give us a timeout error immediately. + req = &lnrpc.ConnectPeerRequest{ + Addr: &lnrpc.LightningAddress{ + Pubkey: testPub, + Host: testHost, + }, + Timeout: 1, + } + assertTimeoutError(ctxt, t, dave, req) +} + +// assertTimeoutError asserts that a connection timeout error is raised. A +// context with a default timeout is used to make the request. If our customized +// connection timeout is less than the default, we won't see the request context +// times out, instead a network connection timeout will be returned. 
+func assertTimeoutError(ctxt context.Context, t *harnessTest, + node *lntest.HarnessNode, req *lnrpc.ConnectPeerRequest) { + + t.t.Helper() + + // Create a context with a timeout value. + ctxt, cancel := context.WithTimeout(ctxt, defaultTimeout) + defer cancel() + + err := connect(ctxt, node, req) + + // A DeadlineExceeded error will appear in the context if the above + // timeout value is reached. + require.NoError(t.t, ctxt.Err(), "context time out") + + // Check that the network returns a timeout error. + require.Containsf( + t.t, err.Error(), "i/o timeout", + "expected to get a timeout error, instead got: %v", err, + ) +} + +func connect(ctxt context.Context, node *lntest.HarnessNode, + req *lnrpc.ConnectPeerRequest) error { + + syncTimeout := time.After(15 * time.Second) + ticker := time.NewTicker(time.Millisecond * 100) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + _, err := node.ConnectPeer(ctxt, req) + // If there's no error, return nil + if err == nil { + return err + } + // If the error is not ErrServerNotActive, return it. + // Otherwise, we will retry until timeout. 
+ if !strings.Contains(err.Error(), + lnd.ErrServerNotActive.Error()) { + + return err + } + case <-syncTimeout: + return fmt.Errorf("chain backend did not " + + "finish syncing") + } + } + return nil +} diff --git a/lntest/itest/onchain.go b/lntest/itest/lnd_onchain_test.go similarity index 99% rename from lntest/itest/onchain.go rename to lntest/itest/lnd_onchain_test.go index 7cc4df11e..bb03165d4 100644 --- a/lntest/itest/onchain.go +++ b/lntest/itest/lnd_onchain_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( diff --git a/lntest/itest/psbt.go b/lntest/itest/lnd_psbt_test.go similarity index 97% rename from lntest/itest/psbt.go rename to lntest/itest/lnd_psbt_test.go index e316c45dc..69af9589b 100644 --- a/lntest/itest/psbt.go +++ b/lntest/itest/lnd_psbt_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -14,6 +12,7 @@ import ( "github.com/lightningnetwork/lnd" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lntest" + "github.com/stretchr/testify/require" ) // testPsbtChanFunding makes sure a channel can be opened between carol and dave @@ -119,14 +118,14 @@ func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { // encoded in the PSBT. We'll let the miner do it and convert the final // TX into a PSBT, that's way easier than assembling a PSBT manually. allOuts := append(packet.UnsignedTx.TxOut, packet2.UnsignedTx.TxOut...) - tx, err := net.Miner.CreateTransaction(allOuts, 5, true) + finalTx, err := net.Miner.CreateTransaction(allOuts, 5, true) if err != nil { t.Fatalf("unable to create funding transaction: %v", err) } // The helper function splits the final TX into the non-witness data // encoded in a PSBT and the witness data returned separately. 
- unsignedPsbt, scripts, witnesses, err := createPsbtFromSignedTx(tx) + unsignedPsbt, scripts, witnesses, err := createPsbtFromSignedTx(finalTx) if err != nil { t.Fatalf("unable to convert funding transaction into PSBT: %v", err) @@ -185,7 +184,7 @@ func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { // complete and signed transaction that can be finalized. We'll trick // a bit by putting the script sig back directly, because we know we // will only get non-witness outputs from the miner wallet. - for idx := range tx.TxIn { + for idx := range finalTx.TxIn { if len(witnesses[idx]) > 0 { t.Fatalf("unexpected witness inputs in wallet TX") } @@ -239,12 +238,16 @@ func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unexpected txes in mempool: %v", mempool) } - // Let's progress the second channel now. + // Let's progress the second channel now. This time we'll use the raw + // wire format transaction directly. + buf.Reset() + err = finalTx.Serialize(&buf) + require.NoError(t.t, err) _, err = carol.FundingStateStep(ctxb, &lnrpc.FundingTransitionMsg{ Trigger: &lnrpc.FundingTransitionMsg_PsbtFinalize{ PsbtFinalize: &lnrpc.FundingPsbtFinalize{ PendingChanId: pendingChanID2[:], - SignedPsbt: buf.Bytes(), + FinalRawTx: buf.Bytes(), }, }, }) @@ -275,7 +278,7 @@ func testPsbtChanFunding(net *lntest.NetworkHarness, t *harnessTest) { // Great, now we can mine a block to get the transaction confirmed, then // wait for the new channel to be propagated through the network. - txHash := tx.TxHash() + txHash := finalTx.TxHash() block := mineBlocks(t, net, 6, 1)[0] assertTxInBlock(t, block, &txHash) ctxt, cancel = context.WithTimeout(ctxb, defaultTimeout) @@ -397,7 +400,7 @@ func receiveChanUpdate(ctx context.Context, errChan := make(chan error) go func() { // Consume one message. This will block until the message is - // recieved. + // received. 
resp, err := stream.Recv() if err != nil { errChan <- err diff --git a/lntest/itest/rest_api.go b/lntest/itest/lnd_rest_api_test.go similarity index 77% rename from lntest/itest/rest_api.go rename to lntest/itest/lnd_rest_api_test.go index 5eee3d85a..573d0b432 100644 --- a/lntest/itest/rest_api.go +++ b/lntest/itest/lnd_rest_api_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -51,9 +49,9 @@ var ( resultPattern = regexp.MustCompile("{\"result\":(.*)}") ) -// testRestApi tests that the most important features of the REST API work +// testRestAPI tests that the most important features of the REST API work // correctly. -func testRestApi(net *lntest.NetworkHarness, ht *harnessTest) { +func testRestAPI(net *lntest.NetworkHarness, ht *harnessTest) { testCases := []struct { name string run func(*testing.T, *lntest.HarnessNode, *lntest.HarnessNode) @@ -201,7 +199,116 @@ func testRestApi(net *lntest.NetworkHarness, ht *harnessTest) { Height: uint32(height), } url := "/v2/chainnotifier/register/blocks" - c, err := openWebSocket(a, url, "POST", req) + c, err := openWebSocket(a, url, "POST", req, nil) + require.Nil(t, err, "websocket") + defer func() { + _ = c.WriteMessage( + websocket.CloseMessage, + websocket.FormatCloseMessage( + websocket.CloseNormalClosure, + "done", + ), + ) + _ = c.Close() + }() + + msgChan := make(chan *chainrpc.BlockEpoch) + errChan := make(chan error) + timeout := time.After(defaultTimeout) + + // We want to read exactly one message. + go func() { + defer close(msgChan) + + _, msg, err := c.ReadMessage() + if err != nil { + errChan <- err + return + } + + // The chunked/streamed responses come wrapped + // in either a {"result":{}} or {"error":{}} + // wrapper which we'll get rid of here. 
+ msgStr := string(msg) + if !strings.Contains(msgStr, "\"result\":") { + errChan <- fmt.Errorf("invalid msg: %s", + msgStr) + return + } + msgStr = resultPattern.ReplaceAllString( + msgStr, "${1}", + ) + + // Make sure we can parse the unwrapped message + // into the expected proto message. + protoMsg := &chainrpc.BlockEpoch{} + err = jsonpb.UnmarshalString( + msgStr, protoMsg, + ) + if err != nil { + errChan <- err + return + } + + select { + case msgChan <- protoMsg: + case <-timeout: + } + }() + + // Mine a block and make sure we get a message for it. + blockHashes, err := net.Miner.Node.Generate(1) + require.Nil(t, err, "generate blocks") + assert.Equal(t, 1, len(blockHashes), "num blocks") + select { + case msg := <-msgChan: + assert.Equal( + t, blockHashes[0].CloneBytes(), + msg.Hash, "block hash", + ) + + case err := <-errChan: + t.Fatalf("Received error from WS: %v", err) + + case <-timeout: + t.Fatalf("Timeout before message was received") + } + }, + }, { + name: "websocket subscription with macaroon in protocol", + run: func(t *testing.T, a, b *lntest.HarnessNode) { + // Find out the current best block so we can subscribe + // to the next one. + hash, height, err := net.Miner.Node.GetBestBlock() + require.Nil(t, err, "get best block") + + // Create a new subscription to get block epoch events. + req := &chainrpc.BlockEpoch{ + Hash: hash.CloneBytes(), + Height: uint32(height), + } + url := "/v2/chainnotifier/register/blocks" + + // This time we send the macaroon in the special header + // Sec-Websocket-Protocol which is the only header field + // available to browsers when opening a WebSocket. 
+ mac, err := a.ReadMacaroon( + a.AdminMacPath(), defaultTimeout, + ) + require.NoError(t, err, "read admin mac") + macBytes, err := mac.MarshalBinary() + require.NoError(t, err, "marshal admin mac") + + customHeader := make(http.Header) + customHeader.Set( + lnrpc.HeaderWebSocketProtocol, fmt.Sprintf( + "Grpc-Metadata-Macaroon+%s", + hex.EncodeToString(macBytes), + ), + ) + c, err := openWebSocket( + a, url, "POST", req, customHeader, + ) require.Nil(t, err, "websocket") defer func() { _ = c.WriteMessage( @@ -289,6 +396,7 @@ func testRestApi(net *lntest.NetworkHarness, ht *harnessTest) { } for _, tc := range testCases { + tc := tc ht.t.Run(tc.name, func(t *testing.T) { tc.run(t, net.Alice, net.Bob) }) @@ -364,22 +472,26 @@ func makeRequest(node *lntest.HarnessNode, url, method string, // openWebSocket opens a new WebSocket connection to the given URL with the // appropriate macaroon headers and sends the request message over the socket. func openWebSocket(node *lntest.HarnessNode, url, method string, - req proto.Message) (*websocket.Conn, error) { + req proto.Message, customHeader http.Header) (*websocket.Conn, error) { // Prepare our macaroon headers and assemble the full URL from the // node's listening address. WebSockets always work over GET so we need // to append the target request method as a query parameter. - header := make(http.Header) - if err := addAdminMacaroon(node, header); err != nil { - return nil, err + header := customHeader + if header == nil { + header = make(http.Header) + if err := addAdminMacaroon(node, header); err != nil { + return nil, err + } } fullURL := fmt.Sprintf( "wss://%s%s?method=%s", node.Cfg.RESTAddr(), url, method, ) - conn, _, err := webSocketDialer.Dial(fullURL, header) + conn, resp, err := webSocketDialer.Dial(fullURL, header) if err != nil { return nil, err } + defer func() { _ = resp.Body.Close() }() // Send the given request message as the first message on the socket. 
reqMsg, err := jsonMarshaler.MarshalToString(req) diff --git a/lntest/itest/lnd_send_multi_path_payment.go b/lntest/itest/lnd_send_multi_path_payment_test.go similarity index 99% rename from lntest/itest/lnd_send_multi_path_payment.go rename to lntest/itest/lnd_send_multi_path_payment_test.go index 453f16763..4065cd0d4 100644 --- a/lntest/itest/lnd_send_multi_path_payment.go +++ b/lntest/itest/lnd_send_multi_path_payment_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( diff --git a/lntest/itest/lnd_signer_test.go b/lntest/itest/lnd_signer_test.go new file mode 100644 index 000000000..02402145a --- /dev/null +++ b/lntest/itest/lnd_signer_test.go @@ -0,0 +1,204 @@ +package itest + +import ( + "context" + "fmt" + + "github.com/btcsuite/btcd/btcec" + "github.com/lightningnetwork/lnd/keychain" + "github.com/lightningnetwork/lnd/lnrpc/signrpc" + "github.com/lightningnetwork/lnd/lntest" + "github.com/stretchr/testify/require" +) + +// testDeriveSharedKey checks the ECDH performed by the endpoint +// DeriveSharedKey. It creates an ephemeral private key, performing an ECDH with +// the node's pubkey and a customized public key to check the validity of the +// result. +func testDeriveSharedKey(net *lntest.NetworkHarness, t *harnessTest) { + ctxb := context.Background() + + // Create an ephemeral key, extracts its public key, and make a + // PrivKeyECDH using the ephemeral key. + ephemeralPriv, err := btcec.NewPrivateKey(btcec.S256()) + require.NoError(t.t, err, "failed to create ephemeral key") + + ephemeralPubBytes := ephemeralPriv.PubKey().SerializeCompressed() + privKeyECDH := &keychain.PrivKeyECDH{PrivKey: ephemeralPriv} + + // assertECDHMatch checks the correctness of the ECDH between the + // ephemeral key and the given public key. 
+ assertECDHMatch := func(pub *btcec.PublicKey, + req *signrpc.SharedKeyRequest) { + + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + resp, err := net.Alice.SignerClient.DeriveSharedKey(ctxt, req) + require.NoError(t.t, err, "calling DeriveSharedKey failed") + + sharedKey, _ := privKeyECDH.ECDH(pub) + require.Equal( + t.t, sharedKey[:], resp.SharedKey, + "failed to derive the expected key", + ) + } + + nodePub, err := btcec.ParsePubKey(net.Alice.PubKey[:], btcec.S256()) + require.NoError(t.t, err, "failed to parse node pubkey") + + customizedKeyFamily := int32(keychain.KeyFamilyMultiSig) + customizedIndex := int32(1) + customizedPub, err := deriveCustomizedKey( + ctxb, net.Alice, customizedKeyFamily, customizedIndex, + ) + require.NoError(t.t, err, "failed to create customized pubkey") + + // Test DeriveSharedKey with no optional arguments. It will result in + // performing an ECDH between the ephemeral key and the node's pubkey. + req := &signrpc.SharedKeyRequest{EphemeralPubkey: ephemeralPubBytes} + assertECDHMatch(nodePub, req) + + // Test DeriveSharedKey with a KeyLoc which points to the node's pubkey. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: int32(keychain.KeyFamilyNodeKey), + KeyIndex: 0, + }, + } + assertECDHMatch(nodePub, req) + + // Test DeriveSharedKey with a KeyLoc being set in KeyDesc. The KeyLoc + // points to the node's pubkey. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: int32(keychain.KeyFamilyNodeKey), + KeyIndex: 0, + }, + }, + } + assertECDHMatch(nodePub, req) + + // Test DeriveSharedKey with RawKeyBytes set in KeyDesc. The RawKeyBytes + // is the node's pubkey bytes, and the KeyFamily is KeyFamilyNodeKey. 
+ req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + RawKeyBytes: net.Alice.PubKey[:], + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: int32(keychain.KeyFamilyNodeKey), + }, + }, + } + assertECDHMatch(nodePub, req) + + // Test DeriveSharedKey with a KeyLoc which points to the customized + // public key. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: customizedKeyFamily, + KeyIndex: customizedIndex, + }, + } + assertECDHMatch(customizedPub, req) + + // Test DeriveSharedKey with a KeyLoc being set in KeyDesc. The KeyLoc + // points to the customized public key. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: customizedKeyFamily, + KeyIndex: customizedIndex, + }, + }, + } + assertECDHMatch(customizedPub, req) + + // Test DeriveSharedKey with RawKeyBytes set in KeyDesc. The RawKeyBytes + // is the customized public key. The KeyLoc is also set with the family + // being the customizedKeyFamily. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + RawKeyBytes: customizedPub.SerializeCompressed(), + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: customizedKeyFamily, + }, + }, + } + assertECDHMatch(customizedPub, req) + + // assertErrorMatch checks when calling DeriveSharedKey with invalid + // params, the expected error is returned. + assertErrorMatch := func(match string, req *signrpc.SharedKeyRequest) { + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) + _, err := net.Alice.SignerClient.DeriveSharedKey(ctxt, req) + require.Error(t.t, err, "expected to have an error") + require.Contains( + t.t, err.Error(), match, "error failed to match", + ) + } + + // Test that EphemeralPubkey must be supplied. 
+ req = &signrpc.SharedKeyRequest{} + assertErrorMatch("must provide ephemeral pubkey", req) + + // Test that cannot use both KeyDesc and KeyLoc. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + RawKeyBytes: customizedPub.SerializeCompressed(), + }, + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: customizedKeyFamily, + KeyIndex: 0, + }, + } + assertErrorMatch("use either key_desc or key_loc", req) + + // Test when KeyDesc is used, KeyLoc must be set. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + RawKeyBytes: net.Alice.PubKey[:], + }, + } + assertErrorMatch("key_desc.key_loc must also be set", req) + + // Test that cannot use both RawKeyBytes and KeyIndex. + req = &signrpc.SharedKeyRequest{ + EphemeralPubkey: ephemeralPubBytes, + KeyDesc: &signrpc.KeyDescriptor{ + RawKeyBytes: customizedPub.SerializeCompressed(), + KeyLoc: &signrpc.KeyLocator{ + KeyFamily: customizedKeyFamily, + KeyIndex: 1, + }, + }, + } + assertErrorMatch("use either raw_key_bytes or key_index", req) +} + +// deriveCustomizedKey uses the family and index to derive a public key from +// the node's walletkit client. 
+func deriveCustomizedKey(ctx context.Context, node *lntest.HarnessNode, + family, index int32) (*btcec.PublicKey, error) { + + ctxt, _ := context.WithTimeout(ctx, defaultTimeout) + req := &signrpc.KeyLocator{ + KeyFamily: family, + KeyIndex: index, + } + resp, err := node.WalletKitClient.DeriveKey(ctxt, req) + if err != nil { + return nil, fmt.Errorf("failed to derive key: %v", err) + } + pub, err := btcec.ParsePubKey(resp.RawKeyBytes, btcec.S256()) + if err != nil { + return nil, fmt.Errorf("failed to parse node pubkey: %v", err) + } + return pub, nil +} diff --git a/lntest/itest/lnd_single_hop_invoice_test.go b/lntest/itest/lnd_single_hop_invoice_test.go index 6f05841bb..83d914a11 100644 --- a/lntest/itest/lnd_single_hop_invoice_test.go +++ b/lntest/itest/lnd_single_hop_invoice_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -42,7 +40,6 @@ func testSingleHopInvoice(net *lntest.NetworkHarness, t *harnessTest) { RPreimage: preimage, Value: paymentAmt, } - ctxt, _ = context.WithTimeout(ctxt, defaultTimeout) invoiceResp, err := net.Bob.AddInvoice(ctxb, invoice) if err != nil { t.Fatalf("unable to add invoice: %v", err) diff --git a/lntest/itest/lnd_test.go b/lntest/itest/lnd_test.go index ee51f47b5..4149802a1 100644 --- a/lntest/itest/lnd_test.go +++ b/lntest/itest/lnd_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -15,14 +13,15 @@ import ( "os" "path/filepath" "reflect" + "runtime" "strings" "sync" "sync/atomic" "testing" "time" + "github.com/btcsuite/btcd/blockchain" "github.com/btcsuite/btcd/btcjson" - "github.com/btcsuite/btcd/chaincfg" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/integration/rpctest" "github.com/btcsuite/btcd/rpcclient" @@ -32,9 +31,9 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/go-errors/errors" "github.com/lightningnetwork/lnd" - "github.com/lightningnetwork/lnd/chanbackup" "github.com/lightningnetwork/lnd/channeldb" "github.com/lightningnetwork/lnd/input" 
+ "github.com/lightningnetwork/lnd/labels" "github.com/lightningnetwork/lnd/lncfg" "github.com/lightningnetwork/lnd/lnrpc" "github.com/lightningnetwork/lnd/lnrpc/invoicesrpc" @@ -53,141 +52,6 @@ import ( "github.com/stretchr/testify/require" ) -var ( - harnessNetParams = &chaincfg.RegressionNetParams -) - -const ( - testFeeBase = 1e+6 - defaultCSV = lntest.DefaultCSV - defaultTimeout = lntest.DefaultTimeout - minerMempoolTimeout = lntest.MinerMempoolTimeout - channelOpenTimeout = lntest.ChannelOpenTimeout - channelCloseTimeout = lntest.ChannelCloseTimeout - itestLndBinary = "../../lnd-itest" - anchorSize = 330 - noFeeLimitMsat = math.MaxInt64 -) - -// harnessTest wraps a regular testing.T providing enhanced error detection -// and propagation. All error will be augmented with a full stack-trace in -// order to aid in debugging. Additionally, any panics caused by active -// test cases will also be handled and represented as fatals. -type harnessTest struct { - t *testing.T - - // testCase is populated during test execution and represents the - // current test case. - testCase *testCase - - // lndHarness is a reference to the current network harness. Will be - // nil if not yet set up. - lndHarness *lntest.NetworkHarness -} - -// newHarnessTest creates a new instance of a harnessTest from a regular -// testing.T instance. -func newHarnessTest(t *testing.T, net *lntest.NetworkHarness) *harnessTest { - return &harnessTest{t, nil, net} -} - -// Skipf calls the underlying testing.T's Skip method, causing the current test -// to be skipped. -func (h *harnessTest) Skipf(format string, args ...interface{}) { - h.t.Skipf(format, args...) -} - -// Fatalf causes the current active test case to fail with a fatal error. All -// integration tests should mark test failures solely with this method due to -// the error stack traces it produces. 
-func (h *harnessTest) Fatalf(format string, a ...interface{}) { - if h.lndHarness != nil { - h.lndHarness.SaveProfilesPages() - } - - stacktrace := errors.Wrap(fmt.Sprintf(format, a...), 1).ErrorStack() - - if h.testCase != nil { - h.t.Fatalf("Failed: (%v): exited with error: \n"+ - "%v", h.testCase.name, stacktrace) - } else { - h.t.Fatalf("Error outside of test: %v", stacktrace) - } -} - -// RunTestCase executes a harness test case. Any errors or panics will be -// represented as fatal. -func (h *harnessTest) RunTestCase(testCase *testCase) { - h.testCase = testCase - defer func() { - h.testCase = nil - }() - - defer func() { - if err := recover(); err != nil { - description := errors.Wrap(err, 2).ErrorStack() - h.t.Fatalf("Failed: (%v) panicked with: \n%v", - h.testCase.name, description) - } - }() - - testCase.test(h.lndHarness, h) - - return -} - -func (h *harnessTest) Logf(format string, args ...interface{}) { - h.t.Logf(format, args...) -} - -func (h *harnessTest) Log(args ...interface{}) { - h.t.Log(args...) 
-} - -func assertTxInBlock(t *harnessTest, block *wire.MsgBlock, txid *chainhash.Hash) { - for _, tx := range block.Transactions { - sha := tx.TxHash() - if bytes.Equal(txid[:], sha[:]) { - return - } - } - - t.Fatalf("tx was not included in block") -} - -func assertWalletUnspent(t *harnessTest, node *lntest.HarnessNode, out *lnrpc.OutPoint) { - t.t.Helper() - - err := wait.NoError(func() error { - ctxt, cancel := context.WithTimeout(context.Background(), defaultTimeout) - defer cancel() - unspent, err := node.ListUnspent(ctxt, &lnrpc.ListUnspentRequest{}) - if err != nil { - return err - } - - err = errors.New("tx with wanted txhash never found") - for _, utxo := range unspent.Utxos { - if !bytes.Equal(utxo.Outpoint.TxidBytes, out.TxidBytes) { - continue - } - - err = errors.New("wanted output is not a wallet utxo") - if utxo.Outpoint.OutputIndex != out.OutputIndex { - continue - } - - return nil - } - - return err - }, defaultTimeout) - if err != nil { - t.Fatalf("outpoint %s not unspent by %s's wallet: %v", out, - node.Name(), err) - } -} - func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.OutPoint { txid, err := lnd.GetChanPointFundingTxid(chanPoint) if err != nil { @@ -200,50 +64,6 @@ func rpcPointToWirePoint(t *harnessTest, chanPoint *lnrpc.ChannelPoint) wire.Out } } -// mineBlocks mine 'num' of blocks and check that blocks are present in -// node blockchain. numTxs should be set to the number of transactions -// (excluding the coinbase) we expect to be included in the first mined block. -func mineBlocks(t *harnessTest, net *lntest.NetworkHarness, - num uint32, numTxs int) []*wire.MsgBlock { - - // If we expect transactions to be included in the blocks we'll mine, - // we wait here until they are seen in the miner's mempool. 
- var txids []*chainhash.Hash - var err error - if numTxs > 0 { - txids, err = waitForNTxsInMempool( - net.Miner.Node, numTxs, minerMempoolTimeout, - ) - if err != nil { - t.Fatalf("unable to find txns in mempool: %v", err) - } - } - - blocks := make([]*wire.MsgBlock, num) - - blockHashes, err := net.Miner.Node.Generate(num) - if err != nil { - t.Fatalf("unable to generate blocks: %v", err) - } - - for i, blockHash := range blockHashes { - block, err := net.Miner.Node.GetBlock(blockHash) - if err != nil { - t.Fatalf("unable to get block: %v", err) - } - - blocks[i] = block - } - - // Finally, assert that all the transactions were included in the first - // block. - for _, txid := range txids { - assertTxInBlock(t, blocks[0], txid) - } - - return blocks -} - // openChannelStream blocks until an OpenChannel request for a channel funding // by alice succeeds. If it does, a stream client is returned to receive events // about the opening channel. @@ -847,11 +667,6 @@ func getChanInfo(ctx context.Context, node *lntest.HarnessNode) ( return channelInfo.Channels[0], nil } -const ( - AddrTypeWitnessPubkeyHash = lnrpc.AddressType_WITNESS_PUBKEY_HASH - AddrTypeNestedPubkeyHash = lnrpc.AddressType_NESTED_PUBKEY_HASH -) - // testGetRecoveryInfo checks whether lnd gives the right information about // the wallet recovery process. func testGetRecoveryInfo(net *lntest.NetworkHarness, t *harnessTest) { @@ -1372,7 +1187,7 @@ func basicChannelFundingTest(t *harnessTest, net *lntest.NetworkHarness, // Finally, immediately close the channel. This function will // also block until the channel is closed and will additionally // assert the relevant channel closing post conditions. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + ctxt, _ := context.WithTimeout(ctxb, channelCloseTimeout) closeChannelAndAssert(ctxt, t, net, alice, chanPoint, false) } @@ -1438,6 +1253,8 @@ test: carolCommitType, daveCommitType) ht := t + carolCommitType := carolCommitType + daveCommitType := daveCommitType success := t.t.Run(testName, func(t *testing.T) { carolChannel, daveChannel, closeChan, err := basicChannelFundingTest( ht, net, carol, dave, nil, @@ -1639,7 +1456,7 @@ func testPaymentFollowingChannelOpen(net *lntest.NetworkHarness, t *harnessTest) ctxb := context.Background() const paymentAmt = btcutil.Amount(100) - channelCapacity := btcutil.Amount(paymentAmt * 1000) + channelCapacity := paymentAmt * 1000 // We first establish a channel between Alice and Bob. ctxt, cancel := context.WithTimeout(ctxb, channelOpenTimeout) @@ -2333,7 +2150,7 @@ func testUpdateChannelPolicy(net *lntest.NetworkHarness, t *harnessTest) { baseFee = int64(800) feeRate = int64(123) timeLockDelta = uint32(22) - maxHtlc = maxHtlc * 2 + maxHtlc *= 2 expectedPolicy.FeeBaseMsat = baseFee expectedPolicy.FeeRateMilliMsat = testFeeBase * feeRate @@ -2462,9 +2279,14 @@ func testOpenChannelAfterReorg(net *lntest.NetworkHarness, t *harnessTest) { ) // Set up a new miner that we can use to cause a reorg. 
- args := []string{"--rejectnonstd", "--txindex"} - tempMiner, err := rpctest.New(harnessNetParams, - &rpcclient.NotificationHandlers{}, args) + args := []string{ + "--rejectnonstd", + "--txindex", + "--nowinservice", + } + tempMiner, err := rpctest.New( + harnessNetParams, &rpcclient.NotificationHandlers{}, args, + ) if err != nil { t.Fatalf("unable to create mining node: %v", err) } @@ -2905,6 +2727,7 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to convert funding txid into chainhash.Hash:"+ " %v", err) } + fundingTxStr := fundingTxID.String() // Mine a block, then wait for Alice's node to notify us that the // channel has been opened. The funding transaction should be found @@ -2912,6 +2735,10 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { block := mineBlocks(t, net, 1, 1)[0] assertTxInBlock(t, block, fundingTxID) + // Get the height that our transaction confirmed at. + _, height, err := net.Miner.Node.GetBestBlock() + require.NoError(t.t, err, "could not get best block") + // Restart both nodes to test that the appropriate state has been // persisted and that both nodes recover gracefully. if err := net.RestartNode(net.Alice, nil); err != nil { @@ -2934,6 +2761,16 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { t.Fatalf("unable to mine blocks: %v", err) } + // Assert that our wallet has our opening transaction with a label + // that does not have a channel ID set yet, because we have not + // reached our required confirmations. + tx := findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice) + + // At this stage, we expect the transaction to be labelled, but not with + // our channel ID because our transaction has not yet confirmed. + label := labels.MakeLabel(labels.LabelTypeChannelOpen, nil) + require.Equal(t.t, label, tx.Label, "open channel label wrong") + // Both nodes should still show a single channel as pending. 
time.Sleep(time.Second * 1) ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) @@ -2957,9 +2794,27 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { Index: pendingUpdate.OutputIndex, } + // Re-lookup our transaction in the block that it confirmed in. + tx = findTxAtHeight(ctxt, t, height, fundingTxStr, net.Alice) + + // Create an additional check for our channel assertion that will + // check that our label is as expected. + check := func(channel *lnrpc.Channel) { + shortChanID := lnwire.NewShortChanIDFromInt( + channel.ChanId, + ) + + label := labels.MakeLabel( + labels.LabelTypeChannelOpen, &shortChanID, + ) + require.Equal(t.t, label, tx.Label, + "open channel label not updated") + } + // Check both nodes to ensure that the channel is ready for operation. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - if err := net.AssertChannelExists(ctxt, net.Alice, &outPoint); err != nil { + err = net.AssertChannelExists(ctxt, net.Alice, &outPoint, check) + if err != nil { t.Fatalf("unable to assert channel existence: %v", err) } ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) @@ -2980,6 +2835,29 @@ func testChannelFundingPersistence(net *lntest.NetworkHarness, t *harnessTest) { closeChannelAndAssert(ctxt, t, net, net.Alice, chanPoint, false) } +// findTxAtHeight gets all of the transactions that a node's wallet has a record +// of at the target height, and finds and returns the tx with the target txid, +// failing if it is not found. 
+func findTxAtHeight(ctx context.Context, t *harnessTest, height int32, + target string, node *lntest.HarnessNode) *lnrpc.Transaction { + + txns, err := node.LightningClient.GetTransactions( + ctx, &lnrpc.GetTransactionsRequest{ + StartHeight: height, + EndHeight: height, + }, + ) + require.NoError(t.t, err, "could not get transactions") + + for _, tx := range txns.Transactions { + if tx.TxHash == target { + return tx + } + } + + return nil +} + // testChannelBalance creates a new channel between Alice and Bob, then // checks channel balance to be equal amount specified while creation of channel. func testChannelBalance(net *lntest.NetworkHarness, t *harnessTest) { @@ -3331,6 +3209,7 @@ func testChannelForceClosure(net *lntest.NetworkHarness, t *harnessTest) { for _, channelType := range commitTypes { testName := fmt.Sprintf("committype=%v", channelType) + channelType := channelType success := t.t.Run(testName, func(t *testing.T) { ht := newHarnessTest(t, net) @@ -3397,6 +3276,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, numInvoices = 6 ) + const commitFeeRate = 20000 + net.SetFeeEstimate(commitFeeRate) + // TODO(roasbeef): should check default value in config here // instead, or make delay a param defaultCLTV := uint32(lnd.DefaultBitcoinTimeLockDelta) @@ -3511,6 +3393,9 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // execute a force closure of the channel. This will also assert that // the commitment transaction was immediately broadcast in order to // fulfill the force closure request. + const actualFeeRate = 30000 + net.SetFeeEstimate(actualFeeRate) + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) _, closingTxID, err := net.CloseChannel(ctxt, alice, chanPoint, true) if err != nil { @@ -3572,17 +3457,35 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // broadcast as a result of the force closure. 
If there are anchors, we // also expect the anchor sweep tx to be in the mempool. expectedTxes := 1 + expectedFeeRate := commitFeeRate if channelType == commitTypeAnchors { expectedTxes = 2 + expectedFeeRate = actualFeeRate } - sweepTxns, err := waitForNTxsInMempool( + sweepTxns, err := getNTxsFromMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { t.Fatalf("failed to find commitment in miner mempool: %v", err) } + // Verify fee rate of the commitment tx plus anchor if present. + var totalWeight, totalFee int64 + for _, tx := range sweepTxns { + utx := btcutil.NewTx(tx) + totalWeight += blockchain.GetTransactionWeight(utx) + + fee, err := getTxFee(net.Miner.Node, tx) + require.NoError(t.t, err) + totalFee += int64(fee) + } + feeRate := totalFee * 1000 / totalWeight + + // Allow some deviation because weight estimates during tx generation + // are estimates. + require.InEpsilon(t.t, expectedFeeRate, feeRate, 0.005) + // Find alice's commit sweep and anchor sweep (if present) in the // mempool. aliceCloseTx := waitingClose.Commitments.LocalTxid @@ -3674,7 +3577,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, // Carol's sweep tx should be in the mempool already, as her output is // not timelocked. If there are anchors, we also expect Carol's anchor // sweep now. - sweepTxns, err = waitForNTxsInMempool( + sweepTxns, err = getNTxsFromMempool( net.Miner.Node, expectedTxes, minerMempoolTimeout, ) if err != nil { @@ -4222,7 +4125,7 @@ func channelForceClosureTest(net *lntest.NetworkHarness, t *harnessTest, TxidStr: output.Hash.String(), OutputIndex: output.Index, }, - AmountSat: uint64(htlcLessFees), + AmountSat: htlcLessFees, } } @@ -4353,12 +4256,13 @@ type sweptOutput struct { // we have to bring another input to add fees to the anchor. Note that the // anchor swept output may be nil if the channel did not have anchors. 
func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, - sweepTxns []*chainhash.Hash, closeTx string) (*sweptOutput, *sweptOutput) { + sweepTxns []*wire.MsgTx, closeTx string) (*sweptOutput, *sweptOutput) { var commitSweep, anchorSweep *sweptOutput for _, tx := range sweepTxns { - sweepTx, err := net.Miner.Node.GetRawTransaction(tx) + txHash := tx.TxHash() + sweepTx, err := net.Miner.Node.GetRawTransaction(&txHash) require.NoError(t.t, err) // We expect our commitment sweep to have a single input, and, @@ -4371,7 +4275,7 @@ func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, if len(inputs) == 1 { commitSweep = &sweptOutput{ OutPoint: inputs[0].PreviousOutPoint, - SweepTx: tx.String(), + SweepTx: txHash.String(), } } else { // Since we have more than one input, we run through @@ -4382,7 +4286,7 @@ func findCommitAndAnchor(t *harnessTest, net *lntest.NetworkHarness, if outpointStr == closeTx { anchorSweep = &sweptOutput{ OutPoint: txin.PreviousOutPoint, - SweepTx: tx.String(), + SweepTx: txHash.String(), } } } @@ -4735,6 +4639,8 @@ func testSphinxReplayPersistence(net *lntest.NetworkHarness, t *harnessTest) { func assertChannelConstraintsEqual( t *harnessTest, want, got *lnrpc.ChannelConstraints) { + t.t.Helper() + if want.CsvDelay != got.CsvDelay { t.Fatalf("CsvDelay mismatched, want: %v, got: %v", want.CsvDelay, got.CsvDelay, @@ -4779,6 +4685,9 @@ func assertChannelConstraintsEqual( func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { ctxb := context.Background() + const aliceRemoteMaxHtlcs = 50 + const bobRemoteMaxHtlcs = 100 + // Create two fresh nodes and open a channel between them. 
alice, err := net.NewNode("Alice", nil) if err != nil { @@ -4786,20 +4695,21 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { } defer shutdownAndAssert(net, t, alice) - bob, err := net.NewNode("Bob", nil) + bob, err := net.NewNode("Bob", []string{ + fmt.Sprintf("--default-remote-max-htlcs=%v", bobRemoteMaxHtlcs), + }) if err != nil { t.Fatalf("unable to create new node: %v", err) } defer shutdownAndAssert(net, t, bob) // Connect Alice to Bob. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) if err := net.ConnectNodes(ctxb, alice, bob); err != nil { t.Fatalf("unable to connect alice to bob: %v", err) } // Give Alice some coins so she can fund a channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, alice) if err != nil { t.Fatalf("unable to send coins to alice: %v", err) @@ -4815,8 +4725,9 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { chanPoint := openChannelAndAssert( ctxt, t, net, alice, bob, lntest.OpenChannelParams{ - Amt: chanAmt, - MinHtlc: customizedMinHtlc, + Amt: chanAmt, + MinHtlc: customizedMinHtlc, + RemoteMaxHtlcs: aliceRemoteMaxHtlcs, }, ) @@ -4862,10 +4773,10 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { DustLimitSat: uint64(lnwallet.DefaultDustLimit()), MaxPendingAmtMsat: 99000000, MinHtlcMsat: 1, - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, + MaxAcceptedHtlcs: bobRemoteMaxHtlcs, } assertChannelConstraintsEqual( - t, aliceChannel.LocalConstraints, defaultConstraints, + t, defaultConstraints, aliceChannel.LocalConstraints, ) // customizedConstraints is a ChannelConstraints with customized values. 
@@ -4878,10 +4789,10 @@ func testListChannels(net *lntest.NetworkHarness, t *harnessTest) { DustLimitSat: uint64(lnwallet.DefaultDustLimit()), MaxPendingAmtMsat: 99000000, MinHtlcMsat: customizedMinHtlc, - MaxAcceptedHtlcs: input.MaxHTLCNumber / 2, + MaxAcceptedHtlcs: aliceRemoteMaxHtlcs, } assertChannelConstraintsEqual( - t, aliceChannel.RemoteConstraints, customizedConstraints, + t, customizedConstraints, aliceChannel.RemoteConstraints, ) // Get the ListChannel response for Bob. @@ -5507,7 +5418,7 @@ func testSingleHopSendToRouteCase(net *lntest.NetworkHarness, t *harnessTest, i, p.PaymentRequest) } - // Assert the payment ammount is correct. + // Assert the payment amount is correct. if p.ValueSat != paymentAmtSat { t.Fatalf("incorrect payment amt for payment %d, "+ "want: %d, got: %d", @@ -7438,50 +7349,6 @@ func testMaxPendingChannels(net *lntest.NetworkHarness, t *harnessTest) { } } -// waitForTxInMempool polls until finding one transaction in the provided -// miner's mempool. An error is returned if *one* transaction isn't found within -// the given timeout. -func waitForTxInMempool(miner *rpcclient.Client, - timeout time.Duration) (*chainhash.Hash, error) { - - txs, err := waitForNTxsInMempool(miner, 1, timeout) - if err != nil { - return nil, err - } - - return txs[0], err -} - -// waitForNTxsInMempool polls until finding the desired number of transactions -// in the provided miner's mempool. An error is returned if this number is not -// met after the given timeout. 
-func waitForNTxsInMempool(miner *rpcclient.Client, n int, - timeout time.Duration) ([]*chainhash.Hash, error) { - - breakTimeout := time.After(timeout) - ticker := time.NewTicker(50 * time.Millisecond) - defer ticker.Stop() - - var err error - var mempool []*chainhash.Hash - for { - select { - case <-breakTimeout: - return nil, fmt.Errorf("wanted %v, found %v txs "+ - "in mempool: %v", n, len(mempool), mempool) - case <-ticker.C: - mempool, err = miner.GetRawMempool() - if err != nil { - return nil, err - } - - if len(mempool) == n { - return mempool, nil - } - } - } -} - // getNTxsFromMempool polls until finding the desired number of transactions in // the provided miner's mempool and returns the full transactions to the caller. func getNTxsFromMempool(miner *rpcclient.Client, n int, @@ -7503,6 +7370,28 @@ func getNTxsFromMempool(miner *rpcclient.Client, n int, return txes, nil } +// getTxFee retrieves parent transactions and reconstructs the fee paid. +func getTxFee(miner *rpcclient.Client, tx *wire.MsgTx) (btcutil.Amount, error) { + var balance btcutil.Amount + for _, in := range tx.TxIn { + parentHash := in.PreviousOutPoint.Hash + rawTx, err := miner.GetRawTransaction(&parentHash) + if err != nil { + return 0, err + } + parent := rawTx.MsgTx() + balance += btcutil.Amount( + parent.TxOut[in.PreviousOutPoint.Index].Value, + ) + } + + for _, out := range tx.TxOut { + balance -= btcutil.Amount(out.Value) + } + + return balance, nil +} + // testFailingChannel tests that we will fail the channel by force closing ii // in the case where a counterparty tries to settle an HTLC with the wrong // preimage. @@ -9352,6 +9241,10 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, dave *lntest.HarnessNode, daveStartingBalance int64, anchors bool) { + // Increase the fee estimate so that the following force close tx will + // be cpfp'ed. + net.SetFeeEstimate(30000) + // We disabled auto-reconnect for some tests to avoid timing issues. 
// To make sure the nodes are initiating DLP now, we have to manually // re-connect them. @@ -9392,7 +9285,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, } // Generate a single block, which should confirm the closing tx. - block := mineBlocks(t, net, 1, expectedTxes)[0] + _ = mineBlocks(t, net, 1, expectedTxes)[0] // Dave should sweep his funds immediately, as they are not timelocked. // We also expect Dave to sweep his anchor, if present. @@ -9413,7 +9306,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, assertNumPendingChannels(t, carol, 0, 1) // Mine the sweep tx. - block = mineBlocks(t, net, 1, expectedTxes)[0] + _ = mineBlocks(t, net, 1, expectedTxes)[0] // Now Dave should consider the channel fully closed. assertNumPendingChannels(t, dave, 0, 0) @@ -9442,7 +9335,7 @@ func assertDLPExecuted(net *lntest.NetworkHarness, t *harnessTest, if err != nil { t.Fatalf("unable to find Carol's sweep tx in mempool: %v", err) } - block = mineBlocks(t, net, 1, 1)[0] + block := mineBlocks(t, net, 1, 1)[0] assertTxInBlock(t, block, carolSweep) // Now the channel should be fully closed also from Carol's POV. @@ -11158,11 +11051,7 @@ func testSwitchCircuitPersistence(net *lntest.NetworkHarness, t *harnessTest) { // Ensure all nodes in the network still have 5 outstanding htlcs. err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) - if predErr != nil { - return false - } - return true - + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) @@ -11473,10 +11362,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { // for the duration of the interval. 
err = wait.Invariant(func() bool { predErr = assertNumActiveHtlcs(nodes, numPayments) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*2) if err != nil { t.Fatalf("htlc change: %v", predErr) @@ -11500,10 +11386,7 @@ func testSwitchOfflineDelivery(net *lntest.NetworkHarness, t *harnessTest) { carolNode := []*lntest.HarnessNode{carol} err = wait.Predicate(func() bool { predErr = assertNumActiveHtlcs(carolNode, 0) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) @@ -11791,7 +11674,6 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness // Disconnect the two intermediaries, Alice and Dave, by shutting down // Alice. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) if err := net.StopNode(net.Alice); err != nil { t.Fatalf("unable to shutdown alice: %v", err) } @@ -11821,10 +11703,7 @@ func testSwitchOfflineDeliveryPersistence(net *lntest.NetworkHarness, t *harness } predErr = assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) - if predErr != nil { - return false - } - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) @@ -12129,7 +12008,6 @@ func testSwitchOfflineDeliveryOutgoingOffline( // Disconnect the two intermediaries, Alice and Dave, so that when carol // restarts, the response will be held by Dave. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) if err := net.StopNode(net.Alice); err != nil { t.Fatalf("unable to shutdown alice: %v", err) } @@ -12150,11 +12028,7 @@ func testSwitchOfflineDeliveryOutgoingOffline( } predErr = assertNumActiveHtlcsChanPoint(dave, carolFundPoint, 0) - if predErr != nil { - return false - } - - return true + return predErr == nil }, time.Second*15) if err != nil { t.Fatalf("htlc mismatch: %v", predErr) @@ -13038,10 +12912,10 @@ func testAbandonChannel(net *lntest.NetworkHarness, t *harnessTest) { if err != nil { t.Fatalf("unable to list pending channels: %v", err) } - if len(alicePendingList.PendingClosingChannels) != 0 { + if len(alicePendingList.PendingClosingChannels) != 0 { //nolint:staticcheck t.Fatalf("alice should only have no pending closing channels, "+ "instead she has %v", - len(alicePendingList.PendingClosingChannels)) + len(alicePendingList.PendingClosingChannels)) //nolint:staticcheck } if len(alicePendingList.PendingForceClosingChannels) != 0 { t.Fatalf("alice should only have no pending force closing "+ @@ -13250,7 +13124,8 @@ func testSweepAllCoins(net *lntest.NetworkHarness, t *harnessTest) { // Our error will be wrapped in a rpc error, so we check that it // contains the error we expect. - if !strings.Contains(err.Error(), walletrpc.ErrZeroLabel.Error()) { + errZeroLabel := "cannot label transaction with empty label" + if !strings.Contains(err.Error(), errZeroLabel) { t.Fatalf("expected: zero label error, got: %v", err) } @@ -13344,1007 +13219,6 @@ func assertTxLabel(ctx context.Context, t *harnessTest, } } -// testChannelBackupUpdates tests that both the streaming channel update RPC, -// and the on-disk channels.backup are updated each time a channel is -// opened/closed. 
-func testChannelBackupUpdates(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll make a temp directory that we'll use to store our - // backup file, so we can check in on it during the test easily. - backupDir, err := ioutil.TempDir("", "") - if err != nil { - t.Fatalf("unable to create backup dir: %v", err) - } - defer os.RemoveAll(backupDir) - - // First, we'll create a new node, Carol. We'll also create a temporary - // file that Carol will use to store her channel backups. - backupFilePath := filepath.Join( - backupDir, chanbackup.DefaultBackupFileName, - ) - carolArgs := fmt.Sprintf("--backupfilepath=%v", backupFilePath) - carol, err := net.NewNode("carol", []string{carolArgs}) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Next, we'll register for streaming notifications for changes to the - // backup file. - backupStream, err := carol.SubscribeChannelBackups( - ctxb, &lnrpc.ChannelBackupSubscription{}, - ) - if err != nil { - t.Fatalf("unable to create backup stream: %v", err) - } - - // We'll use this goroutine to proxy any updates to a channel we can - // easily use below. - var wg sync.WaitGroup - backupUpdates := make(chan *lnrpc.ChanBackupSnapshot) - streamErr := make(chan error) - streamQuit := make(chan struct{}) - - wg.Add(1) - go func() { - defer wg.Done() - for { - snapshot, err := backupStream.Recv() - if err != nil { - select { - case streamErr <- err: - case <-streamQuit: - return - } - } - - select { - case backupUpdates <- snapshot: - case <-streamQuit: - return - } - } - }() - defer close(streamQuit) - - // With Carol up, we'll now connect her to Alice, and open a channel - // between them. 
- ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - // Next, we'll open two channels between Alice and Carol back to back. - var chanPoints []*lnrpc.ChannelPoint - numChans := 2 - chanAmt := btcutil.Amount(1000000) - for i := 0; i < numChans; i++ { - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - chanPoints = append(chanPoints, chanPoint) - } - - // Using this helper function, we'll maintain a pointer to the latest - // channel backup so we can compare it to the on disk state. - var currentBackup *lnrpc.ChanBackupSnapshot - assertBackupNtfns := func(numNtfns int) { - for i := 0; i < numNtfns; i++ { - select { - case err := <-streamErr: - t.Fatalf("error with backup stream: %v", err) - - case currentBackup = <-backupUpdates: - - case <-time.After(time.Second * 5): - t.Fatalf("didn't receive channel backup "+ - "notification %v", i+1) - } - } - } - - // assertBackupFileState is a helper function that we'll use to compare - // the on disk back up file to our currentBackup pointer above. - assertBackupFileState := func() { - err := wait.NoError(func() error { - packedBackup, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return fmt.Errorf("unable to read backup "+ - "file: %v", err) - } - - // As each back up file will be encrypted with a fresh - // nonce, we can't compare them directly, so instead - // we'll compare the length which is a proxy for the - // number of channels that the multi-backup contains. - rawBackup := currentBackup.MultiChanBackup.MultiChanBackup - if len(rawBackup) != len(packedBackup) { - return fmt.Errorf("backup files don't match: "+ - "expected %x got %x", rawBackup, packedBackup) - } - - // Additionally, we'll assert that both backups up - // returned are valid. 
- for i, backup := range [][]byte{rawBackup, packedBackup} { - snapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: &lnrpc.MultiChanBackup{ - MultiChanBackup: backup, - }, - } - _, err := carol.VerifyChanBackup(ctxb, snapshot) - if err != nil { - return fmt.Errorf("unable to verify "+ - "backup #%d: %v", i, err) - } - } - - return nil - }, time.Second*15) - if err != nil { - t.Fatalf("backup state invalid: %v", err) - } - } - - // As these two channels were just opened, we should've got two times - // the pending and open notifications for channel backups. - assertBackupNtfns(2 * 2) - - // The on disk file should also exactly match the latest backup that we - // have. - assertBackupFileState() - - // Next, we'll close the channels one by one. After each channel - // closure, we should get a notification, and the on-disk state should - // match this state as well. - for i := 0; i < numChans; i++ { - // To ensure force closes also trigger an update, we'll force - // close half of the channels. - forceClose := i%2 == 0 - - chanPoint := chanPoints[i] - - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, t, net, net.Alice, chanPoint, forceClose, - ) - - // We should get a single notification after closing, and the - // on-disk state should match this latest notifications. - assertBackupNtfns(1) - assertBackupFileState() - - // If we force closed the channel, then we'll mine enough - // blocks to ensure all outputs have been swept. - if forceClose { - cleanupForceClose(t, net, net.Alice, chanPoint) - } - } -} - -// testExportChannelBackup tests that we're able to properly export either a -// targeted channel's backup, or export backups of all the currents open -// channels. -func testExportChannelBackup(net *lntest.NetworkHarness, t *harnessTest) { - ctxb := context.Background() - - // First, we'll create our primary test node: Carol. 
We'll use Carol to - // open channels and also export backups that we'll examine throughout - // the test. - carol, err := net.NewNode("carol", nil) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // With Carol up, we'll now connect her to Alice, and open a channel - // between them. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - if err := net.ConnectNodes(ctxt, carol, net.Alice); err != nil { - t.Fatalf("unable to connect carol to alice: %v", err) - } - - // Next, we'll open two channels between Alice and Carol back to back. - var chanPoints []*lnrpc.ChannelPoint - numChans := 2 - chanAmt := btcutil.Amount(1000000) - for i := 0; i < numChans; i++ { - ctxt, _ := context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, net.Alice, carol, - lntest.OpenChannelParams{ - Amt: chanAmt, - }, - ) - - chanPoints = append(chanPoints, chanPoint) - } - - // Now that the channels are open, we should be able to fetch the - // backups of each of the channels. - for _, chanPoint := range chanPoints { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ExportChannelBackupRequest{ - ChanPoint: chanPoint, - } - chanBackup, err := carol.ExportChannelBackup(ctxt, req) - if err != nil { - t.Fatalf("unable to fetch backup for channel %v: %v", - chanPoint, err) - } - - // The returned backup should be full populated. Since it's - // encrypted, we can't assert any more than that atm. - if len(chanBackup.ChanBackup) == 0 { - t.Fatalf("obtained empty backup for channel: %v", chanPoint) - } - - // The specified chanPoint in the response should match our - // requested chanPoint. - if chanBackup.ChanPoint.String() != chanPoint.String() { - t.Fatalf("chanPoint mismatched: expected %v, got %v", - chanPoint.String(), - chanBackup.ChanPoint.String()) - } - } - - // Before we proceed, we'll make two utility methods we'll use below - // for our primary assertions. 
- assertNumSingleBackups := func(numSingles int) { - err := wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ChanBackupExportRequest{} - chanSnapshot, err := carol.ExportAllChannelBackups( - ctxt, req, - ) - if err != nil { - return fmt.Errorf("unable to export channel "+ - "backup: %v", err) - } - - if chanSnapshot.SingleChanBackups == nil { - return fmt.Errorf("single chan backups not " + - "populated") - } - - backups := chanSnapshot.SingleChanBackups.ChanBackups - if len(backups) != numSingles { - return fmt.Errorf("expected %v singles, "+ - "got %v", len(backups), numSingles) - } - - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf(err.Error()) - } - } - assertMultiBackupFound := func() func(bool, map[wire.OutPoint]struct{}) { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - req := &lnrpc.ChanBackupExportRequest{} - chanSnapshot, err := carol.ExportAllChannelBackups(ctxt, req) - if err != nil { - t.Fatalf("unable to export channel backup: %v", err) - } - - return func(found bool, chanPoints map[wire.OutPoint]struct{}) { - switch { - case found && chanSnapshot.MultiChanBackup == nil: - t.Fatalf("multi-backup not present") - - case !found && chanSnapshot.MultiChanBackup != nil && - (len(chanSnapshot.MultiChanBackup.MultiChanBackup) != - chanbackup.NilMultiSizePacked): - - t.Fatalf("found multi-backup when non should " + - "be found") - } - - if !found { - return - } - - backedUpChans := chanSnapshot.MultiChanBackup.ChanPoints - if len(chanPoints) != len(backedUpChans) { - t.Fatalf("expected %v chans got %v", len(chanPoints), - len(backedUpChans)) - } - - for _, chanPoint := range backedUpChans { - wirePoint := rpcPointToWirePoint(t, chanPoint) - if _, ok := chanPoints[wirePoint]; !ok { - t.Fatalf("unexpected backup: %v", wirePoint) - } - } - } - } - - chans := make(map[wire.OutPoint]struct{}) - for _, chanPoint := range chanPoints { - chans[rpcPointToWirePoint(t, chanPoint)] = struct{}{} - } - 
- // We should have exactly two single channel backups contained, and we - // should also have a multi-channel backup. - assertNumSingleBackups(2) - assertMultiBackupFound()(true, chans) - - // We'll now close each channel on by one. After we close a channel, we - // shouldn't be able to find that channel as a backup still. We should - // also have one less single written to disk. - for i, chanPoint := range chanPoints { - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert( - ctxt, t, net, net.Alice, chanPoint, false, - ) - - assertNumSingleBackups(len(chanPoints) - i - 1) - - delete(chans, rpcPointToWirePoint(t, chanPoint)) - assertMultiBackupFound()(true, chans) - } - - // At this point we shouldn't have any single or multi-chan backups at - // all. - assertNumSingleBackups(0) - assertMultiBackupFound()(false, nil) -} - -// nodeRestorer is a function closure that allows each chanRestoreTestCase to -// control exactly *how* the prior node is restored. This might be using an -// backup obtained over RPC, or the file system, etc. -type nodeRestorer func() (*lntest.HarnessNode, error) - -// chanRestoreTestCase describes a test case for an end to end SCB restoration -// work flow. One node will start from scratch using an existing SCB. At the -// end of the est, both nodes should be made whole via the DLP protocol. -type chanRestoreTestCase struct { - // name is the name of the target test case. - name string - - // channelsUpdated is false then this means that no updates - // have taken place within the channel before restore. - // Otherwise, HTLCs will be settled between the two parties - // before restoration modifying the balance beyond the initial - // allocation. - channelsUpdated bool - - // initiator signals if Dave should be the one that opens the - // channel to Alice, or if it should be the other way around. - initiator bool - - // private signals if the channel from Dave to Carol should be - // private or not. 
- private bool - - // unconfirmed signals if the channel from Dave to Carol should be - // confirmed or not. - unconfirmed bool - - // anchorCommit is true, then the new anchor commitment type will be - // used for the channels created in the test. - anchorCommit bool - - // restoreMethod takes an old node, then returns a function - // closure that'll return the same node, but with its state - // restored via a custom method. We use this to abstract away - // _how_ a node is restored from our assertions once the node - // has been fully restored itself. - restoreMethod func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) -} - -// testChanRestoreScenario executes a chanRestoreTestCase from end to end, -// ensuring that after Dave restores his channel state according to the -// testCase, the DLP protocol is executed properly and both nodes are made -// whole. -func testChanRestoreScenario(t *harnessTest, net *lntest.NetworkHarness, - testCase *chanRestoreTestCase, password []byte) { - - const ( - chanAmt = btcutil.Amount(10000000) - pushAmt = btcutil.Amount(5000000) - ) - - ctxb := context.Background() - - var nodeArgs []string - if testCase.anchorCommit { - nodeArgs = commitTypeAnchors.Args() - } - - // First, we'll create a brand new node we'll use within the test. If - // we have a custom backup file specified, then we'll also create that - // for use. - dave, mnemonic, err := net.NewNodeWithSeed( - "dave", nodeArgs, password, - ) - if err != nil { - t.Fatalf("unable to create new node: %v", err) - } - // Defer to a closure instead of to shutdownAndAssert due to the value - // of 'dave' changing throughout the test. 
- defer func() { - shutdownAndAssert(net, t, dave) - }() - carol, err := net.NewNode("carol", nodeArgs) - if err != nil { - t.Fatalf("unable to make new node: %v", err) - } - defer shutdownAndAssert(net, t, carol) - - // Now that our new nodes are created, we'll give them some coins for - // channel opening and anchor sweeping. - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, dave) - if err != nil { - t.Fatalf("unable to send coins to dave: %v", err) - } - - var from, to *lntest.HarnessNode - if testCase.initiator { - from, to = dave, carol - } else { - from, to = carol, dave - } - - // Next, we'll connect Dave to Carol, and open a new channel to her - // with a portion pushed. - if err := net.ConnectNodes(ctxt, dave, carol); err != nil { - t.Fatalf("unable to connect dave to carol: %v", err) - } - - // We will either open a confirmed or unconfirmed channel, depending on - // the requirements of the test case. - switch { - case testCase.unconfirmed: - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - _, err := net.OpenPendingChannel( - ctxt, from, to, chanAmt, pushAmt, - ) - if err != nil { - t.Fatalf("couldn't open pending channel: %v", err) - } - - // Give the pubsub some time to update the channel backup. 
- err = wait.NoError(func() error { - fi, err := os.Stat(dave.ChanBackupPath()) - if err != nil { - return err - } - if fi.Size() <= chanbackup.NilMultiSizePacked { - return fmt.Errorf("backup file empty") - } - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf("channel backup not updated in time: %v", err) - } - - default: - ctxt, _ = context.WithTimeout(ctxb, channelOpenTimeout) - chanPoint := openChannelAndAssert( - ctxt, t, net, from, to, - lntest.OpenChannelParams{ - Amt: chanAmt, - PushAmt: pushAmt, - Private: testCase.private, - }, - ) - - // Wait for both sides to see the opened channel. - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = dave.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("dave didn't report channel: %v", err) - } - err = carol.WaitForNetworkChannelOpen(ctxt, chanPoint) - if err != nil { - t.Fatalf("carol didn't report channel: %v", err) - } - } - - // If both parties should start with existing channel updates, then - // we'll send+settle an HTLC between 'from' and 'to' now. - if testCase.channelsUpdated { - invoice := &lnrpc.Invoice{ - Memo: "testing", - Value: 10000, - } - invoiceResp, err := to.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, from, from.RouterClient, - []string{invoiceResp.PaymentRequest}, true, - ) - if err != nil { - t.Fatalf("unable to complete payments: %v", err) - } - } - - // Before we start the recovery, we'll record the balances of both - // Carol and Dave to ensure they both sweep their coins at the end. 
- balReq := &lnrpc.WalletBalanceRequest{} - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - carolBalResp, err := carol.WalletBalance(ctxt, balReq) - if err != nil { - t.Fatalf("unable to get carol's balance: %v", err) - } - carolStartingBalance := carolBalResp.ConfirmedBalance - - daveBalance, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - t.Fatalf("unable to get carol's balance: %v", err) - } - daveStartingBalance := daveBalance.ConfirmedBalance - - // At this point, we'll now execute the restore method to give us the - // new node we should attempt our assertions against. - backupFilePath := dave.ChanBackupPath() - restoredNodeFunc, err := testCase.restoreMethod( - dave, backupFilePath, mnemonic, - ) - if err != nil { - t.Fatalf("unable to prep node restoration: %v", err) - } - - // TODO(roasbeef): assert recovery state in channel - - // Now that we're able to make our restored now, we'll shutdown the old - // Dave node as we'll be storing it shortly below. - shutdownAndAssert(net, t, dave) - - // Next, we'll make a new Dave and start the bulk of our recovery - // workflow. - dave, err = restoredNodeFunc() - if err != nil { - t.Fatalf("unable to restore node: %v", err) - } - - // First ensure that the on-chain balance is restored. - err = wait.NoError(func() error { - ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) - balReq := &lnrpc.WalletBalanceRequest{} - daveBalResp, err := dave.WalletBalance(ctxt, balReq) - if err != nil { - return err - } - - daveBal := daveBalResp.ConfirmedBalance - if daveBal <= 0 { - return fmt.Errorf("expected positive balance, had %v", - daveBal) - } - - return nil - }, defaultTimeout) - if err != nil { - t.Fatalf("On-chain balance not restored: %v", err) - } - - // Now that we have our new node up, we expect that it'll re-connect to - // Carol automatically based on the restored backup. 
- ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = net.EnsureConnected(ctxt, dave, carol) - if err != nil { - t.Fatalf("node didn't connect after recovery: %v", err) - } - - // TODO(roasbeef): move dave restarts? - - // Now we'll assert that both sides properly execute the DLP protocol. - // We grab their balances now to ensure that they're made whole at the - // end of the protocol. - assertDLPExecuted( - net, t, carol, carolStartingBalance, dave, daveStartingBalance, - testCase.anchorCommit, - ) -} - -// chanRestoreViaRPC is a helper test method that returns a nodeRestorer -// instance which will restore the target node from a password+seed, then -// trigger a SCB restore using the RPC interface. -func chanRestoreViaRPC(net *lntest.NetworkHarness, - password []byte, mnemonic []string, - multi []byte) (nodeRestorer, error) { - - backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: multi, - } - - ctxb := context.Background() - - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, mnemonic, 1000, nil, - ) - if err != nil { - return nil, fmt.Errorf("unable to "+ - "restore node: %v", err) - } - - _, err = newNode.RestoreChannelBackups( - ctxb, &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups: %v", err) - } - - return newNode, nil - }, nil -} - -// testChannelBackupRestore tests that we're able to recover from, and initiate -// the DLP protocol via: the RPC restore command, restoring on unlock, and -// restoring from initial wallet creation. We'll also alternate between -// restoring form the on disk file, and restoring from the exported RPC command -// as well. 
-func testChannelBackupRestore(net *lntest.NetworkHarness, t *harnessTest) { - password := []byte("El Psy Kongroo") - - ctxb := context.Background() - - var testCases = []chanRestoreTestCase{ - // Restore from backups obtained via the RPC interface. Dave - // was the initiator, of the non-advertised channel. - { - name: "restore from RPC backup", - channelsUpdated: false, - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // For this restoration method, we'll grab the - // current multi-channel backup from the old - // node, and use it to restore a new node - // within the closure. - req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - - multi := chanBackup.MultiChanBackup.MultiChanBackup - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup from the on-disk file, using the RPC - // interface. - { - name: "restore from backup file", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup as part of node initialization with the - // prior mnemonic and new backup seed. 
- { - name: "restore during creation", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node using the Multi backup we just - // obtained above. - return func() (*lntest.HarnessNode, error) { - return net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, backupSnapshot, - ) - }, nil - }, - }, - - // Restore the backup once the node has already been - // re-created, using the Unlock call. - { - name: "restore during unlock", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // First, fetch the current backup state as is, - // to obtain our latest Multi. - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, &lnrpc.ChanBackupExportRequest{}, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - backupSnapshot := &lnrpc.ChanBackupSnapshot{ - MultiChanBackup: chanBackup.MultiChanBackup, - } - - // Create a new nodeRestorer that will restore - // the node with its seed, but no channel - // backup, shutdown this initialized node, then - // restart it again using Unlock. 
- return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, - mnemonic, 1000, nil, - ) - if err != nil { - return nil, err - } - - err = net.RestartNode( - newNode, nil, backupSnapshot, - ) - if err != nil { - return nil, err - } - - return newNode, nil - }, nil - }, - }, - - // Restore the backup from the on-disk file a second time to - // make sure imports can be canceled and later resumed. - { - name: "restore from backup file twice", - initiator: true, - private: false, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. - backup := &lnrpc.RestoreChanBackupRequest_MultiChanBackup{ - MultiChanBackup: multi, - } - - ctxb := context.Background() - - return func() (*lntest.HarnessNode, error) { - newNode, err := net.RestoreNodeWithSeed( - "dave", nil, password, mnemonic, - 1000, nil, - ) - if err != nil { - return nil, fmt.Errorf("unable to "+ - "restore node: %v", err) - } - - _, err = newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups: %v", - err) - } - - _, err = newNode.RestoreChannelBackups( - ctxb, - &lnrpc.RestoreChanBackupRequest{ - Backup: backup, - }, - ) - if err != nil { - return nil, fmt.Errorf("unable "+ - "to restore backups the"+ - "second time: %v", - err) - } - - return newNode, nil - }, nil - }, - }, - - // Use the channel backup file that contains an unconfirmed - // channel and make sure recovery works as well. 
- { - name: "restore unconfirmed channel file", - channelsUpdated: false, - initiator: true, - private: false, - unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } - - // Let's assume time passes, the channel - // confirms in the meantime but for some reason - // the backup we made while it was still - // unconfirmed is the only backup we have. We - // should still be able to restore it. To - // simulate time passing, we mine some blocks - // to get the channel confirmed _after_ we saved - // the backup. - mineBlocks(t, net, 6, 1) - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Create a backup using RPC that contains an unconfirmed - // channel and make sure recovery works as well. - { - name: "restore unconfirmed channel RPC", - channelsUpdated: false, - initiator: true, - private: false, - unconfirmed: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // For this restoration method, we'll grab the - // current multi-channel backup from the old - // node. The channel should be included, even if - // it is not confirmed yet. 
- req := &lnrpc.ChanBackupExportRequest{} - chanBackup, err := oldNode.ExportAllChannelBackups( - ctxb, req, - ) - if err != nil { - return nil, fmt.Errorf("unable to obtain "+ - "channel backup: %v", err) - } - chanPoints := chanBackup.MultiChanBackup.ChanPoints - if len(chanPoints) == 0 { - return nil, fmt.Errorf("unconfirmed " + - "channel not included in backup") - } - - // Let's assume time passes, the channel - // confirms in the meantime but for some reason - // the backup we made while it was still - // unconfirmed is the only backup we have. We - // should still be able to restore it. To - // simulate time passing, we mine some blocks - // to get the channel confirmed _after_ we saved - // the backup. - mineBlocks(t, net, 6, 1) - - // In our nodeRestorer function, we'll restore - // the node from seed, then manually recover - // the channel backup. - multi := chanBackup.MultiChanBackup.MultiChanBackup - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - - // Restore the backup from the on-disk file, using the RPC - // interface, for anchor commitment channels. - { - name: "restore from backup file anchors", - initiator: true, - private: false, - anchorCommit: true, - restoreMethod: func(oldNode *lntest.HarnessNode, - backupFilePath string, - mnemonic []string) (nodeRestorer, error) { - - // Read the entire Multi backup stored within - // this node's channels.backup file. - multi, err := ioutil.ReadFile(backupFilePath) - if err != nil { - return nil, err - } - - // Now that we have Dave's backup file, we'll - // create a new nodeRestorer that will restore - // using the on-disk channels.backup. - return chanRestoreViaRPC( - net, password, mnemonic, multi, - ) - }, - }, - } - - // TODO(roasbeef): online vs offline close? - - // TODO(roasbeef): need to re-trigger the on-disk file once the node - // ann is updated? 
- - for _, testCase := range testCases { - success := t.t.Run(testCase.name, func(t *testing.T) { - h := newHarnessTest(t, net) - testChanRestoreScenario(h, net, &testCase, password) - }) - if !success { - break - } - } -} - // testHoldInvoicePersistence tests that a sender to a hold-invoice, can be // restarted before the payment gets settled, and still be able to receive the // preimage. @@ -14667,7 +13541,7 @@ func testHoldInvoicePersistence(net *lntest.NetworkHarness, t *harnessTest) { // Assert terminal payment state. if i%2 == 0 { if payment.Status != lnrpc.Payment_SUCCEEDED { - t.Fatalf("state not suceeded : %v", + t.Fatalf("state not succeeded : %v", payment.Status) } } else { @@ -14725,77 +13599,178 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) { // First, we'll create two new nodes that we'll use to open channel // between for this test. carol, err := net.NewNode("carol", nil) - if err != nil { - t.Fatalf("unable to start new node: %v", err) - } + require.NoError(t.t, err) defer shutdownAndAssert(net, t, carol) dave, err := net.NewNode("dave", nil) - if err != nil { - t.Fatalf("unable to start new node: %v", err) - } + require.NoError(t.t, err) defer shutdownAndAssert(net, t, dave) // Carol will be funding the channel, so we'll send some coins over to // her and ensure they have enough confirmations before we proceed. ctxt, _ := context.WithTimeout(ctxb, defaultTimeout) err = net.SendCoins(ctxt, btcutil.SatoshiPerBitcoin, carol) - if err != nil { - t.Fatalf("unable to send coins to carol: %v", err) - } + require.NoError(t.t, err) // Before we start the test, we'll ensure both sides are connected to // the funding flow can properly be executed. ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) err = net.EnsureConnected(ctxt, carol, dave) - if err != nil { - t.Fatalf("unable to connect peers: %v", err) + require.NoError(t.t, err) + + // At this point, we're ready to simulate our external channel funding + // flow. 
To start with, we'll create a pending channel with a shim for + // a transaction that will never be published. + const thawHeight uint32 = 10 + const chanSize = lnd.MaxBtcFundingAmount + fundingShim1, chanPoint1, _ := deriveFundingShim( + net, t, carol, dave, chanSize, thawHeight, 1, false, + ) + _ = openChannelStream( + ctxb, t, net, carol, dave, lntest.OpenChannelParams{ + Amt: chanSize, + FundingShim: fundingShim1, + }, + ) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + assertNumOpenChannelsPending(ctxt, t, carol, dave, 1) + + // That channel is now pending forever and normally would saturate the + // max pending channel limit for both nodes. But because the channel is + // externally funded, we should still be able to open another one. Let's + // do exactly that now. For this one we publish the transaction so we + // can mine it later. + fundingShim2, chanPoint2, _ := deriveFundingShim( + net, t, carol, dave, chanSize, thawHeight, 2, true, + ) + + // At this point, we'll now carry out the normal basic channel funding + // test as everything should now proceed as normal (a regular channel + // funding flow). + carolChan, daveChan, _, err := basicChannelFundingTest( + t, net, carol, dave, fundingShim2, + ) + require.NoError(t.t, err) + + // Both channels should be marked as frozen with the proper thaw + // height. + if carolChan.ThawHeight != thawHeight { + t.Fatalf("expected thaw height of %v, got %v", + carolChan.ThawHeight, thawHeight) + } + if daveChan.ThawHeight != thawHeight { + t.Fatalf("expected thaw height of %v, got %v", + daveChan.ThawHeight, thawHeight) } - // At this point, we're ready to simulate our external channle funding - // flow. To start with, we'll get to new keys from both sides which - // will be used to create the multi-sig output for the external funding - // transaction. + // Next, to make sure the channel functions as normal, we'll make some + // payments within the channel. 
+ payAmt := btcutil.Amount(100000) + invoice := &lnrpc.Invoice{ + Memo: "new chans", + Value: int64(payAmt), + } + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + resp, err := dave.AddInvoice(ctxt, invoice) + require.NoError(t.t, err) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + err = completePaymentRequests( + ctxt, carol, carol.RouterClient, []string{resp.PaymentRequest}, + true, + ) + require.NoError(t.t, err) + + // Now that the channels are open, and we've confirmed that they're + // operational, we'll now ensure that the channels are frozen as + // intended (if requested). + // + // First, we'll try to close the channel as Carol, the initiator. This + // should fail as a frozen channel only allows the responder to + // initiate a channel close. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + _, _, err = net.CloseChannel(ctxt, carol, chanPoint2, false) + if err == nil { + t.Fatalf("carol wasn't denied a co-op close attempt for a " + + "frozen channel") + } + + // Next we'll try but this time with Dave (the responder) as the + // initiator. This time the channel should be closed as normal. + ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) + closeChannelAndAssert(ctxt, t, net, dave, chanPoint2, false) + + // As a last step, we check if we still have the pending channel hanging + // around because we never published the funding TX. + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + assertNumOpenChannelsPending(ctxt, t, carol, dave, 1) + + // Let's make sure we can abandon it. 
+ ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + _, err = carol.AbandonChannel(ctxt, &lnrpc.AbandonChannelRequest{ + ChannelPoint: chanPoint1, + PendingFundingShimOnly: true, + }) + require.NoError(t.t, err) + ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) + _, err = dave.AbandonChannel(ctxt, &lnrpc.AbandonChannelRequest{ + ChannelPoint: chanPoint1, + PendingFundingShimOnly: true, + }) + require.NoError(t.t, err) + + // It should now not appear in the pending channels anymore. + assertNumOpenChannelsPending(ctxt, t, carol, dave, 0) +} + +// deriveFundingShim creates a channel funding shim by deriving the necessary +// keys on both sides. +func deriveFundingShim(net *lntest.NetworkHarness, t *harnessTest, + carol, dave *lntest.HarnessNode, chanSize btcutil.Amount, + thawHeight uint32, keyIndex int32, publish bool) (*lnrpc.FundingShim, + *lnrpc.ChannelPoint, *chainhash.Hash) { + + ctxb := context.Background() keyLoc := &signrpc.KeyLocator{ KeyFamily: 9999, - KeyIndex: 1, + KeyIndex: keyIndex, } carolFundingKey, err := carol.WalletKitClient.DeriveKey(ctxb, keyLoc) - if err != nil { - t.Fatalf("unable to get carol funding key: %v", err) - } + require.NoError(t.t, err) daveFundingKey, err := dave.WalletKitClient.DeriveKey(ctxb, keyLoc) - if err != nil { - t.Fatalf("unable to get dave funding key: %v", err) - } + require.NoError(t.t, err) // Now that we have the multi-sig keys for each party, we can manually // construct the funding transaction. We'll instruct the backend to // immediately create and broadcast a transaction paying out an exact // amount. Normally this would reside in the mempool, but we just // confirm it now for simplicity. 
- const chanSize = lnd.MaxBtcFundingAmount _, fundingOutput, err := input.GenFundingPkScript( carolFundingKey.RawKeyBytes, daveFundingKey.RawKeyBytes, int64(chanSize), ) - if err != nil { - t.Fatalf("unable to create funding script: %v", err) - } - txid, err := net.Miner.SendOutputsWithoutChange( - []*wire.TxOut{fundingOutput}, 5, - ) - if err != nil { - t.Fatalf("unable to create funding output: %v", err) + require.NoError(t.t, err) + + var txid *chainhash.Hash + targetOutputs := []*wire.TxOut{fundingOutput} + if publish { + txid, err = net.Miner.SendOutputsWithoutChange( + targetOutputs, 5, + ) + require.NoError(t.t, err) + } else { + tx, err := net.Miner.CreateTransaction(targetOutputs, 5, false) + require.NoError(t.t, err) + + txHash := tx.TxHash() + txid = &txHash } // At this point, we can being our external channel funding workflow. // We'll start by generating a pending channel ID externally that will // be used to track this new funding type. var pendingChanID [32]byte - if _, err := rand.Read(pendingChanID[:]); err != nil { - t.Fatalf("unable to gen pending chan ID: %v", err) - } + _, err = rand.Read(pendingChanID[:]) + require.NoError(t.t, err) // Now that we have the pending channel ID, Dave (our responder) will // register the intent to receive a new channel funding workflow using @@ -14805,7 +13780,6 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) { FundingTxidBytes: txid[:], }, } - thawHeight := uint32(10) chanPointShim := &lnrpc.ChanPointShim{ Amt: int64(chanSize), ChanPoint: chanPoint, @@ -14830,9 +13804,7 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) { ShimRegister: fundingShim, }, }) - if err != nil { - t.Fatalf("unable to walk funding state forward: %v", err) - } + require.NoError(t.t, err) // If we attempt to register the same shim (has the same pending chan // ID), then we should get an error. 
@@ -14858,66 +13830,7 @@ func testExternalFundingChanPoint(net *lntest.NetworkHarness, t *harnessTest) { } fundingShim.GetChanPointShim().RemoteKey = daveFundingKey.RawKeyBytes - // At this point, we'll now carry out the normal basic channel funding - // test as everything should now proceed as normal (a regular channel - // funding flow). - carolChan, daveChan, _, err := basicChannelFundingTest( - t, net, carol, dave, fundingShim, - ) - if err != nil { - t.Fatalf("unable to open channels: %v", err) - } - - // Both channels should be marked as frozen with the proper thaw - // height. - if carolChan.ThawHeight != thawHeight { - t.Fatalf("expected thaw height of %v, got %v", - carolChan.ThawHeight, thawHeight) - } - if daveChan.ThawHeight != thawHeight { - t.Fatalf("expected thaw height of %v, got %v", - daveChan.ThawHeight, thawHeight) - } - - // Next, to make sure the channel functions as normal, we'll make some - // payments within the channel. - payAmt := btcutil.Amount(100000) - invoice := &lnrpc.Invoice{ - Memo: "new chans", - Value: int64(payAmt), - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - resp, err := dave.AddInvoice(ctxt, invoice) - if err != nil { - t.Fatalf("unable to add invoice: %v", err) - } - ctxt, _ = context.WithTimeout(ctxb, defaultTimeout) - err = completePaymentRequests( - ctxt, carol, carol.RouterClient, []string{resp.PaymentRequest}, - true, - ) - if err != nil { - t.Fatalf("unable to make payments between Carol and Dave") - } - - // Now that the channels are open, and we've confirmed that they're - // operational, we'll now ensure that the channels are frozen as - // intended (if requested). - // - // First, we'll try to close the channel as Carol, the initiator. This - // should fail as a frozen channel only allows the responder to - // initiate a channel close. 
- ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - _, _, err = net.CloseChannel(ctxt, carol, chanPoint, false) - if err == nil { - t.Fatalf("carol wasn't denied a co-op close attempt for a " + - "frozen channel") - } - - // Next we'll try but this time with Dave (the responder) as the - // initiator. This time the channel should be closed as normal. - ctxt, _ = context.WithTimeout(ctxb, channelCloseTimeout) - closeChannelAndAssert(ctxt, t, net, dave, chanPoint, false) + return fundingShim, chanPoint, txid } // sendAndAssertSuccess sends the given payment requests and asserts that the @@ -14992,276 +13905,14 @@ func getPaymentResult(stream routerrpc.Router_SendPaymentV2Client) ( } } -type testCase struct { - name string - test func(net *lntest.NetworkHarness, t *harnessTest) -} - -var testsCases = []*testCase{ - { - name: "sweep coins", - test: testSweepAllCoins, - }, - { - name: "recovery info", - test: testGetRecoveryInfo, - }, - { - name: "onchain fund recovery", - test: testOnchainFundRecovery, - }, - { - name: "basic funding flow", - test: testBasicChannelFunding, - }, - { - name: "unconfirmed channel funding", - test: testUnconfirmedChannelFunding, - }, - { - name: "update channel policy", - test: testUpdateChannelPolicy, - }, - { - name: "open channel reorg test", - test: testOpenChannelAfterReorg, - }, - { - name: "disconnecting target peer", - test: testDisconnectingTargetPeer, - }, - { - name: "graph topology notifications", - test: testGraphTopologyNotifications, - }, - { - name: "funding flow persistence", - test: testChannelFundingPersistence, - }, - { - name: "channel force closure", - test: testChannelForceClosure, - }, - { - name: "channel balance", - test: testChannelBalance, - }, - { - name: "channel unsettled balance", - test: testChannelUnsettledBalance, - }, - { - name: "single hop invoice", - test: testSingleHopInvoice, - }, - { - name: "sphinx replay persistence", - test: testSphinxReplayPersistence, - }, - { - name: "list channels", 
- test: testListChannels, - }, - { - name: "list outgoing payments", - test: testListPayments, - }, - { - name: "max pending channel", - test: testMaxPendingChannels, - }, - { - name: "multi-hop payments", - test: testMultiHopPayments, - }, - { - name: "single-hop send to route", - test: testSingleHopSendToRoute, - }, - { - name: "multi-hop send to route", - test: testMultiHopSendToRoute, - }, - { - name: "send to route error propagation", - test: testSendToRouteErrorPropagation, - }, - { - name: "unannounced channels", - test: testUnannouncedChannels, - }, - { - name: "private channels", - test: testPrivateChannels, - }, - { - name: "invoice routing hints", - test: testInvoiceRoutingHints, - }, - { - name: "multi-hop payments over private channels", - test: testMultiHopOverPrivateChannels, - }, - { - name: "multiple channel creation and update subscription", - test: testBasicChannelCreationAndUpdates, - }, - { - name: "invoice update subscription", - test: testInvoiceSubscriptions, - }, - { - name: "multi-hop htlc error propagation", - test: testHtlcErrorPropagation, - }, - { - name: "reject onward htlc", - test: testRejectHTLC, - }, - // TODO(roasbeef): multi-path integration test - { - name: "node announcement", - test: testNodeAnnouncement, - }, - { - name: "node sign verify", - test: testNodeSignVerify, - }, - { - name: "async payments benchmark", - test: testAsyncPayments, - }, - { - name: "async bidirectional payments", - test: testBidirectionalAsyncPayments, - }, - { - name: "test multi-hop htlc", - test: testMultiHopHtlcClaims, - }, - { - name: "switch circuit persistence", - test: testSwitchCircuitPersistence, - }, - { - name: "switch offline delivery", - test: testSwitchOfflineDelivery, - }, - { - name: "switch offline delivery persistence", - test: testSwitchOfflineDeliveryPersistence, - }, - { - name: "switch offline delivery outgoing offline", - test: testSwitchOfflineDeliveryOutgoingOffline, - }, - { - // TODO(roasbeef): test always needs to be last 
as Bob's state - // is borked since we trick him into attempting to cheat Alice? - name: "revoked uncooperative close retribution", - test: testRevokedCloseRetribution, - }, - { - name: "failing link", - test: testFailingChannel, - }, - { - name: "garbage collect link nodes", - test: testGarbageCollectLinkNodes, - }, - { - name: "abandonchannel", - test: testAbandonChannel, - }, - { - name: "revoked uncooperative close retribution zero value remote output", - test: testRevokedCloseRetributionZeroValueRemoteOutput, - }, - { - name: "revoked uncooperative close retribution remote hodl", - test: testRevokedCloseRetributionRemoteHodl, - }, - { - name: "revoked uncooperative close retribution altruist watchtower", - test: testRevokedCloseRetributionAltruistWatchtower, - }, - { - name: "data loss protection", - test: testDataLossProtection, - }, - { - name: "query routes", - test: testQueryRoutes, - }, - { - name: "route fee cutoff", - test: testRouteFeeCutoff, - }, - { - name: "send update disable channel", - test: testSendUpdateDisableChannel, - }, - { - name: "streaming channel backup update", - test: testChannelBackupUpdates, - }, - { - name: "export channel backup", - test: testExportChannelBackup, - }, - { - name: "channel backup restore", - test: testChannelBackupRestore, - }, - { - name: "hold invoice sender persistence", - test: testHoldInvoicePersistence, - }, - { - name: "cpfp", - test: testCPFP, - }, - { - name: "macaroon authentication", - test: testMacaroonAuthentication, - }, - { - name: "immediate payment after channel opened", - test: testPaymentFollowingChannelOpen, - }, - { - name: "external channel funding", - test: testExternalFundingChanPoint, - }, - { - name: "psbt channel funding", - test: testPsbtChanFunding, - }, - { - name: "sendtoroute multi path payment", - test: testSendToRouteMultiPath, - }, - { - name: "send multi path payment", - test: testSendMultiPathPayment, - }, - { - name: "REST API", - test: testRestApi, - }, - { - name: "intercept 
forwarded htlc packets", - test: testForwardInterceptor, - }, - { - name: "wumbo channels", - test: testWumboChannels, - }, -} - // TestLightningNetworkDaemon performs a series of integration tests amongst a // programmatically driven network of lnd nodes. func TestLightningNetworkDaemon(t *testing.T) { + // If no tests are registered, then we can exit early. + if len(testsCases) == 0 { + t.Skip("integration tests not selected with flag 'rpctest'") + } + ht := newHarnessTest(t, nil) // Declare the network harness here to gain access to its @@ -15284,6 +13935,7 @@ func TestLightningNetworkDaemon(t *testing.T) { "--debuglevel=debug", "--logdir=" + minerLogDir, "--trickleinterval=100ms", + "--nowinservice", } handlers := &rpcclient.NotificationHandlers{ OnTxAccepted: func(hash *chainhash.Hash, amt btcutil.Amount) { @@ -15329,11 +13981,24 @@ func TestLightningNetworkDaemon(t *testing.T) { ht.Fatalf("unable to request transaction notifications: %v", err) } + binary := itestLndBinary + if runtime.GOOS == "windows" { + // Windows (even in a bash like environment like git bash as on + // Travis) doesn't seem to like relative paths to exe files... + currentDir, err := os.Getwd() + if err != nil { + ht.Fatalf("unable to get working directory: %v", err) + } + targetPath := filepath.Join(currentDir, "../../lnd-itest.exe") + binary, err = filepath.Abs(targetPath) + if err != nil { + ht.Fatalf("unable to get absolute path: %v", err) + } + } + // Now we can set up our test harness (LND instance), with the chain // backend we just created. - lndHarness, err = lntest.NewNetworkHarness( - miner, chainBackend, itestLndBinary, - ) + lndHarness, err = lntest.NewNetworkHarness(miner, chainBackend, binary) if err != nil { ht.Fatalf("unable to create lightning network harness: %v", err) } @@ -15366,7 +14031,10 @@ func TestLightningNetworkDaemon(t *testing.T) { // initialization of the network. 
args - list of lnd arguments, // example: "--debuglevel=debug" // TODO(roasbeef): create master balanced channel with all the monies? - if err = lndHarness.SetUp(nil); err != nil { + aliceBobArgs := []string{ + "--default-remote-max-htlcs=483", + } + if err = lndHarness.SetUp(aliceBobArgs); err != nil { ht.Fatalf("unable to set up test lightning network: %v", err) } @@ -15389,6 +14057,9 @@ func TestLightningNetworkDaemon(t *testing.T) { t.Fatalf("unable to add to log: %v", err) } + // Start every test with the default static fee estimate. + lndHarness.SetFeeEstimate(12500) + success := t.Run(testCase.name, func(t1 *testing.T) { ht := newHarnessTest(t1, lndHarness) ht.RunTestCase(testCase) diff --git a/lntest/itest/lnd_test_list_off_test.go b/lntest/itest/lnd_test_list_off_test.go new file mode 100644 index 000000000..ae18d5e0c --- /dev/null +++ b/lntest/itest/lnd_test_list_off_test.go @@ -0,0 +1,5 @@ +// +build !rpctest + +package itest + +var testsCases = []*testCase{} diff --git a/lntest/itest/lnd_test_list_on_test.go b/lntest/itest/lnd_test_list_on_test.go new file mode 100644 index 000000000..98910d22b --- /dev/null +++ b/lntest/itest/lnd_test_list_on_test.go @@ -0,0 +1,285 @@ +// +build rpctest + +package itest + +var testsCases = []*testCase{ + { + name: "sweep coins", + test: testSweepAllCoins, + }, + { + name: "recovery info", + test: testGetRecoveryInfo, + }, + { + name: "onchain fund recovery", + test: testOnchainFundRecovery, + }, + { + name: "basic funding flow", + test: testBasicChannelFunding, + }, + { + name: "unconfirmed channel funding", + test: testUnconfirmedChannelFunding, + }, + { + name: "update channel policy", + test: testUpdateChannelPolicy, + }, + { + name: "open channel reorg test", + test: testOpenChannelAfterReorg, + }, + { + name: "disconnecting target peer", + test: testDisconnectingTargetPeer, + }, + { + name: "graph topology notifications", + test: testGraphTopologyNotifications, + }, + { + name: "funding flow persistence", + test: 
testChannelFundingPersistence, + }, + { + name: "channel force closure", + test: testChannelForceClosure, + }, + { + name: "channel balance", + test: testChannelBalance, + }, + { + name: "channel unsettled balance", + test: testChannelUnsettledBalance, + }, + { + name: "single hop invoice", + test: testSingleHopInvoice, + }, + { + name: "sphinx replay persistence", + test: testSphinxReplayPersistence, + }, + { + name: "list channels", + test: testListChannels, + }, + { + name: "list outgoing payments", + test: testListPayments, + }, + { + name: "max pending channel", + test: testMaxPendingChannels, + }, + { + name: "multi-hop payments", + test: testMultiHopPayments, + }, + { + name: "single-hop send to route", + test: testSingleHopSendToRoute, + }, + { + name: "multi-hop send to route", + test: testMultiHopSendToRoute, + }, + { + name: "send to route error propagation", + test: testSendToRouteErrorPropagation, + }, + { + name: "unannounced channels", + test: testUnannouncedChannels, + }, + { + name: "private channels", + test: testPrivateChannels, + }, + { + name: "invoice routing hints", + test: testInvoiceRoutingHints, + }, + { + name: "multi-hop payments over private channels", + test: testMultiHopOverPrivateChannels, + }, + { + name: "multiple channel creation and update subscription", + test: testBasicChannelCreationAndUpdates, + }, + { + name: "invoice update subscription", + test: testInvoiceSubscriptions, + }, + { + name: "multi-hop htlc error propagation", + test: testHtlcErrorPropagation, + }, + { + name: "reject onward htlc", + test: testRejectHTLC, + }, + // TODO(roasbeef): multi-path integration test + { + name: "node announcement", + test: testNodeAnnouncement, + }, + { + name: "node sign verify", + test: testNodeSignVerify, + }, + { + name: "derive shared key", + test: testDeriveSharedKey, + }, + { + name: "async payments benchmark", + test: testAsyncPayments, + }, + { + name: "async bidirectional payments", + test: testBidirectionalAsyncPayments, + 
}, + { + name: "test multi-hop htlc", + test: testMultiHopHtlcClaims, + }, + { + name: "switch circuit persistence", + test: testSwitchCircuitPersistence, + }, + { + name: "switch offline delivery", + test: testSwitchOfflineDelivery, + }, + { + name: "switch offline delivery persistence", + test: testSwitchOfflineDeliveryPersistence, + }, + { + name: "switch offline delivery outgoing offline", + test: testSwitchOfflineDeliveryOutgoingOffline, + }, + { + // TODO(roasbeef): test always needs to be last as Bob's state + // is borked since we trick him into attempting to cheat Alice? + name: "revoked uncooperative close retribution", + test: testRevokedCloseRetribution, + }, + { + name: "failing link", + test: testFailingChannel, + }, + { + name: "garbage collect link nodes", + test: testGarbageCollectLinkNodes, + }, + { + name: "abandonchannel", + test: testAbandonChannel, + }, + { + name: "revoked uncooperative close retribution zero value remote output", + test: testRevokedCloseRetributionZeroValueRemoteOutput, + }, + { + name: "revoked uncooperative close retribution remote hodl", + test: testRevokedCloseRetributionRemoteHodl, + }, + { + name: "revoked uncooperative close retribution altruist watchtower", + test: testRevokedCloseRetributionAltruistWatchtower, + }, + { + name: "data loss protection", + test: testDataLossProtection, + }, + { + name: "query routes", + test: testQueryRoutes, + }, + { + name: "route fee cutoff", + test: testRouteFeeCutoff, + }, + { + name: "send update disable channel", + test: testSendUpdateDisableChannel, + }, + { + name: "streaming channel backup update", + test: testChannelBackupUpdates, + }, + { + name: "export channel backup", + test: testExportChannelBackup, + }, + { + name: "channel backup restore", + test: testChannelBackupRestore, + }, + { + name: "hold invoice sender persistence", + test: testHoldInvoicePersistence, + }, + { + name: "cpfp", + test: testCPFP, + }, + { + name: "macaroon authentication", + test: 
testMacaroonAuthentication, + }, + { + name: "bake macaroon", + test: testBakeMacaroon, + }, + { + name: "delete macaroon id", + test: testDeleteMacaroonID, + }, + { + name: "immediate payment after channel opened", + test: testPaymentFollowingChannelOpen, + }, + { + name: "external channel funding", + test: testExternalFundingChanPoint, + }, + { + name: "psbt channel funding", + test: testPsbtChanFunding, + }, + { + name: "sendtoroute multi path payment", + test: testSendToRouteMultiPath, + }, + { + name: "send multi path payment", + test: testSendMultiPathPayment, + }, + { + name: "REST API", + test: testRestAPI, + }, + { + name: "intercept forwarded htlc packets", + test: testForwardInterceptor, + }, + { + name: "wumbo channels", + test: testWumboChannels, + }, + { + name: "maximum channel size", + test: testMaxChannelSize, + }, + { + name: "connection timeout", + test: testNetworkConnectionTimeout, + }, +} diff --git a/lntest/itest/lnd_wumbo_channels_test.go b/lntest/itest/lnd_wumbo_channels_test.go index 31f379b86..b4f48c3b2 100644 --- a/lntest/itest/lnd_wumbo_channels_test.go +++ b/lntest/itest/lnd_wumbo_channels_test.go @@ -1,5 +1,3 @@ -// +build rpctest - package itest import ( @@ -62,7 +60,7 @@ func testWumboChannels(net *lntest.NetworkHarness, t *harnessTest) { // The test should indicate a failure due to the channel being too // large. - if !strings.Contains(err.Error(), "channel too large") { + if !strings.Contains(err.Error(), "exceeds maximum chan size") { t.Fatalf("channel should be rejected due to size, instead "+ "error was: %v", err) } diff --git a/lntest/itest/log_error_whitelist.txt b/lntest/itest/log_error_whitelist.txt index deb6abc9c..acbd1ae60 100644 --- a/lntest/itest/log_error_whitelist.txt +++ b/lntest/itest/log_error_whitelist.txt @@ -40,7 +40,7 @@