Merge pull request #6080 from guggero/pr-consolidation
multi: consolidate trivial PRs that are stuck on CI pipeline steps
commit d3faef5691
@@ -58,6 +58,9 @@ linters:
     # Init functions are used by loggers throughout the codebase.
     - gochecknoinits
 
+    # interfacer has been archived.
+    - interfacer
+
 issues:
   # Only show newly introduced problems.
   new-from-rev: 01f696afce2f9c0d4ed854edefa3846891d01d8a
@@ -882,8 +882,9 @@ func fetchChanBucket(tx kvdb.RTx, nodeKey *btcec.PublicKey,
 // channel's data resides in given: the public key for the node, the outpoint,
 // and the chainhash that the channel resides on. This differs from
 // fetchChanBucket in that it returns a writeable bucket.
-func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey, // nolint:interfacer
-	outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket, error) {
+func fetchChanBucketRw(tx kvdb.RwTx, nodeKey *btcec.PublicKey,
+	outPoint *wire.OutPoint, chainHash chainhash.Hash) (kvdb.RwBucket,
+	error) {
 
 	// First fetch the top level bucket which stores all data related to
 	// current, active channels.
@@ -1891,7 +1892,7 @@ func (k *CircuitKey) SetBytes(bs []byte) error {
 
 // Bytes returns the serialized bytes for this circuit key.
 func (k CircuitKey) Bytes() []byte {
-	var bs = make([]byte, 16)
+	bs := make([]byte, 16)
 	binary.BigEndian.PutUint64(bs[:8], k.ChanID.ToUint64())
 	binary.BigEndian.PutUint64(bs[8:], k.HtlcID)
 	return bs
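
For illustration only (not part of this commit), a minimal standalone sketch of the 16-byte layout that `Bytes()` above produces: the first 8 bytes hold the channel ID and the last 8 bytes the HTLC ID, both big endian. Plain `uint64` values stand in for lnd's `ChanID` and `HtlcID` types.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// circuitKeyBytes mirrors the serialization in CircuitKey.Bytes: an 8-byte
// big-endian channel ID followed by an 8-byte big-endian HTLC ID.
func circuitKeyBytes(chanID, htlcID uint64) []byte {
	bs := make([]byte, 16)
	binary.BigEndian.PutUint64(bs[:8], chanID)
	binary.BigEndian.PutUint64(bs[8:], htlcID)
	return bs
}

func main() {
	// 16 bytes total, e.g. channel 42 / HTLC 7.
	fmt.Printf("%x\n", circuitKeyBytes(42, 7))
}
```
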
@@ -3468,7 +3469,6 @@ func putChanCommitments(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
 }
 
 func putChanRevocationState(chanBucket kvdb.RwBucket, channel *OpenChannel) error {
-
 	var b bytes.Buffer
 	err := WriteElements(
 		&b, channel.RemoteCurrentRevocation, channel.RevocationProducer,
@@ -3659,7 +3659,6 @@ func fetchChanRevocationState(chanBucket kvdb.RBucket, channel *OpenChannel) err
 }
 
 func deleteOpenChannel(chanBucket kvdb.RwBucket) error {
-
 	if err := chanBucket.Delete(chanInfoKey); err != nil {
 		return err
 	}
@@ -3682,7 +3681,6 @@ func deleteOpenChannel(chanBucket kvdb.RwBucket) error {
 	}
 
 	return nil
-
 }
 
 // makeLogKey converts a uint64 into an 8 byte array.
@@ -578,7 +578,9 @@ func (c *ChannelGraph) DisabledChannelIDs() ([]uint64, error) {
 //
 // TODO(roasbeef): add iterator interface to allow for memory efficient graph
 // traversal when graph gets mega
-func (c *ChannelGraph) ForEachNode(cb func(kvdb.RTx, *LightningNode) error) error { // nolint:interfacer
+func (c *ChannelGraph) ForEachNode(
+	cb func(kvdb.RTx, *LightningNode) error) error {
+
 	traversal := func(tx kvdb.RTx) error {
 		// First grab the nodes bucket which stores the mapping from
 		// pubKey to node information.
@@ -862,7 +864,6 @@ func (c *ChannelGraph) deleteLightningNode(nodes kvdb.RwBucket,
 	}
 
 	if err := nodes.Delete(compressedPubKey); err != nil {
-
 		return err
 	}
 
@@ -980,7 +981,6 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error
 		if err != nil {
 			return fmt.Errorf("unable to create shell node "+
 				"for: %x", edge.NodeKey1Bytes)
-
 		}
 	case node1Err != nil:
 		return err
@@ -997,7 +997,6 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error
 		if err != nil {
 			return fmt.Errorf("unable to create shell node "+
 				"for: %x", edge.NodeKey2Bytes)
-
 		}
 	case node2Err != nil:
 		return err
@@ -1012,11 +1011,12 @@ func (c *ChannelGraph) addChannelEdge(tx kvdb.RwTx, edge *ChannelEdgeInfo) error
 
 	// Mark edge policies for both sides as unknown. This is to enable
 	// efficient incoming channel lookup for a node.
-	for _, key := range []*[33]byte{&edge.NodeKey1Bytes,
-		&edge.NodeKey2Bytes} {
-
-		err := putChanEdgePolicyUnknown(edges, edge.ChannelID,
-			key[:])
+	keys := []*[33]byte{
+		&edge.NodeKey1Bytes,
+		&edge.NodeKey2Bytes,
+	}
+	for _, key := range keys {
+		err := putChanEdgePolicyUnknown(edges, edge.ChannelID, key[:])
 		if err != nil {
 			return err
 		}
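
As a standalone illustration of the refactor above (placeholder values, not lnd code): declaring the slice of key pointers first and then ranging over it keeps each statement on a single line.

```go
package main

import "fmt"

func main() {
	// Two 33-byte compressed public keys; the contents are placeholders.
	var nodeKey1, nodeKey2 [33]byte
	nodeKey1[0], nodeKey2[0] = 0x02, 0x03

	keys := []*[33]byte{
		&nodeKey1,
		&nodeKey2,
	}
	for _, key := range keys {
		fmt.Printf("node key prefix: %x\n", key[:1])
	}
}
```
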
@@ -2468,7 +2468,6 @@ func updateEdgePolicy(tx kvdb.RwTx, edge *ChannelEdgePolicy,
 	edges := tx.ReadWriteBucket(edgeBucket)
 	if edges == nil {
 		return false, ErrEdgeNotFound
-
 	}
 	edgeIndex := edges.NestedReadWriteBucket(edgeIndexBucket)
 	if edgeIndex == nil {
@@ -3125,7 +3124,8 @@ func (c *ChannelEdgeInfo) OtherNodeKeyBytes(thisNodeKey []byte) (
 // the target node in the channel. This is useful when one knows the pubkey of
 // one of the nodes, and wishes to obtain the full LightningNode for the other
 // end of the channel.
-func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.RTx, thisNodeKey []byte) (*LightningNode, error) {
+func (c *ChannelEdgeInfo) FetchOtherNode(tx kvdb.RTx,
+	thisNodeKey []byte) (*LightningNode, error) {
 
 	// Ensure that the node passed in is actually a member of the channel.
 	var targetNodeBytes [33]byte
@@ -315,7 +315,6 @@ func fetchPayment(bucket kvdb.RBucket) (*MPPayment, error) {
 	creationInfo, err := fetchCreationInfo(bucket)
 	if err != nil {
 		return nil, err
-
 	}
 
 	var htlcs []HTLCAttempt
@@ -732,7 +731,9 @@ func fetchPaymentWithSequenceNumber(tx kvdb.RTx, paymentHash lntypes.Hash,
 // DeletePayment deletes a payment from the DB given its payment hash. If
 // failedHtlcsOnly is set, only failed HTLC attempts of the payment will be
 // deleted.
-func (d *DB) DeletePayment(paymentHash lntypes.Hash, failedHtlcsOnly bool) error { // nolint:interfacer
+func (d *DB) DeletePayment(paymentHash lntypes.Hash,
+	failedHtlcsOnly bool) error {
+
 	return kvdb.Update(d, func(tx kvdb.RwTx) error {
 		payments := tx.ReadWriteBucket(paymentsRootBucket)
 		if payments == nil {
config.go
@@ -633,11 +633,22 @@ func LoadConfig(interceptor signal.Interceptor) (*Config, error) {
 	// file within it.
 	configFileDir := CleanAndExpandPath(preCfg.LndDir)
 	configFilePath := CleanAndExpandPath(preCfg.ConfigFile)
-	if configFileDir != DefaultLndDir {
-		if configFilePath == DefaultConfigFile {
-			configFilePath = filepath.Join(
-				configFileDir, lncfg.DefaultConfigFilename,
-			)
+	switch {
+	// User specified --lnddir but no --configfile. Update the config file
+	// path to the lnd config directory, but don't require it to exist.
+	case configFileDir != DefaultLndDir &&
+		configFilePath == DefaultConfigFile:
+
+		configFilePath = filepath.Join(
+			configFileDir, lncfg.DefaultConfigFilename,
+		)
+
+	// User did specify an explicit --configfile, so we check that it does
+	// exist under that path to avoid surprises.
+	case configFilePath != DefaultConfigFile:
+		if !fileExists(configFilePath) {
+			return nil, fmt.Errorf("specified config file does "+
+				"not exist in %s", configFilePath)
 		}
 	}
 
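
The same resolution logic in a self-contained sketch (hypothetical default paths and helper names; lnd's real `LoadConfig` takes more inputs): an explicit `--lnddir` redirects the default config path into that directory, while an explicitly given `--configfile` must already exist on disk.

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// Placeholder defaults standing in for lnd's DefaultLndDir/DefaultConfigFile.
const (
	defaultLndDir     = "/home/user/.lnd"
	defaultConfigFile = "/home/user/.lnd/lnd.conf"
)

// fileExists reports whether the given path exists.
func fileExists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

// resolveConfigFile mirrors the switch introduced in the hunk above.
func resolveConfigFile(lndDir, configFile string) (string, error) {
	switch {
	// Custom lnddir but default configfile: look for lnd.conf inside the
	// custom directory, without requiring it to exist.
	case lndDir != defaultLndDir && configFile == defaultConfigFile:
		configFile = filepath.Join(lndDir, "lnd.conf")

	// Explicit configfile: it must exist.
	case configFile != defaultConfigFile:
		if !fileExists(configFile) {
			return "", fmt.Errorf("specified config file does "+
				"not exist in %s", configFile)
		}
	}
	return configFile, nil
}

func main() {
	path, err := resolveConfigFile("/data/lnd", defaultConfigFile)
	fmt.Println(path, err)
}
```
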
@@ -10,7 +10,6 @@ After=bitcoind.service
 [Service]
 ExecStart=/usr/local/bin/lnd
 ExecStop=/usr/local/bin/lncli stop
-PIDFile=/home/bitcoin/.lnd/lnd.pid
 
 # Replace these with the user:group that will run lnd
 User=bitcoin
@@ -29,7 +29,7 @@ Docker container, adding the appropriate command-line options as parameters.
 You first need to build the `lnd` docker image:
 
 ```shell
-⛰ docker build --tag=myrepository/lnd --build-arg checkout=v0.11.1-beta .
+⛰ docker build --tag=myrepository/lnd --build-arg checkout=v0.14.1-beta .
 ```
 
 It is recommended that you checkout the latest released tag.
@@ -49,7 +49,7 @@ images of `lnd` available in the
 You can just pull those images by specifying a release tag:
 
 ```shell
-⛰ docker pull lightninglabs/lnd:v0.12.0-beta
+⛰ docker pull lightninglabs/lnd:v0.14.1-beta
 ⛰ docker run lightninglabs/lnd [command-line options]
 ```
 
@@ -61,10 +61,10 @@ script in the image that can be called (before starting the container for
 example):
 
 ```shell
-⛰ docker run --rm --entrypoint="" lightninglabs/lnd:v0.12.1-beta /verify-install.sh v0.12.1-beta
+⛰ docker run --rm --entrypoint="" lightninglabs/lnd:v0.14.1-beta /verify-install.sh v0.14.1-beta
 ⛰ OK=$?
 ⛰ if [ "$OK" -ne "0" ]; then echo "Verification failed!"; exit 1; done
-⛰ docker run lightninglabs/lnd [command-line options]
+⛰ docker run lightninglabs/lnd:v0.14.1-beta [command-line options]
 ```
 
 ## Volumes
@@ -118,7 +118,7 @@ To test the Docker production image locally, run the following from the project
 To choose a specific [branch](https://github.com/lightningnetwork/lnd/branches) or [tag](https://hub.docker.com/r/lightninglabs/lnd/tags?page=1&ordering=last_updated) instead, use the "checkout" build-arg. For example, to build the latest tagged commit:
 
 ```shell
-⛰ docker build . --build-arg checkout=v0.13.0-beta -t myrepository/lnd:v0.13.0-beta
+⛰ docker build . --build-arg checkout=v0.14.1-beta -t myrepository/lnd:v0.14.1-beta
 ```
 
 To build the image using the most current tag:
@@ -382,7 +382,7 @@ in `--bitcoin.simnet` if needed), and also your own `btcd` node if available:
 ## Using bitcoind or litecoind
 
 The configuration for bitcoind and litecoind are nearly identical, the
-following steps can be mirrored with loss of generality to enable a litecoind
+following steps can be mirrored without loss of generality to enable a litecoind
 backend. Setup will be described in regards to `bitcoind`, but note that `lnd`
 uses a distinct `litecoin.node=litecoind` argument and analogous
 subconfigurations prefixed by `litecoind`. Note that adding `--txindex` is
@@ -8,6 +8,7 @@
 1. [Code Documentation and Commenting](#code-documentation-and-commenting)
 1. [Model Git Commit Messages](#model-git-commit-messages)
 1. [Ideal Git Commit Structure](#ideal-git-commit-structure)
+1. [Sign Your Git Commits](#sign-your-git-commits)
 1. [Code Spacing](#code-spacing)
 1. [Protobuf Compilation](#protobuf-compilation)
 1. [Additional Style Constraints On Top of gofmt](#additional-style-constraints-on-top-of-gofmt)
@@ -155,7 +156,7 @@ A quick summary of test practices follows:
   or RPC's will need to be accompanied by integration tests which use the
   [`networkHarness`framework](https://github.com/lightningnetwork/lnd/blob/master/lntest/harness.go)
   contained within `lnd`. For example integration tests, see
-  [`lnd_test.go`](https://github.com/lightningnetwork/lnd/blob/master/lnd_test.go#L181).
+  [`lnd_test.go`](https://github.com/lightningnetwork/lnd/blob/master/lntest/itest/lnd_test.go).
- The itest log files are automatically scanned for `[ERR]` lines. There
  shouldn't be any of those in the logs, see [Use of Log Levels](#use-of-log-levels).
 
@@ -320,6 +321,81 @@ Examples of common patterns w.r.t commit structures within the project:
 * If a PR only fixes a trivial issue, such as updating documentation on a
   small scale, fix typos, or any changes that do not modify the code, the
   commit message should end with `[skip ci]` to skip the CI checks.
 
+## Sign your git commits
+
+When contributing to `lnd` it is recommended to sign your git commits. This is
+easy to do and will help in assuring the integrity of the tree. See [mailing
+list entry](https://lists.linuxfoundation.org/pipermail/bitcoin-dev/2014-May/005877.html)
+for more information.
+
+### How to sign your commits?
+
+Provide the `-S` flag (or `--gpg-sign`) to git commit when you commit
+your changes, for example
+
+```shell
+⛰ git commit -m "Commit message" -S
+```
+
+Optionally you can provide a key id after the `-S` option to sign with a
+specific key.
+
+To instruct `git` to auto-sign every commit, add the following lines to your
+`~/.gitconfig` file:
+
+```text
+[commit]
+  gpgsign = true
+```
+
+### What if I forgot?
+
+You can retroactively sign your previous commit using `--amend`, for example
+
+```shell
+⛰ git commit -S --amend
+```
+
+If you need to go further back, you can use the interactive rebase
+command with 'edit'. Replace `HEAD~3` with the base commit from which
+you want to start.
+
+```shell
+⛰ git rebase -i HEAD~3
+```
+
+Replace 'pick' by 'edit' for the commit that you want to sign and the
+rebasing will stop after that commit. Then you can amend the commit as
+above. Afterwards, do
+
+```shell
+⛰ git rebase --continue
+```
+
+As this will rewrite history, you cannot do this when your commit is
+already merged. In that case, too bad, better luck next time.
+
+If you rewrite history for another reason - for example when squashing
+commits - make sure that you re-sign as the signatures will be lost.
+
+Multiple commits can also be re-signed with `git rebase`. For example, signing
+the last three commits can be done with:
+
+```shell
+⛰ git rebase --exec 'git commit --amend --no-edit -n -S' -i HEAD~3
+```
+
+### How to check if commits are signed?
+
+Use `git log` with `--show-signature`,
+
+```shell
+⛰ git log --show-signature
+```
+
+You can also pass the `--show-signature` option to `git show` to check a single
+commit.
+
 ## Code Spacing
 
|
@ -19,10 +19,6 @@ with lnd in Java. We'll be using Maven as our build tool.
|
|||||||
├── java
|
├── java
|
||||||
│ └── Main.java
|
│ └── Main.java
|
||||||
├── proto
|
├── proto
|
||||||
├── google
|
|
||||||
│ └── api
|
|
||||||
│ ├── annotations.proto
|
|
||||||
│ └── http.proto
|
|
||||||
└── lnrpc
|
└── lnrpc
|
||||||
└── lightning.proto
|
└── lightning.proto
|
||||||
|
|
||||||
@@ -30,13 +26,11 @@ with lnd in Java. We'll be using Maven as our build tool.
 Note the ***proto*** folder, where all the proto files are kept.
 
 - [lightning.proto](https://github.com/lightningnetwork/lnd/blob/master/lnrpc/lightning.proto)
-- [annotations.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/annotations.proto)
-- [http.proto](https://github.com/grpc-ecosystem/grpc-gateway/blob/master/third_party/googleapis/google/api/http.proto)
 
 #### pom.xml
 ```xml
 <properties>
-    <grpc.version>1.8.0</grpc.version>
+    <grpc.version>1.36.0</grpc.version>
 </properties>
 ```
 The following dependencies are required.
@@ -60,7 +54,7 @@ The following dependencies are required.
 <dependency>
     <groupId>io.netty</groupId>
     <artifactId>netty-tcnative-boringssl-static</artifactId>
-    <version>2.0.7.Final</version>
+    <version>2.0.28.Final</version>
 </dependency>
 <dependency>
     <groupId>commons-codec</groupId>
@@ -76,16 +70,16 @@ In the build section, we'll need to configure the following things :
     <extension>
         <groupId>kr.motd.maven</groupId>
         <artifactId>os-maven-plugin</artifactId>
-        <version>1.5.0.Final</version>
+        <version>1.6.2.Final</version>
     </extension>
 </extensions>
 <plugins>
     <plugin>
         <groupId>org.xolstice.maven.plugins</groupId>
         <artifactId>protobuf-maven-plugin</artifactId>
-        <version>0.5.0</version>
+        <version>0.6.1</version>
         <configuration>
-            <protocArtifact>com.google.protobuf:protoc:3.4.0:exe:${os.detected.classifier}</protocArtifact>
+            <protocArtifact>com.google.protobuf:protoc:3.12.0:exe:${os.detected.classifier}</protocArtifact>
             <pluginId>grpc-java</pluginId>
             <pluginArtifact>io.grpc:protoc-gen-grpc-java:${grpc.version}:exe:${os.detected.classifier}</pluginArtifact>
         </configuration>
@@ -129,36 +123,30 @@ import java.nio.file.Paths;
 import java.util.concurrent.Executor;
 
 public class Main {
-    static class MacaroonCallCredential implements CallCredentials {
+    static class MacaroonCallCredential extends CallCredentials {
         private final String macaroon;
 
         MacaroonCallCredential(String macaroon) {
             this.macaroon = macaroon;
         }
 
-        public void thisUsesUnstableApi() {}
-        public void applyRequestMetadata(
-            MethodDescriptor < ? , ? > methodDescriptor,
-            Attributes attributes,
-            Executor executor,
-            final MetadataApplier metadataApplier
-        ) {
-            String authority = attributes.get(ATTR_AUTHORITY);
-            System.out.println(authority);
-            executor.execute(new Runnable() {
-                public void run() {
-                    try {
-                        Metadata headers = new Metadata();
-                        Metadata.Key < String > macaroonKey = Metadata.Key.of("macaroon", Metadata.ASCII_STRING_MARSHALLER);
-                        headers.put(macaroonKey, macaroon);
-                        metadataApplier.apply(headers);
-                    } catch (Throwable e) {
-                        metadataApplier.fail(Status.UNAUTHENTICATED.withCause(e));
+        @Override
+        public void applyRequestMetadata(RequestInfo requestInfo, Executor executor, MetadataApplier metadataApplier) {
+            executor.execute(() -> {
+                try {
+                    Metadata headers = new Metadata();
+                    Metadata.Key<String> macaroonKey = Metadata.Key.of("macaroon", Metadata.ASCII_STRING_MARSHALLER);
+                    headers.put(macaroonKey, macaroon);
+                    metadataApplier.apply(headers);
+                } catch (Throwable e) {
+                    metadataApplier.fail(Status.UNAUTHENTICATED.withCause(e));
                 }
-                }
             });
         }
 
+        @Override
+        public void thisUsesUnstableApi() {
+        }
     }
 
     private static final String CERT_PATH = "/Users/user/Library/Application Support/Lnd/tls.cert";
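
For comparison only (not part of this commit), the same macaroon-as-metadata idea sketched in Go against grpc-go's `credentials.PerRPCCredentials` method set; the type, helper names and file path are illustrative.

```go
package example

import (
	"context"
	"encoding/hex"
	"os"
)

// macaroonCredential attaches a hex-encoded macaroon to every RPC as gRPC
// metadata, analogous to the Java MacaroonCallCredential above. Its method
// set matches grpc-go's credentials.PerRPCCredentials interface.
type macaroonCredential struct {
	macaroonHex string
}

// GetRequestMetadata returns the macaroon header added to each request.
func (m macaroonCredential) GetRequestMetadata(_ context.Context,
	_ ...string) (map[string]string, error) {

	return map[string]string{"macaroon": m.macaroonHex}, nil
}

// RequireTransportSecurity signals that TLS must be used with this credential.
func (m macaroonCredential) RequireTransportSecurity() bool {
	return true
}

// newMacaroonCredential loads a macaroon file and hex encodes it.
func newMacaroonCredential(path string) (macaroonCredential, error) {
	macBytes, err := os.ReadFile(path)
	if err != nil {
		return macaroonCredential{}, err
	}
	return macaroonCredential{macaroonHex: hex.EncodeToString(macBytes)}, nil
}
```

Such a credential would typically be handed to `grpc.Dial` through `grpc.WithPerRPCCredentials`.
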
@@ -199,19 +187,19 @@ Execute the following command in the directory where the **pom.xml** file is loc
 [INFO] ------------------------------------------------------------------------
 [INFO] os.detected.name: osx
 [INFO] os.detected.arch: x86_64
-[INFO] os.detected.version: 10.13
+[INFO] os.detected.version: 10.15
 [INFO] os.detected.version.major: 10
-[INFO] os.detected.version.minor: 13
+[INFO] os.detected.version.minor: 15
 [INFO] os.detected.classifier: osx-x86_64
 [INFO]
 [INFO] ------------------------------------------------------------------------
 [INFO] Building lightning-client 0.0.1-SNAPSHOT
 [INFO] ------------------------------------------------------------------------
 [INFO]
-[INFO] --- protobuf-maven-plugin:0.5.0:compile (default) @ lightning-client ---
+[INFO] --- protobuf-maven-plugin:0.6.1:compile (default) @ lightning-client ---
 [INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/java
 [INFO]
-[INFO] --- protobuf-maven-plugin:0.5.0:compile-custom (default) @ lightning-client ---
+[INFO] --- protobuf-maven-plugin:0.6.1:compile-custom (default) @ lightning-client ---
 [INFO] Compiling 3 proto file(s) to /Users/user/Documents/Projects/lightningclient/target/generated-sources/protobuf/grpc-java
 [INFO]
 [INFO] --- maven-resources-plugin:2.6:resources (default-resources) @ lightning-client ---
@@ -31,8 +31,8 @@ const grpc = require('@grpc/grpc-js');
 const protoLoader = require('@grpc/proto-loader');
 const fs = require("fs");
 
-// Due to updated ECDSA generated tls.cert we need to let gprc know that
-// we need to use that cipher suite otherwise there will be a handhsake
+// Due to updated ECDSA generated tls.cert we need to let gRPC know that
+// we need to use that cipher suite otherwise there will be a handshake
 // error when we communicate with the lnd rpc server.
 process.env.GRPC_SSL_CIPHER_SUITES = 'HIGH+ECDSA'
 
@@ -6,10 +6,12 @@ describes how it can be configured.
 
 ## Building LND with postgres support
 
-To build LND with postgres support, include the following build tag:
+Since `lnd v0.14.1-beta` the necessary build tags to enable postgres support are
+already enabled by default. The default release binaries or docker images can
+be used. To build from source, simply run:
 
 ```shell
-⛰ make tags="kvdb_postgres"
+⛰ make install
 ```
 
 ## Configuring Postgres for LND
@@ -29,3 +31,13 @@ LND is configured for Postgres through the following configuration options:
   database, user and password.
 * `db.postgres.timeout=...` to set the connection timeout. If not set, no
   timeout applies.
+
+Example as follows:
+```
+[db]
+db.backend=postgres
+db.postgres.dsn=postgresql://dbuser:dbpass@127.0.0.1:5432/dbname
+db.postgres.timeout=0
+```
+Connection timeout is disabled, to account for situations where the database
+might be slow for unexpected reasons.
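
A small sketch of building the DSN from the example above with Go's standard library, so that credentials and host are escaped correctly; the user, password, host and database name are the placeholder values from that example, not real settings.

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Compose postgresql://dbuser:dbpass@127.0.0.1:5432/dbname piece by
	// piece; url.URL takes care of escaping the credentials if needed.
	dsn := url.URL{
		Scheme: "postgresql",
		User:   url.UserPassword("dbuser", "dbpass"),
		Host:   "127.0.0.1:5432",
		Path:   "dbname",
	}
	fmt.Println(dsn.String())
}
```
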
|
@ -38,11 +38,15 @@ result, it cannot be used in isolation.
|
|||||||
|
|
||||||
### 24-word Cipher Seeds
|
### 24-word Cipher Seeds
|
||||||
|
|
||||||
When a new `lnd` node is created, it's given a 24-word seed phrase, called an
|
When a new `lnd` node is created, it is given a 24-word seed phrase, called an
|
||||||
[`cipher seed`](https://github.com/lightningnetwork/lnd/tree/master/aezeed).
|
[`aezeed cipher seed`](https://github.com/lightningnetwork/lnd/tree/master/aezeed).
|
||||||
The two seed formats look similar, but the only commonality they share are
|
The [BIP39](https://github.com/bitcoin/bips/blob/master/bip-0039.mediawiki) and
|
||||||
using the same default English dictionary. A valid seed phrase obtained over
|
[`aezeed cipher seed`](https://github.com/lightningnetwork/lnd/tree/master/aezeed)
|
||||||
the CLI `lncli create` command looks something like:
|
formats look similar, but the only commonality they share is that they use the
|
||||||
|
same default [English](https://raw.githubusercontent.com/bitcoin/bips/master/bip-0039/english.txt)
|
||||||
|
wordlist.
|
||||||
|
A valid seed phrase obtained over the CLI `lncli create` command looks something
|
||||||
|
like:
|
||||||
```text
|
```text
|
||||||
!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
|
!!!YOU MUST WRITE DOWN THIS SEED TO BE ABLE TO RESTORE THE WALLET!!!
|
||||||
|
|
||||||
|
@ -38,6 +38,10 @@
|
|||||||
|
|
||||||
* A new command `lncli leaseoutput` was [added](https://github.com/lightningnetwork/lnd/pull/5964).
|
* A new command `lncli leaseoutput` was [added](https://github.com/lightningnetwork/lnd/pull/5964).
|
||||||
|
|
||||||
|
* [Consolidated many smaller docs/typo/trivial fixes from PRs that were stuck
|
||||||
|
in review because of unmet contribution guideline
|
||||||
|
requirements](https://github.com/lightningnetwork/lnd/pull/6080).
|
||||||
|
|
||||||
## RPC Server
|
## RPC Server
|
||||||
|
|
||||||
* [Add value to the field
|
* [Add value to the field
|
||||||
@@ -58,12 +62,22 @@
 
 # Contributors (Alphabetical Order)
 
+* 3nprob
 * Andreas Schjønhaug
+* asvdf
 * Carsten Otto
+* Dan Bolser
 * Daniel McNally
 * ErikEk
+* henta
 * Joost Jager
+* LightningHelper
 * Liviu
+* mateuszmp
+* Naveen Srinivasan
+* randymcmillan
+* Rong Ou
+* Thebora Kompanioni
 * Torkel Rogstad
 * Vsevolod Kaganovych
 * Yong Yu
@@ -285,7 +285,6 @@ func extraArgsEtcd(etcdCfg *etcd.Config, name string, cluster bool) []string {
 
 	if etcdCfg.InsecureSkipVerify {
 		extraArgs = append(extraArgs, "--db.etcd.insecure_skip_verify")
-
 	}
 
 	if cluster {
@@ -619,7 +618,6 @@ func (n *NetworkHarness) EnsureConnected(t *testing.T, a, b *HarnessNode) {
 			predErr = err
 			return false
 		}
-
 	}, DefaultTimeout)
 	if err != nil {
 		return fmt.Errorf("connection not succeeded within 15 "+
@@ -1353,7 +1351,6 @@ func (n *NetworkHarness) WaitForChannelClose(
 // an optional set of check functions which can be used to make further
 // assertions using channel's values. These functions are responsible for
 // failing the test themselves if they do not pass.
-// nolint: interfacer
 func (n *NetworkHarness) AssertChannelExists(node *HarnessNode,
 	chanPoint *wire.OutPoint, checks ...func(*lnrpc.Channel)) error {
 
|
@ -226,7 +226,6 @@ func (cfg *BaseNodeConfig) GenArgs() []string {
|
|||||||
fmt.Sprintf("--datadir=%v", cfg.DataDir),
|
fmt.Sprintf("--datadir=%v", cfg.DataDir),
|
||||||
fmt.Sprintf("--tlscertpath=%v", cfg.TLSCertPath),
|
fmt.Sprintf("--tlscertpath=%v", cfg.TLSCertPath),
|
||||||
fmt.Sprintf("--tlskeypath=%v", cfg.TLSKeyPath),
|
fmt.Sprintf("--tlskeypath=%v", cfg.TLSKeyPath),
|
||||||
fmt.Sprintf("--configfile=%v", cfg.DataDir),
|
|
||||||
fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath),
|
fmt.Sprintf("--adminmacaroonpath=%v", cfg.AdminMacPath),
|
||||||
fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath),
|
fmt.Sprintf("--readonlymacaroonpath=%v", cfg.ReadMacPath),
|
||||||
fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath),
|
fmt.Sprintf("--invoicemacaroonpath=%v", cfg.InvoiceMacPath),
|
||||||
|
@ -570,7 +570,6 @@ func assertNotConnected(t *harnessTest, alice, bob *lntest.HarnessNode) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
||||||
}, defaultTimeout)
|
}, defaultTimeout)
|
||||||
require.NoError(t.t, err)
|
require.NoError(t.t, err)
|
||||||
}
|
}
|
||||||
@@ -590,9 +589,8 @@ func shutdownAndAssert(net *lntest.NetworkHarness, t *harnessTest,
 
 // assertChannelBalanceResp makes a ChannelBalance request and checks the
 // returned response matches the expected.
-func assertChannelBalanceResp(t *harnessTest,
-	node *lntest.HarnessNode,
-	expected *lnrpc.ChannelBalanceResponse) { // nolint:interfacer
+func assertChannelBalanceResp(t *harnessTest, node *lntest.HarnessNode,
+	expected *lnrpc.ChannelBalanceResponse) {
 
 	resp := getChannelBalance(t, node)
 	require.True(t.t, proto.Equal(expected, resp), "balance is incorrect")
@@ -50,32 +50,20 @@ func ErrOutpointIndexTooBig(index uint32) error {
 }
 
 // WriteBytes appends the given bytes to the provided buffer.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WriteBytes(buf *bytes.Buffer, b []byte) error { // nolint: interfacer
+func WriteBytes(buf *bytes.Buffer, b []byte) error {
 	_, err := buf.Write(b)
 	return err
 }
 
 // WriteUint8 appends the uint8 to the provided buffer.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WriteUint8(buf *bytes.Buffer, n uint8) error { // nolint: interfacer
+func WriteUint8(buf *bytes.Buffer, n uint8) error {
 	_, err := buf.Write([]byte{n})
 	return err
 }
 
 // WriteUint16 appends the uint16 to the provided buffer. It encodes the
 // integer using big endian byte order.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WriteUint16(buf *bytes.Buffer, n uint16) error { // nolint: interfacer
+func WriteUint16(buf *bytes.Buffer, n uint16) error {
 	var b [2]byte
 	binary.BigEndian.PutUint16(b[:], n)
 	_, err := buf.Write(b[:])
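
A standalone sketch (not part of this commit) of the writer-helper pattern above: each helper appends a fixed-width big-endian value to a concrete `*bytes.Buffer` and simply forwards the write error; the helper name here is chosen for illustration.

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// writeUint16 appends a big-endian uint16 to the buffer, mirroring the
// WriteUint16 helper shown in the hunk above.
func writeUint16(buf *bytes.Buffer, n uint16) error {
	var b [2]byte
	binary.BigEndian.PutUint16(b[:], n)
	_, err := buf.Write(b[:])
	return err
}

func main() {
	var buf bytes.Buffer
	if err := writeUint16(&buf, 0xbeef); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", buf.Bytes()) // Prints: beef
}
```
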
@@ -93,11 +81,7 @@ func WriteUint32(buf *bytes.Buffer, n uint32) error {
 
 // WriteUint64 appends the uint64 to the provided buffer. It encodes the
 // integer using big endian byte order.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WriteUint64(buf *bytes.Buffer, n uint64) error { // nolint: interfacer
+func WriteUint64(buf *bytes.Buffer, n uint64) error {
 	var b [8]byte
 	binary.BigEndian.PutUint64(b[:], n)
 	_, err := buf.Write(b[:])
@@ -122,7 +106,6 @@ func WritePublicKey(buf *bytes.Buffer, pub *btcec.PublicKey) error {
 
 	serializedPubkey := pub.SerializeCompressed()
 	return WriteBytes(buf, serializedPubkey)
-
 }
 
 // WriteChannelID appends the ChannelID to the provided buffer.
@@ -195,13 +178,7 @@ func WriteFailCode(buf *bytes.Buffer, e FailCode) error {
 // WriteRawFeatureVector encodes the feature using the feature's Encode method
 // and appends the data to the provided buffer. An error will return if the
 // passed feature is nil.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WriteRawFeatureVector(buf *bytes.Buffer, // nolint: interfacer
-	feature *RawFeatureVector) error {
-
+func WriteRawFeatureVector(buf *bytes.Buffer, feature *RawFeatureVector) error {
 	if feature == nil {
 		return ErrNilFeatureVector
 	}
@@ -281,11 +258,7 @@ func WriteBool(buf *bytes.Buffer, b bool) error {
 
 // WritePkScript appends the script to the provided buffer. Returns an error if
 // the provided script exceeds 34 bytes.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func WritePkScript(buf *bytes.Buffer, s PkScript) error { // nolint: interfacer
+func WritePkScript(buf *bytes.Buffer, s PkScript) error {
 	// The largest script we'll accept is a p2wsh which is exactly
 	// 34 bytes long.
 	scriptLength := len(s)
@@ -413,13 +386,7 @@ func WriteNetAddrs(buf *bytes.Buffer, addresses []net.Addr) error {
 }
 
 // writeDataWithLength writes the data and its length to the buffer.
-//
-// Note: We intentionally skip the interfacer linter check here because we want
-// to have concrete type (bytes.Buffer) rather than interface type (io.Write)
-// due to performance concern.
-func writeDataWithLength(buf *bytes.Buffer, // nolint: interfacer
-	data []byte) error {
-
+func writeDataWithLength(buf *bytes.Buffer, data []byte) error {
 	var l [2]byte
 	binary.BigEndian.PutUint16(l[:], uint16(len(data)))
 	if _, err := buf.Write(l[:]); err != nil {