routing: apply capacity factor
We multiply the a priori probability by a factor to take the capacity into account:

P *= 1 - 1 / [1 + exp(-(amount - cutoff)/smearing)]

The factor takes values between 1 (small amounts) and 0 (high amounts). The zero limit may not be reached exactly, depending on the combination of smearing and cutoff. The function is a logistic function mirrored about the y-axis. The cutoff determines the amount at which a significant reduction in probability takes place, and the smearing parameter defines how smooth the transition from 1 to 0 is. Both the cutoff and the smearing parameter are defined as fixed fractions of the capacity.
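As a quick illustration of the factor's shape (not part of the commit), here is a minimal standalone Go sketch that evaluates the formula above, assuming the cutoff and smearing fractions of 0.75 and 0.1 of the capacity that the diff below introduces; the function name and the sample capacity are purely illustrative.

package main

import (
	"fmt"
	"math"
)

// illustrativeCapacityFactor evaluates the logistic factor from the commit
// message: 1 - 1/(1 + exp(-(amt-cutoff)/smearing)), with cutoff = 0.75*cap
// and smearing = 0.1*cap, the fractions defined later in this diff.
func illustrativeCapacityFactor(amtMsat, capMsat float64) float64 {
	cutoff := 0.75 * capMsat
	smearing := 0.1 * capMsat

	return 1 - 1/(1+math.Exp(-(amtMsat-cutoff)/smearing))
}

func main() {
	// A 1,000,000 sat channel expressed in millisatoshis.
	capMsat := 1_000_000_000.0

	for _, frac := range []float64{0, 0.5, 0.75, 1.0} {
		factor := illustrativeCapacityFactor(frac*capMsat, capMsat)
		fmt.Printf("amount = %.2f * capacity -> factor %.3f\n", frac, factor)
	}

	// Prints factors of roughly 0.999, 0.924, 0.500 and 0.076, matching
	// the values used in the tests added by this diff.
}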
@@ -260,6 +260,11 @@ certain large transactions](https://github.com/lightningnetwork/lnd/pull/7100).
   using the address in question](
   https://github.com/lightningnetwork/lnd/pull/7025)
 
+## Pathfinding
+
+* [Pathfinding takes capacity of edges into account to better estimate the
+  success probability.](https://github.com/lightningnetwork/lnd/pull/6857)
+
 ### Tooling and documentation
 
 * [The `golangci-lint` tool was updated to
@@ -113,9 +113,12 @@ func (ctx *mcTestContext) expectP(amt lnwire.MilliSatoshi, expected float64) {
 	ctx.t.Helper()
 
 	p := ctx.mc.GetProbability(mcTestNode1, mcTestNode2, amt, testCapacity)
-	if p != expected {
-		ctx.t.Fatalf("expected probability %v but got %v", expected, p)
-	}
+
+	// We relax the accuracy for the probability check because of the
+	// capacity cutoff factor.
+	require.InDelta(
+		ctx.t, expected, p, 0.001, "probability does not match",
+	)
 }
 
 // reportFailure reports a failure by using a test route.
@@ -10,6 +10,43 @@ import (
 	"github.com/lightningnetwork/lnd/routing/route"
 )
 
+const (
+	// capacityCutoffFraction and capacitySmearingFraction define how
+	// capacity-related probability reweighting works.
+	// capacityCutoffFraction defines the fraction of the channel capacity
+	// at which the effect roughly sets in and capacitySmearingFraction
+	// defines over which range the factor changes from 1 to 0.
+	//
+	// We may fall below the minimum required probability
+	// (DefaultMinRouteProbability) when the amount comes close to the
+	// available capacity of a single channel of the route in case of no
+	// prior knowledge about the channels. We want such routes still to be
+	// available and therefore a probability reduction should not completely
+	// drop the total probability below DefaultMinRouteProbability.
+	// For this to hold for a three-hop route we require:
+	// (DefaultAprioriHopProbability)^3 * minCapacityFactor >
+	// DefaultMinRouteProbability
+	//
+	// For DefaultAprioriHopProbability = 0.6 and
+	// DefaultMinRouteProbability = 0.01 this results in
+	// minCapacityFactor ~ 0.05. The following combination of parameters
+	// fulfill the requirement with capacityFactor(cap, cap) ~ 0.076 (see
+	// tests).
+
+	// The capacityCutoffFraction is a trade-off between usage of the
+	// provided capacity and expected probability reduction when we send the
+	// full amount. The success probability in the random balance model can
+	// be approximated with P(a) = 1 - a/c, for amount a and capacity c. If
+	// we require a probability P(a) > 0.25, this translates into a value of
+	// 0.75 for a/c.
+	capacityCutoffFraction = 0.75
+
+	// We don't want to have a sharp drop of the capacity factor to zero at
+	// capacityCutoffFraction, but a smooth smearing such that some residual
+	// probability is left when spending the whole amount, see above.
+	capacitySmearingFraction = 0.1
+)
+
 var (
 	// ErrInvalidHalflife is returned when we get an invalid half life.
 	ErrInvalidHalflife = errors.New("penalty half life must be >= 0")
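To make the bound quoted in the comment above concrete (an editorial sketch, not part of the diff): with the stated defaults, the smallest admissible capacity factor is DefaultMinRouteProbability / DefaultAprioriHopProbability^3 = 0.01 / 0.216 ≈ 0.046, and the chosen parameters give capacityFactor(cap, cap) ≈ 0.076, which stays above it.

package main

import (
	"fmt"
	"math"
)

func main() {
	// Values quoted in the comment above; the full-amount factor 0.076
	// comes from the tests added later in this diff.
	const (
		aprioriHopProb   = 0.6
		minRouteProb     = 0.01
		fullAmountFactor = 0.076
	)

	// For a three-hop route: aprioriHopProb^3 * factor > minRouteProb.
	minFactor := minRouteProb / math.Pow(aprioriHopProb, 3)

	fmt.Printf("required factor > %.3f, full-amount factor = %.3f\n",
		minFactor, fullAmountFactor)
	// Prints: required factor > 0.046, full-amount factor = 0.076.
}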
@@ -75,18 +112,23 @@ type probabilityEstimator struct {
 // that have not been tried before. The results parameter is a list of last
 // payment results for that node.
 func (p *probabilityEstimator) getNodeProbability(now time.Time,
-	results NodeResults, amt lnwire.MilliSatoshi) float64 {
+	results NodeResults, amt lnwire.MilliSatoshi,
+	capacity btcutil.Amount) float64 {
 
+	// We reduce the apriori hop probability if the amount comes close to
+	// the capacity.
+	apriori := p.AprioriHopProbability * capacityFactor(amt, capacity)
+
 	// If the channel history is not to be taken into account, we can return
 	// early here with the configured a priori probability.
 	if p.AprioriWeight == 1 {
-		return p.AprioriHopProbability
+		return apriori
 	}
 
 	// If there is no channel history, our best estimate is still the a
 	// priori probability.
 	if len(results) == 0 {
-		return p.AprioriHopProbability
+		return apriori
 	}
 
 	// The value of the apriori weight is in the range [0, 1]. Convert it to
@@ -114,7 +156,7 @@ func (p *probabilityEstimator) getNodeProbability(now time.Time,
 	// effectively prunes all channels of the node forever. This is the most
 	// aggressive way in which we can penalize nodes and unlikely to yield
 	// good results in a real network.
-	probabilitiesTotal := p.AprioriHopProbability * aprioriFactor
+	probabilitiesTotal := apriori * aprioriFactor
 	totalWeight := aprioriFactor
 
 	for _, result := range results {
@@ -147,6 +189,36 @@ func (p *probabilityEstimator) getWeight(age time.Duration) float64 {
 	return math.Pow(2, exp)
 }
 
+// capacityFactor is a multiplier that can be used to reduce the probability
+// depending on how much of the capacity is sent. The limits are 1 for amt == 0
+// and 0 for amt >> cutoffMsat. The function drops significantly when amt
+// reaches cutoffMsat. smearingMsat determines over which scale the reduction
+// takes place.
+func capacityFactor(amt lnwire.MilliSatoshi, capacity btcutil.Amount) float64 {
+	// If we don't have information about the capacity, which can be the
+	// case for hop hints or local channels, we return unity to not alter
+	// anything.
+	if capacity == 0 {
+		return 1.0
+	}
+
+	capMsat := float64(lnwire.NewMSatFromSatoshis(capacity))
+	amtMsat := float64(amt)
+
+	if amtMsat > capMsat {
+		return 0
+	}
+
+	cutoffMsat := capacityCutoffFraction * capMsat
+	smearingMsat := capacitySmearingFraction * capMsat
+
+	// We compute a logistic function mirrored around the y axis, centered
+	// at cutoffMsat, decaying over the smearingMsat scale.
+	denominator := 1 + math.Exp(-(amtMsat-cutoffMsat)/smearingMsat)
+
+	return 1 - 1/denominator
+}
+
 // getPairProbability estimates the probability of successfully traversing to
 // toNode based on historical payment outcomes for the from node. Those outcomes
 // are passed in via the results parameter.
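The zero-capacity branch above (hop hints and local channels) is not exercised by the TestCapacityCutoff table added later in this diff. The following is a hypothetical test sketch, written as if it lived in package routing next to the code above; the test name is made up and it assumes the testify require package used elsewhere in this diff.

package routing

import (
	"testing"

	"github.com/btcsuite/btcd/btcutil"
	"github.com/lightningnetwork/lnd/lnwire"
	"github.com/stretchr/testify/require"
)

// TestCapacityFactorUnknownCapacity is a hypothetical sketch: for an unknown
// capacity (zero), as with hop hints or local channels, the factor must be
// unity so that the apriori probability is left untouched.
func TestCapacityFactorUnknownCapacity(t *testing.T) {
	require.Equal(t, 1.0, capacityFactor(lnwire.MilliSatoshi(1_000_000), 0))

	// For comparison: half of a 100,000 sat channel still yields a factor
	// close to one (the capFactor constant used in the estimator tests).
	require.InDelta(
		t, 0.9241,
		capacityFactor(
			lnwire.MilliSatoshi(50_000_000), btcutil.Amount(100_000),
		),
		0.001,
	)
}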
@@ -154,7 +226,7 @@ func (p *probabilityEstimator) getPairProbability(
 	now time.Time, results NodeResults, toNode route.Vertex,
 	amt lnwire.MilliSatoshi, capacity btcutil.Amount) float64 {
 
-	nodeProbability := p.getNodeProbability(now, results, amt)
+	nodeProbability := p.getNodeProbability(now, results, amt, capacity)
 
 	return p.calculateProbability(
 		now, results, nodeProbability, toNode, amt,
@@ -7,6 +7,7 @@ import (
 	"github.com/btcsuite/btcd/btcutil"
 	"github.com/lightningnetwork/lnd/lnwire"
 	"github.com/lightningnetwork/lnd/routing/route"
+	"github.com/stretchr/testify/require"
 )
 
 const (
@@ -27,6 +28,10 @@ const (
 
 	// testCapacity is used to define a capacity for some channels.
 	testCapacity = btcutil.Amount(100_000)
+	testAmount   = lnwire.MilliSatoshi(50_000_000)
+
+	// Defines the capacityFactor for testAmount and testCapacity.
+	capFactor = 0.9241
 )
 
 type estimatorTestContext struct {
@@ -84,7 +89,16 @@ func (c *estimatorTestContext) assertPairProbability(now time.Time,
 func TestProbabilityEstimatorNoResults(t *testing.T) {
 	ctx := newEstimatorTestContext(t)
 
-	ctx.assertPairProbability(testTime, 0, 0, testCapacity, aprioriHopProb)
+	// A zero amount does not trigger capacity rescaling.
+	ctx.assertPairProbability(
+		testTime, 0, 0, testCapacity, aprioriHopProb,
+	)
+
+	// We expect a reduced probability when a higher amount is used.
+	expected := aprioriHopProb * capFactor
+	ctx.assertPairProbability(
+		testTime, 0, testAmount, testCapacity, expected,
+	)
 }
 
 // TestProbabilityEstimatorOneSuccess tests the probability estimation for nodes
@@ -94,7 +108,7 @@ func TestProbabilityEstimatorOneSuccess(t *testing.T) {
 
 	ctx.results = map[int]TimedPairResult{
 		node1: {
-			SuccessAmt: lnwire.MilliSatoshi(1000),
+			SuccessAmt: testAmount,
 		},
 	}
 
@@ -104,12 +118,27 @@ func TestProbabilityEstimatorOneSuccess(t *testing.T) {
 		testTime, node1, 100, testCapacity, aprioriPrevSucProb,
 	)
 
+	// The apriori success probability indicates that in the past we were
+	// able to send the full amount. We don't want to reduce this
+	// probability with the capacity factor, which is tested here.
+	ctx.assertPairProbability(
+		testTime, node1, testAmount, testCapacity, aprioriPrevSucProb,
+	)
+
 	// Untried channels are also influenced by the success. With a
 	// aprioriWeight of 0.75, the a priori probability is assigned weight 3.
 	expectedP := (3*aprioriHopProb + 1*aprioriPrevSucProb) / 4
 	ctx.assertPairProbability(
 		testTime, untriedNode, 100, testCapacity, expectedP,
 	)
+
+	// Check that the correct probability is computed for larger amounts.
+	apriori := aprioriHopProb * capFactor
+
+	expectedP = (3*apriori + 1*aprioriPrevSucProb) / 4
+	ctx.assertPairProbability(
+		testTime, untriedNode, testAmount, testCapacity, expectedP,
+	)
 }
 
 // TestProbabilityEstimatorOneFailure tests the probability estimation for nodes
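The 3:1 weighting used in the expectedP formulas above comes from converting the apriori weight into a weight factor. Here is a hedged, standalone sketch of that arithmetic; the numeric values for the hop and previous-success probabilities are illustrative stand-ins for the test constants aprioriHopProb and aprioriPrevSucProb, and the conversion w/(1-w) is an assumption about the estimator's weighting scheme, not quoted from this diff.

package main

import "fmt"

func main() {
	const (
		aprioriWeight = 0.75
		apriori       = 0.6    // illustrative a priori hop probability
		prevSuccess   = 0.95   // illustrative probability after a success
		capFactor     = 0.9241 // capacityFactor for half the capacity
	)

	// An apriori weight w is assumed to map to a weight factor w/(1-w) in
	// the weighted average, so 0.75 becomes the factor 3 used above.
	aprioriFactor := aprioriWeight / (1 - aprioriWeight)

	// Untried node, small amount: no capacity rescaling.
	small := (aprioriFactor*apriori + prevSuccess) / (aprioriFactor + 1)

	// Untried node, larger amount: the a priori part is rescaled by the
	// capacity factor first.
	large := (aprioriFactor*apriori*capFactor + prevSuccess) / (aprioriFactor + 1)

	fmt.Printf("weight factor: %v, small amount: %.4f, large amount: %.4f\n",
		aprioriFactor, small, large)
}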
@@ -180,3 +209,73 @@ func TestProbabilityEstimatorMix(t *testing.T) {
 		testTime, node2, 100, testCapacity, expectedNodeProb*0.75,
 	)
 }
+
+// TestCapacityCutoff tests the mathematical expression and limits for the
+// capacity factor.
+func TestCapacityCutoff(t *testing.T) {
+	t.Parallel()
+
+	capacitySat := 1_000_000
+	capacityMSat := capacitySat * 1000
+
+	tests := []struct {
+		name           string
+		amountMsat     int
+		expectedFactor float64
+	}{
+		{
+			name:           "zero amount",
+			expectedFactor: 1,
+		},
+		{
+			name:           "low amount",
+			amountMsat:     capacityMSat / 10,
+			expectedFactor: 0.998,
+		},
+		{
+			name:           "half amount",
+			amountMsat:     capacityMSat / 2,
+			expectedFactor: 0.924,
+		},
+		{
+			name: "cutoff amount",
+			amountMsat: int(
+				capacityCutoffFraction * float64(capacityMSat),
+			),
+			expectedFactor: 0.5,
+		},
+		{
+			name:           "high amount",
+			amountMsat:     capacityMSat * 80 / 100,
+			expectedFactor: 0.377,
+		},
+		{
+			// Even when we spend the full capacity, we still want
+			// to have some residual probability to not throw away
+			// routes due to a min probability requirement of the
+			// whole path.
+			name:           "full amount",
+			amountMsat:     capacityMSat,
+			expectedFactor: 0.076,
+		},
+		{
+			name:           "more than capacity",
+			amountMsat:     capacityMSat + 1,
+			expectedFactor: 0.0,
+		},
+	}
+
+	for _, test := range tests {
+		test := test
+
+		t.Run(test.name, func(t *testing.T) {
+			t.Parallel()
+
+			got := capacityFactor(
+				lnwire.MilliSatoshi(test.amountMsat),
+				btcutil.Amount(capacitySat),
+			)
+			require.InDelta(t, test.expectedFactor, got, 0.001)
+		})
+	}
+}