lntemp: add more methods to support testUpdateChanStatus

yyforyongyu 2022-07-22 17:33:26 +08:00
parent 1350f76627
commit cc69634a2e
6 changed files with 693 additions and 0 deletions

View File

@@ -6,14 +6,17 @@ import (
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/txscript"
"github.com/btcsuite/btcd/wire"
"github.com/go-errors/errors"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/lightningnetwork/lnd/lnwallet/chainfee"
"github.com/lightningnetwork/lnd/lnwire"
"github.com/stretchr/testify/require"
)
@@ -466,3 +469,202 @@ func (h *HarnessTest) validateNodeState(hn *node.HarnessNode) {
"uncleaned payments, please delete all of them properly",
hn.Name())
}
// GetChanPointFundingTxid takes a channel point and converts it into a chain
// hash.
func (h *HarnessTest) GetChanPointFundingTxid(
cp *lnrpc.ChannelPoint) *chainhash.Hash {
txid, err := lnrpc.GetChanPointFundingTxid(cp)
require.NoError(h, err, "unable to get txid")
return txid
}
// OutPointFromChannelPoint creates an outpoint from a given channel point.
func (h *HarnessTest) OutPointFromChannelPoint(
cp *lnrpc.ChannelPoint) wire.OutPoint {
txid := h.GetChanPointFundingTxid(cp)
return wire.OutPoint{
Hash: *txid,
Index: cp.OutputIndex,
}
}
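For orientation, a minimal usage sketch (not part of the diff; `h` is assumed to be a *HarnessTest and `cp` a channel point returned from an earlier open):

txid := h.GetChanPointFundingTxid(cp) // *chainhash.Hash
op := h.OutPointFromChannelPoint(cp)  // wire.OutPoint
// The outpoint hash is exactly the funding txid.
require.Equal(h, *txid, op.Hash)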
// OpenChannelParams houses the params to specify when opening a new channel.
type OpenChannelParams struct {
// Amt is the local amount being put into the channel.
Amt btcutil.Amount
// PushAmt is the amount that should be pushed to the remote when the
// channel is opened.
PushAmt btcutil.Amount
// Private is a boolean indicating whether the opened channel should be
// private.
Private bool
// SpendUnconfirmed is a boolean indicating whether we can utilize
// unconfirmed outputs to fund the channel.
SpendUnconfirmed bool
// MinHtlc is the htlc_minimum_msat value set when opening the channel.
MinHtlc lnwire.MilliSatoshi
// RemoteMaxHtlcs is the remote_max_htlcs value set when opening the
// channel, restricting the number of concurrent HTLCs the remote party
// can add to a commitment.
RemoteMaxHtlcs uint16
// FundingShim is an optional funding shim that the caller can specify
// in order to modify the channel funding workflow.
FundingShim *lnrpc.FundingShim
// SatPerVByte is the amount of satoshis to spend in chain fees per
// virtual byte of the transaction.
SatPerVByte btcutil.Amount
// CommitmentType is the commitment type that should be used for the
// channel to be opened.
CommitmentType lnrpc.CommitmentType
}
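As an illustration, a hypothetical params value for a 1M-satoshi private channel pushing 100k satoshis to the remote and funded from unconfirmed outputs (all values made up; unset fields keep their zero values):

p := OpenChannelParams{
	Amt:              btcutil.Amount(1_000_000),
	PushAmt:          btcutil.Amount(100_000),
	Private:          true,
	SpendUnconfirmed: true,
}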
// openChannel attempts to open a channel between srcNode and destNode with
// the passed channel funding parameters. Once the `OpenChannel` RPC is
// called, it consumes the first event received from the open channel client
// and asserts that it's a channel pending event.
func (h *HarnessTest) openChannel(srcNode, destNode *node.HarnessNode,
p OpenChannelParams) rpc.OpenChanClient {
// Specify the minimal confirmations of the UTXOs used for channel
// funding.
minConfs := int32(1)
if p.SpendUnconfirmed {
minConfs = 0
}
// Prepare the request and open the channel.
openReq := &lnrpc.OpenChannelRequest{
NodePubkey: destNode.PubKey[:],
LocalFundingAmount: int64(p.Amt),
PushSat: int64(p.PushAmt),
Private: p.Private,
MinConfs: minConfs,
SpendUnconfirmed: p.SpendUnconfirmed,
MinHtlcMsat: int64(p.MinHtlc),
RemoteMaxHtlcs: uint32(p.RemoteMaxHtlcs),
FundingShim: p.FundingShim,
SatPerByte: int64(p.SatPerVByte),
CommitmentType: p.CommitmentType,
}
respStream := srcNode.RPC.OpenChannel(openReq)
// Consume the "channel pending" update. This waits until the node
// notifies us that the final message in the channel funding workflow
// has been sent to the remote node.
resp := h.ReceiveOpenChannelUpdate(respStream)
// Check that the update is channel pending.
_, ok := resp.Update.(*lnrpc.OpenStatusUpdate_ChanPending)
require.Truef(h, ok, "expected channel pending: update, instead got %v",
resp)
return respStream
}
// OpenChannel attempts to open a channel with the specified parameters
// extended from Alice to Bob. Additionally, the following items are asserted,
// - 6 blocks will be mined so the channel will be announced if it's public.
// - the funding transaction should be found in the first block.
// - both nodes should see the channel edge update in their network graph.
// - both nodes can report the status of the new channel from ListChannels.
func (h *HarnessTest) OpenChannel(alice, bob *node.HarnessNode,
p OpenChannelParams) *lnrpc.ChannelPoint {
// Wait until alice and bob have the latest chain synced.
// Otherwise, we may run into a check within the funding manager that
// prevents any funding workflows from being kicked off if the chain
// isn't yet synced.
h.WaitForBlockchainSync(alice)
h.WaitForBlockchainSync(bob)
chanOpenUpdate := h.openChannel(alice, bob, p)
// Mine 6 blocks, then wait for Alice's node to notify us that the
// channel has been opened. The funding transaction should be found
// within the first newly mined block. We mine 6 blocks so that in the
// case that the channel is public, it is announced to the network.
block := h.Miner.MineBlocksAndAssertNumTxes(6, 1)[0]
// Wait for the channel open event.
fundingChanPoint := h.WaitForChannelOpenEvent(chanOpenUpdate)
// Check that the funding tx is found in the first block.
fundingTxID := h.GetChanPointFundingTxid(fundingChanPoint)
h.Miner.AssertTxInBlock(block, fundingTxID)
// Check that both alice and bob have seen the channel from their
// network topology.
h.AssertTopologyChannelOpen(alice, fundingChanPoint)
h.AssertTopologyChannelOpen(bob, fundingChanPoint)
// Check that the channel can be seen in their ListChannels.
h.AssertChannelExists(alice, fundingChanPoint)
h.AssertChannelExists(bob, fundingChanPoint)
// Finally, check the blocks are synced.
h.WaitForBlockchainSync(alice)
h.WaitForBlockchainSync(bob)
return fundingChanPoint
}
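A sketch of a typical call, assuming funded harness nodes `alice` and `bob`:

chanPoint := h.OpenChannel(alice, bob, OpenChannelParams{
	Amt: btcutil.Amount(1_000_000),
})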
// closeChannel attempts to close the channel indicated by the passed channel
// point, initiated by the passed node. Once the CloseChannel rpc is called, it
// will consume one event and assert it's a close pending event. In addition,
// it will check that the closing tx can be found in the mempool.
func (h *HarnessTest) closeChannel(hn *node.HarnessNode, cp *lnrpc.ChannelPoint,
force bool) (rpc.CloseChanClient, *chainhash.Hash) {
// Call the rpc to close the channel.
closeReq := &lnrpc.CloseChannelRequest{
ChannelPoint: cp,
Force: force,
}
stream := hn.RPC.CloseChannel(closeReq)
// Consume the "channel close" update in order to wait for the closing
// transaction to be broadcast, then wait for the closing tx to be seen
// within the network.
event := h.ReceiveCloseChannelUpdate(stream)
pendingClose, ok := event.Update.(*lnrpc.CloseStatusUpdate_ClosePending)
require.Truef(h, ok, "expected channel close update, instead got %v",
pendingClose)
closeTxid, err := chainhash.NewHash(pendingClose.ClosePending.Txid)
require.NoErrorf(h, err, "unable to decode closeTxid: %v",
pendingClose.ClosePending.Txid)
// Assert the closing tx is in the mempool.
h.Miner.AssertTxInMempool(closeTxid)
return stream, closeTxid
}
// CloseChannel attempts to close a non-anchored channel identified by the
// passed channel point owned by the passed harness node. The following items
// are asserted,
// 1. a close pending event is sent from the close channel client.
// 2. the closing tx is found in the mempool.
// 3. the node reports the channel being waiting to close.
// 4. a block is mined and the closing tx should be found in it.
// 5. the node reports zero waiting close channels.
// 6. the node receives a topology update regarding the channel close.
func (h *HarnessTest) CloseChannel(hn *node.HarnessNode,
cp *lnrpc.ChannelPoint, force bool) *chainhash.Hash {
stream, _ := h.closeChannel(hn, cp, force)
return h.assertChannelClosed(hn, cp, false, stream)
}
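And the matching cooperative close, continuing the sketch above:

closingTxid := h.CloseChannel(alice, chanPoint, false)
// closingTxid can then be used for further chain assertions.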

View File

@@ -4,9 +4,13 @@ import (
"context"
"fmt"
"strings"
"time"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/lightningnetwork/lnd/channeldb"
"github.com/lightningnetwork/lnd/lnrpc"
"github.com/lightningnetwork/lnd/lntemp/node"
"github.com/lightningnetwork/lnd/lntemp/rpc"
"github.com/lightningnetwork/lnd/lntest/wait"
"github.com/stretchr/testify/require"
)
@@ -142,3 +146,355 @@ func (h *HarnessTest) EnsureConnected(a, b *node.HarnessNode) {
h.AssertPeerConnected(a, b)
h.AssertPeerConnected(b, a)
}
// AssertNumEdges checks that an expected number of edges can be found in the
// node specified.
func (h *HarnessTest) AssertNumEdges(hn *node.HarnessNode,
expected int, includeUnannounced bool) []*lnrpc.ChannelEdge {
var edges []*lnrpc.ChannelEdge
old := hn.State.Edge.Public
if includeUnannounced {
old = hn.State.Edge.Total
}
err := wait.NoError(func() error {
req := &lnrpc.ChannelGraphRequest{
IncludeUnannounced: includeUnannounced,
}
chanGraph := hn.RPC.DescribeGraph(req)
total := len(chanGraph.Edges)
if total-old == expected {
if expected != 0 {
// NOTE: assume edges come in ascending order,
// so the old edges are at the front of the
// slice.
edges = chanGraph.Edges[old:]
}
return nil
}
return errNumNotMatched(hn.Name(), "num of channel edges",
expected, total-old, total, old)
}, DefaultTimeout)
require.NoError(h, err, "timeout while checking for edges")
return edges
}
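For example (sketch), after the public channel open above each node should report exactly one new announced edge:

h.AssertNumEdges(alice, 1, false)
h.AssertNumEdges(bob, 1, false)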
// ReceiveOpenChannelUpdate waits until a message is received on the stream or
// the timeout is reached.
func (h *HarnessTest) ReceiveOpenChannelUpdate(
stream rpc.OpenChanClient) *lnrpc.OpenStatusUpdate {
chanMsg := make(chan *lnrpc.OpenStatusUpdate)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for open channel "+
"update sent")
case err := <-errChan:
require.Failf(h, "open channel stream",
"received err from open channel stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// WaitForChannelOpenEvent waits for a notification that a channel is open by
// consuming a message from the passed open channel stream.
func (h *HarnessTest) WaitForChannelOpenEvent(
stream rpc.OpenChanClient) *lnrpc.ChannelPoint {
// Consume one event.
event := h.ReceiveOpenChannelUpdate(stream)
resp, ok := event.Update.(*lnrpc.OpenStatusUpdate_ChanOpen)
require.Truef(h, ok, "expected channel open update, instead got %v",
resp)
return resp.ChanOpen.ChannelPoint
}
// AssertTopologyChannelOpen asserts that a given channel outpoint is seen by
// the passed node's network topology.
func (h *HarnessTest) AssertTopologyChannelOpen(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) {
err := hn.Watcher.WaitForChannelOpen(chanPoint)
require.NoErrorf(h, err, "%s didn't report channel", hn.Name())
}
// AssertChannelExists asserts that an active channel identified by the
// specified channel point exists from the point-of-view of the node.
func (h *HarnessTest) AssertChannelExists(hn *node.HarnessNode,
cp *lnrpc.ChannelPoint) *lnrpc.Channel {
var (
channel *lnrpc.Channel
err error
)
err = wait.NoError(func() error {
channel, err = h.findChannel(hn, cp)
if err != nil {
return err
}
// Check whether the channel is active, exit early if it is.
if channel.Active {
return nil
}
return fmt.Errorf("channel point not active")
}, DefaultTimeout)
require.NoErrorf(h, err, "%s: timeout checking for channel point: %v",
hn.Name(), cp)
return channel
}
// findChannel tries to find a target channel in the node using the given
// channel point.
func (h *HarnessTest) findChannel(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) (*lnrpc.Channel, error) {
// Get the funding point.
fp := h.OutPointFromChannelPoint(chanPoint)
req := &lnrpc.ListChannelsRequest{}
channelInfo := hn.RPC.ListChannels(req)
// Find the target channel.
for _, channel := range channelInfo.Channels {
if channel.ChannelPoint == fp.String() {
return channel, nil
}
}
return nil, fmt.Errorf("channel not found using %s", chanPoint)
}
// ReceiveCloseChannelUpdate waits until a message is received on the close
// channel stream or the timeout is reached.
func (h *HarnessTest) ReceiveCloseChannelUpdate(
stream rpc.CloseChanClient) *lnrpc.CloseStatusUpdate {
chanMsg := make(chan *lnrpc.CloseStatusUpdate)
errChan := make(chan error)
go func() {
// Consume one message. This will block until the message is
// received.
resp, err := stream.Recv()
if err != nil {
errChan <- err
return
}
chanMsg <- resp
}()
select {
case <-time.After(DefaultTimeout):
require.Fail(h, "timeout", "timeout waiting for close channel "+
"update sent")
case err := <-errChan:
require.Failf(h, "close channel stream",
"received err from close channel stream: %v", err)
case updateMsg := <-chanMsg:
return updateMsg
}
return nil
}
// WaitingCloseChannel is a pointer alias for the waiting close channel type
// found in PendingChannelsResponse, used to shorten references to it.
type WaitingCloseChannel *lnrpc.PendingChannelsResponse_WaitingCloseChannel
// AssertChannelWaitingClose asserts that the given channel found in the node
// is waiting close. Returns the WaitingCloseChannel if found.
func (h *HarnessTest) AssertChannelWaitingClose(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) WaitingCloseChannel {
var target WaitingCloseChannel
op := h.OutPointFromChannelPoint(chanPoint)
err := wait.NoError(func() error {
resp := hn.RPC.PendingChannels()
for _, waitingClose := range resp.WaitingCloseChannels {
if waitingClose.Channel.ChannelPoint == op.String() {
target = waitingClose
return nil
}
}
return fmt.Errorf("%v: channel %s not found in waiting close",
hn.Name(), op)
}, DefaultTimeout)
require.NoError(h, err, "assert channel waiting close timed out")
return target
}
// AssertTopologyChannelClosed asserts a given channel is closed by checking
// the graph topology subscription of the specified node. Returns the closed
// channel update if found.
func (h *HarnessTest) AssertTopologyChannelClosed(hn *node.HarnessNode,
chanPoint *lnrpc.ChannelPoint) *lnrpc.ClosedChannelUpdate {
closedChan, err := hn.Watcher.WaitForChannelClose(chanPoint)
require.NoError(h, err, "failed to wait for channel close")
return closedChan
}
// WaitForChannelCloseEvent waits for a notification that a channel is closed
// by consuming a message from the passed close channel stream. Returns the
// closing txid if found.
func (h *HarnessTest) WaitForChannelCloseEvent(
stream rpc.CloseChanClient) *chainhash.Hash {
// Consume one event.
event := h.ReceiveCloseChannelUpdate(stream)
resp, ok := event.Update.(*lnrpc.CloseStatusUpdate_ChanClose)
require.Truef(h, ok, "expected channel open update, instead got %v",
resp)
txid, err := chainhash.NewHash(resp.ChanClose.ClosingTxid)
require.NoErrorf(h, err, "wrong format found in closing txid: %v",
resp.ChanClose.ClosingTxid)
return txid
}
// AssertNumWaitingClose checks that a PendingChannels response from the node
// reports the expected number of waiting close channels.
func (h *HarnessTest) AssertNumWaitingClose(hn *node.HarnessNode, num int) {
oldWaiting := hn.State.CloseChannel.WaitingClose
err := wait.NoError(func() error {
resp := hn.RPC.PendingChannels()
total := len(resp.WaitingCloseChannels)
got := total - oldWaiting
if got == num {
return nil
}
return errNumNotMatched(hn.Name(), "waiting close channels",
num, got, total, oldWaiting)
}, DefaultTimeout)
require.NoErrorf(h, err, "%s: assert waiting close timeout",
hn.Name())
}
// AssertNumPendingForceClose checks that a PendingChannels response from the
// node reports the expected number of pending force close channels.
func (h *HarnessTest) AssertNumPendingForceClose(hn *node.HarnessNode,
num int) {
oldForce := hn.State.CloseChannel.PendingForceClose
err := wait.NoError(func() error {
resp := hn.RPC.PendingChannels()
total := len(resp.PendingForceClosingChannels)
got := total - oldForce
if got == num {
return nil
}
return errNumNotMatched(hn.Name(), "pending force close "+
"channels", num, got, total, oldForce)
}, DefaultTimeout)
require.NoErrorf(h, err, "%s: assert pending force close timeout",
hn.Name())
}
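A sketch of how these two assertions pair up in a hypothetical force close flow (broadcast and sweep details omitted):

// The force close tx has been broadcast but not yet mined.
h.AssertNumWaitingClose(alice, 1)
// Once the force close tx confirms, the channel moves to the
// pending force close set.
h.Miner.MineBlocksAndAssertNumTxes(1, 1)
h.AssertNumPendingForceClose(alice, 1)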
// assertChannelClosed asserts that the channel is properly cleaned up after
// initiating a cooperative or local close.
func (h *HarnessTest) assertChannelClosed(hn *node.HarnessNode,
cp *lnrpc.ChannelPoint, anchors bool,
stream rpc.CloseChanClient) *chainhash.Hash {
// Assert the channel is waiting close.
resp := h.AssertChannelWaitingClose(hn, cp)
// Assert that the channel is in the coop broadcasted state.
require.Contains(h, resp.Channel.ChanStatusFlags,
channeldb.ChanStatusCoopBroadcasted.String(),
"channel not coop broadcasted")
// We'll now generate a single block, wait for the final close status
// update, then ensure that the closing transaction was included in the
// block. If there are anchors, we also expect an anchor sweep.
expectedTxes := 1
if anchors {
expectedTxes = 2
}
block := h.Miner.MineBlocksAndAssertNumTxes(1, expectedTxes)[0]
// Consume one close event and assert the closing txid can be found in
// the block.
closingTxid := h.WaitForChannelCloseEvent(stream)
h.Miner.AssertTxInBlock(block, closingTxid)
// We should see zero waiting close channels now.
h.AssertNumWaitingClose(hn, 0)
// Finally, check that the node's topology graph has seen this channel
// closed.
h.AssertTopologyChannelClosed(hn, cp)
return closingTxid
}
// AssertChannelPolicyUpdate checks that the required policy update has
// happened on the given node.
func (h *HarnessTest) AssertChannelPolicyUpdate(hn *node.HarnessNode,
advertisingNode *node.HarnessNode, policy *lnrpc.RoutingPolicy,
chanPoint *lnrpc.ChannelPoint, includeUnannounced bool) {
require.NoError(
h, hn.Watcher.WaitForChannelPolicyUpdate(
advertisingNode, policy,
chanPoint, includeUnannounced,
), "%s: error while waiting for channel update", hn.Name(),
)
}
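A sketch of typical use; the `policy` value here is hypothetical and would mirror the UpdateChannelPolicy request sent by the advertising node:

policy := &lnrpc.RoutingPolicy{
	FeeBaseMsat:      1000,
	FeeRateMilliMsat: 100,
	TimeLockDelta:    40,
}
h.AssertChannelPolicyUpdate(bob, alice, policy, chanPoint, false)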
// WaitForGraphSync waits until the node is synced to graph or times out.
func (h *HarnessTest) WaitForGraphSync(hn *node.HarnessNode) {
err := wait.NoError(func() error {
resp := hn.RPC.GetInfo()
if resp.SyncedToGraph {
return nil
}
return fmt.Errorf("node not synced to graph")
}, DefaultTimeout)
require.NoError(h, err, "%s: timeout while sync to graph", hn.Name())
}

View File

@@ -1,6 +1,7 @@
package lntemp
import (
"bytes"
"context"
"fmt"
"io/ioutil"
@@ -9,6 +10,7 @@ import (
"strings"
"testing"
"github.com/btcsuite/btcd/btcutil"
"github.com/btcsuite/btcd/chaincfg"
"github.com/btcsuite/btcd/chaincfg/chainhash"
"github.com/btcsuite/btcd/integration/rpctest"
@@ -186,3 +188,81 @@ func (h *HarnessMiner) AssertNumTxsInMempool(n int) []*chainhash.Hash {
return mem
}
// AssertTxInBlock asserts that a given txid can be found in the passed block.
func (h *HarnessMiner) AssertTxInBlock(block *wire.MsgBlock,
txid *chainhash.Hash) {
blockTxes := make([]chainhash.Hash, 0)
for _, tx := range block.Transactions {
sha := tx.TxHash()
blockTxes = append(blockTxes, sha)
if bytes.Equal(txid[:], sha[:]) {
return
}
}
require.Failf(h, "tx was not included in block", "tx:%v, block has:%v",
txid, blockTxes)
}
// MineBlocksAndAssertNumTxes mines 'num' blocks and checks that they are
// present in the node's blockchain. numTxs should be set to the number of
// transactions (excluding the coinbase) we expect to be included in the
// first mined block.
func (h *HarnessMiner) MineBlocksAndAssertNumTxes(num uint32,
numTxs int) []*wire.MsgBlock {
// If we expect transactions to be included in the blocks we'll mine,
// we wait here until they are seen in the miner's mempool.
txids := h.AssertNumTxsInMempool(numTxs)
// Mine blocks.
blocks := h.MineBlocks(num)
// Finally, assert that all the transactions were included in the first
// block.
for _, txid := range txids {
h.AssertTxInBlock(blocks[0], txid)
}
return blocks
}
// GetRawTransaction makes an RPC call to the miner's GetRawTransaction and
// asserts no error occurs.
func (h *HarnessMiner) GetRawTransaction(txid *chainhash.Hash) *btcutil.Tx {
tx, err := h.Client.GetRawTransaction(txid)
require.NoErrorf(h, err, "failed to get raw tx: %v", txid)
return tx
}
// AssertTxInMempool asserts a given transaction can be found in the mempool.
func (h *HarnessMiner) AssertTxInMempool(txid *chainhash.Hash) *wire.MsgTx {
var msgTx *wire.MsgTx
err := wait.NoError(func() error {
// We require the RPC call to succeed and don't retry it,
// as a failure here would be unexpected.
mempool := h.GetRawMempool()
if len(mempool) == 0 {
return fmt.Errorf("empty mempool")
}
for _, memTx := range mempool {
// Check the values are equal.
if *memTx == *txid {
// Fetch the full transaction so it can be
// returned to the caller.
msgTx = h.GetRawTransaction(txid).MsgTx()
return nil
}
}
return fmt.Errorf("txid %v not found in mempool: %v", txid,
mempool)
}, lntest.MinerMempoolTimeout)
require.NoError(h, err, "timeout checking mempool")
return msgTx
}
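Putting the miner helpers together (sketch; `txid` is assumed to be a transaction the test just broadcast):

h.Miner.AssertTxInMempool(txid)
block := h.Miner.MineBlocksAndAssertNumTxes(1, 1)[0]
h.Miner.AssertTxInBlock(block, txid)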

View File

@@ -188,3 +188,29 @@ func (h *HarnessRPC) ChannelBalance() *lnrpc.ChannelBalanceResponse {
return resp
}
// OpenChanClient is the stream client used to receive updates from an open
// channel request.
type OpenChanClient lnrpc.Lightning_OpenChannelClient
// OpenChannel makes an RPC call to LightningClient and returns the open channel
// client.
func (h *HarnessRPC) OpenChannel(req *lnrpc.OpenChannelRequest) OpenChanClient {
stream, err := h.LN.OpenChannel(h.runCtx, req)
h.NoError(err, "OpenChannel")
return stream
}
// CloseChanClient is the stream client used to receive updates from a close
// channel request.
type CloseChanClient lnrpc.Lightning_CloseChannelClient
// CloseChannel makes an RPC call to LightningClient and returns the close
// channel client.
func (h *HarnessRPC) CloseChannel(
req *lnrpc.CloseChannelRequest) CloseChanClient {
// Use runCtx here instead of a timeout context to keep the client
// alive for the entire test case.
stream, err := h.LN.CloseChannel(h.runCtx, req)
h.NoError(err, "CloseChannel")
return stream
}
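The returned stream is intended to be consumed through the harness helpers, e.g. (sketch; `hn`, `h` and `cp` assumed from the surrounding test):

stream := hn.RPC.CloseChannel(&lnrpc.CloseChannelRequest{ChannelPoint: cp})
update := h.ReceiveCloseChannelUpdate(stream)
// Inspect update.Update for the close pending / chan close event.
_ = update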

View File

@@ -1,5 +1,25 @@
package rpc
import (
"context"
"github.com/lightningnetwork/lnd/lnrpc/routerrpc"
)
// =====================
// RouterClient related RPCs.
// =====================
// UpdateChanStatus makes an UpdateChanStatus RPC call to the node's
// RouterClient and asserts no error occurs.
func (h *HarnessRPC) UpdateChanStatus(
req *routerrpc.UpdateChanStatusRequest) *routerrpc.UpdateChanStatusResponse {
ctxt, cancel := context.WithTimeout(h.runCtx, DefaultTimeout)
defer cancel()
resp, err := h.Router.UpdateChanStatus(ctxt, req)
h.NoError(err, "UpdateChanStatus")
return resp
}
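A sketch of the call this commit enables for testUpdateChanStatus (field values illustrative; `chanPoint` assumed from the test):

resp := hn.RPC.UpdateChanStatus(&routerrpc.UpdateChanStatusRequest{
	ChanPoint: chanPoint,
	Action:    routerrpc.ChanStatusAction_DISABLE,
})
// The response carries no payload; the helper has already asserted
// the call succeeded.
_ = resp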

View File

@@ -1,6 +1,7 @@
package lntemp
import (
"fmt"
"io"
"os"
@@ -35,3 +36,11 @@ func CopyFile(dest, src string) error {
return d.Close()
}
// errNumNotMatched is a helper function that returns a nicely formatted error.
func errNumNotMatched(name string, subject string,
want, got, total, old int) error {
return fmt.Errorf("%s: assert %s failed: want %d, got: %d, total: "+
"%d, previously had: %d", name, subject, want, got, total, old)
}
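For illustration, a failed edge check would produce a message like: "Alice: assert num of channel edges failed: want 1, got: 0, total: 3, previously had: 3" (hypothetical values).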