Mirror of https://github.com/bitcoin/bitcoin.git (synced 2026-01-22 00:00:55 +01:00)
Compare commits
230 Commits
build-aux/m4/l_atomic.m4 (new file, 40 lines)
@@ -0,0 +1,40 @@
# Some versions of gcc/libstdc++ require linking with -latomic if
# using the C++ atomic library.
#
# Sourced from http://bugs.debian.org/797228

m4_define([_CHECK_ATOMIC_testbody], [[
  #include <atomic>
  #include <cstdint>

  int main() {
    std::atomic<int64_t> a{};

    int64_t v = 5;
    int64_t r = a.fetch_add(v);
    return static_cast<int>(r);
  }
]])

AC_DEFUN([CHECK_ATOMIC], [

  AC_LANG_PUSH(C++)

  AC_MSG_CHECKING([whether std::atomic can be used without link library])

  AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_ATOMIC_testbody])],[
    AC_MSG_RESULT([yes])
  ],[
    AC_MSG_RESULT([no])
    LIBS="$LIBS -latomic"
    AC_MSG_CHECKING([whether std::atomic needs -latomic])
    AC_LINK_IFELSE([AC_LANG_SOURCE([_CHECK_ATOMIC_testbody])],[
      AC_MSG_RESULT([yes])
    ],[
      AC_MSG_RESULT([no])
      AC_MSG_FAILURE([cannot figure our how to use std::atomic])
    ])
  ])

  AC_LANG_POP
])
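The macro's comment explains when `-latomic` is needed; as a rough standalone illustration (not part of the diff), the same two-step probe can be reproduced outside autoconf with a small Python sketch. The `g++` invocation and flags below are assumptions chosen for the example.

```python
import os
import subprocess
import tempfile
import textwrap

# Same test program the macro links: a 64-bit std::atomic fetch_add.
TEST_PROGRAM = textwrap.dedent("""
    #include <atomic>
    #include <cstdint>
    int main() {
        std::atomic<int64_t> a{};
        int64_t v = 5;
        int64_t r = a.fetch_add(v);
        return static_cast<int>(r);
    }
""")

def links(extra_args):
    """Return True if the test program compiles and links with the given extra arguments."""
    with tempfile.TemporaryDirectory() as tmp:
        src = os.path.join(tmp, "test.cpp")
        out = os.path.join(tmp, "a.out")
        with open(src, "w") as f:
            f.write(TEST_PROGRAM)
        cmd = ["g++", "-std=c++11", src, "-o", out] + extra_args
        return subprocess.call(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) == 0

if links([]):
    print("std::atomic works without a link library")
elif links(["-latomic"]):
    print("std::atomic needs -latomic")
else:
    raise SystemExit("cannot figure out how to use std::atomic")
```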
configure.ac (47 changed lines)
@@ -1,10 +1,10 @@
dnl require autoconf 2.60 (AS_ECHO/AS_ECHO_N)
AC_PREREQ([2.60])
define(_CLIENT_VERSION_MAJOR, 0)
define(_CLIENT_VERSION_MINOR, 12)
define(_CLIENT_VERSION_REVISION, 99)
define(_CLIENT_VERSION_MINOR, 13)
define(_CLIENT_VERSION_REVISION, 1)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_IS_RELEASE, false)
define(_CLIENT_VERSION_IS_RELEASE, true)
define(_COPYRIGHT_YEAR, 2016)
define(_COPYRIGHT_HOLDERS,[The %s developers])
define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]])
@@ -57,6 +57,9 @@ case $host in
esac
dnl Require C++11 compiler (no GNU extensions)
AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory])
dnl Check if -latomic is required for <std::atomic>
CHECK_ATOMIC

dnl Libtool init checks.
LT_INIT([pic-only])

@@ -67,7 +70,8 @@ AC_PATH_TOOL(STRIP, strip)
AC_PATH_TOOL(GCOV, gcov)
AC_PATH_PROG(LCOV, lcov)
AC_PATH_PROG(JAVA, java)
AC_PATH_PROGS([PYTHON], [python3 python2.7 python2 python])
dnl Python 3.x is supported from 3.4 on (see https://github.com/bitcoin/bitcoin/issues/7893)
AC_PATH_PROGS([PYTHON], [python3.6 python3.5 python3.4 python3 python2.7 python2 python])
AC_PATH_PROG(GENHTML, genhtml)
AC_PATH_PROG([GIT], [git])
AC_PATH_PROG(CCACHE,ccache)
@@ -366,8 +370,15 @@ case $host in
    TARGET_OS=linux
    LEVELDB_TARGET_FLAGS="-DOS_LINUX"
    ;;
  *freebsd*)
    LEVELDB_TARGET_FLAGS="-DOS_FREEBSD"
    ;;
  *openbsd*)
    LEVELDB_TARGET_FLAGS="-DOS_OPENBSD"
    ;;
  *)
    OTHER_OS=`echo ${host_os} | awk '{print toupper($0)}'`
    AC_MSG_WARN([Guessing LevelDB OS as OS_${OTHER_OS}, please check whether this is correct, if not add an entry to configure.ac.])
    LEVELDB_TARGET_FLAGS="-DOS_${OTHER_OS}"
    ;;
esac
@@ -825,6 +836,12 @@ fi

dnl univalue check

need_bundled_univalue=yes

if test x$build_bitcoin_utils$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench = xnonononono; then
  need_bundled_univalue=no
else

if test x$system_univalue != xno ; then
  found_univalue=no
  if test x$use_pkgconfig = xyes; then
@@ -846,6 +863,7 @@ if test x$system_univalue != xno ; then

  if test x$found_univalue = xyes ; then
    system_univalue=yes
    need_bundled_univalue=no
  elif test x$system_univalue = xyes ; then
    AC_MSG_ERROR([univalue not found])
  else
@@ -853,22 +871,17 @@ if test x$system_univalue != xno ; then
  fi
fi

if test x$system_univalue = xno ; then
if test x$need_bundled_univalue = xyes ; then
  UNIVALUE_CFLAGS='-I$(srcdir)/univalue/include'
  UNIVALUE_LIBS='univalue/libunivalue.la'
fi
AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$system_univalue = xno])

fi

AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$need_bundled_univalue = xyes])
AC_SUBST(UNIVALUE_CFLAGS)
AC_SUBST(UNIVALUE_LIBS)

CXXFLAGS_TEMP="$CXXFLAGS"
LIBS_TEMP="$LIBS"
CXXFLAGS="$CXXFLAGS $SSL_CFLAGS $CRYPTO_CFLAGS"
LIBS="$LIBS $SSL_LIBS $CRYPTO_LIBS"
AC_CHECK_HEADER([openssl/ec.h],, AC_MSG_ERROR(OpenSSL ec header missing),)
CXXFLAGS="$CXXFLAGS_TEMP"
LIBS="$LIBS_TEMP"

BITCOIN_QT_PATH_PROGS([PROTOC], [protoc],$protoc_bin_path)

AC_MSG_CHECKING([whether to build bitcoind])
@@ -1003,8 +1016,8 @@ else
  AC_MSG_RESULT([no])
fi

if test x$build_bitcoin_utils$build_bitcoin_libs$build_bitcoind$bitcoin_enable_qt$use_tests = xnonononono; then
  AC_MSG_ERROR([No targets! Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui or --enable-tests])
if test x$build_bitcoin_utils$build_bitcoin_libs$build_bitcoind$bitcoin_enable_qt$use_bench$use_tests = xnononononono; then
  AC_MSG_ERROR([No targets! Please specify at least one of: --with-utils --with-libs --with-daemon --with-gui --enable-bench or --enable-tests])
fi

AM_CONDITIONAL([TARGET_DARWIN], [test x$TARGET_OS = xdarwin])
@@ -1096,7 +1109,7 @@ PKGCONFIG_LIBDIR_TEMP="$PKG_CONFIG_LIBDIR"
unset PKG_CONFIG_LIBDIR
PKG_CONFIG_LIBDIR="$PKGCONFIG_LIBDIR_TEMP"

if test x$system_univalue = xno; then
if test x$need_bundled_univalue = xyes; then
  AC_CONFIG_SUBDIRS([src/univalue])
fi
@@ -51,8 +51,9 @@ maintained:
* for `src/secp256k1`: https://github.com/bitcoin-core/secp256k1.git (branch master)
* for `src/leveldb`: https://github.com/bitcoin-core/leveldb.git (branch bitcoin-fork)
* for `src/univalue`: https://github.com/bitcoin-core/univalue.git (branch master)
* for `src/crypto/ctaes`: https://github.com/bitcoin-core/ctaes.git (branch master)

Usage: `git-subtree-check.sh DIR COMMIT`
Usage: `git-subtree-check.sh DIR (COMMIT)`

`COMMIT` may be omitted, in which case `HEAD` is used.
@@ -16,7 +16,7 @@ import time
import re

year = time.gmtime()[0]
CMD_GIT_DATE = 'git log --format=@%%at -1 %s | date +"%%Y" -u -f -'
CMD_GIT_DATE = 'git log --format=%%ad --date=short -1 %s | cut -d"-" -f 1'
CMD_REGEX= "perl -pi -e 's/(20\d\d)(?:-20\d\d)? The Bitcoin/$1-%s The Bitcoin/' %s"
REGEX_CURRENT= re.compile("%s The Bitcoin" % year)
CMD_LIST_FILES= "find %s | grep %s"
@@ -1,4 +1,7 @@
#!/bin/sh
# Copyright (c) 2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

DIR="$1"
COMMIT="$2"

@@ -1,4 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Run this script every time you change one of the png files. Using pngcrush, it will optimize the png files, remove various color profiles, remove ancillary chunks (alla) and text chunks (text).
#pngcrush -brute -ow -rem gAMA -rem cHRM -rem iCCP -rem sRGB -rem alla -rem text

@@ -1,4 +1,7 @@
#!/usr/bin/python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Perform basic ELF security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.

@@ -1,4 +1,7 @@
#!/usr/bin/python2
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Test script for security-check.py
'''
Binary file not shown.
@@ -1,4 +1,8 @@
#!/bin/sh
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

set -e

UNSIGNED="$1"

@@ -1,4 +1,8 @@
#!/bin/sh
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

set -e

ROOTDIR=dist

@@ -1,3 +1,7 @@
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#network interface on which to limit traffic
IF="eth0"
#limit of the network interface in question

@@ -1,4 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2011 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Helpful little script that spits out a comma-separated list of
# language codes for Qt icons that should be included

@@ -1,4 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2013-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#

@@ -1,3 +1,6 @@
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from distutils.core import setup
setup(name='btcspendfrom',
      version='1.0',

@@ -1,4 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.

@@ -1,3 +1,6 @@
# Copyright (c) 2012 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Bitcoin base58 encoding and decoding.

@@ -1,4 +1,7 @@
#!/usr/bin/env python
# Copyright (c) 2012 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Generate valid and invalid base58 address and private key test vectors.

@@ -1,4 +1,7 @@
#!/bin/bash
# Copyright (c) 2013 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

if [ -d "$1" ]; then
  cd "$1"

@@ -1,4 +1,8 @@
#!/bin/sh
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

INPUT=$(cat /dev/stdin)
VALID=false
REVSIG=false

@@ -1,4 +1,8 @@
#!/bin/bash
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

if ! [[ "$2" =~ ^(git@)?(www.)?github.com(:|/)bitcoin/bitcoin(.git)?$ ]]; then
  exit 0
fi

@@ -1,4 +1,8 @@
#!/bin/sh
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Not technically POSIX-compliant due to use of "local", but almost every
# shell anyone uses today supports it, so its probably fine

@@ -1,4 +1,7 @@
#!/bin/bash
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

### This script attempts to download the signature file SHA256SUMS.asc from bitcoin.org
### It first checks if the signature passes, and then downloads the files specified in

@@ -1,4 +1,7 @@
#!/usr/bin/env python2
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

import array
import binascii
@@ -3,6 +3,7 @@ $(package)_version=1_59_0
$(package)_download_path=http://sourceforge.net/projects/boost/files/boost/1.59.0
$(package)_file_name=$(package)_$($(package)_version).tar.bz2
$(package)_sha256_hash=727a932322d94287b62abb1bd2d41723eec4356a7728909e38adb65ca25241ca
$(package)_patches=fix-win-wake-from-sleep.patch

define $(package)_set_vars
$(package)_config_opts_release=variant=release
@@ -25,6 +26,7 @@ $(package)_cxxflags_linux=-fPIC
endef

define $(package)_preprocess_cmds
  patch -p1 < $($(package)_patch_dir)/fix-win-wake-from-sleep.patch && \
  echo "using $(boost_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$(boost_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
endef

@@ -4,6 +4,11 @@ $(package)_download_path=https://pypi.python.org/packages/source/b/biplist
$(package)_file_name=biplist-$($(package)_version).tar.gz
$(package)_sha256_hash=b57cadfd26e4754efdf89e9e37de87885f9b5c847b2615688ca04adfaf6ca604
$(package)_install_libdir=$(build_prefix)/lib/python/dist-packages
$(package)_patches=sorted_list.patch

define $(package)_preprocess_cmds
  patch -p1 < $($(package)_patch_dir)/sorted_list.patch
endef

define $(package)_build_cmds
  python setup.py build
depends/patches/boost/fix-win-wake-from-sleep.patch (new file, 31 lines)
@@ -0,0 +1,31 @@
--- old/libs/thread/src/win32/thread.cpp
+++ new/libs/thread/src/win32/thread.cpp
@@ -645,7 +645,7 @@
         } Detailed;
       } Reason;
     } REASON_CONTEXT, *PREASON_CONTEXT;
-    static REASON_CONTEXT default_reason_context={0/*POWER_REQUEST_CONTEXT_VERSION*/, 0x00000001/*POWER_REQUEST_CONTEXT_SIMPLE_STRING*/, (LPWSTR)L"generic"};
+    //static REASON_CONTEXT default_reason_context={0/*POWER_REQUEST_CONTEXT_VERSION*/, 0x00000001/*POWER_REQUEST_CONTEXT_SIMPLE_STRING*/, (LPWSTR)L"generic"};
     typedef BOOL (WINAPI *setwaitabletimerex_t)(HANDLE, const LARGE_INTEGER *, LONG, PTIMERAPCROUTINE, LPVOID, PREASON_CONTEXT, ULONG);
     static inline BOOL WINAPI SetWaitableTimerEx_emulation(HANDLE hTimer, const LARGE_INTEGER *lpDueTime, LONG lPeriod, PTIMERAPCROUTINE pfnCompletionRoutine, LPVOID lpArgToCompletionRoutine, PREASON_CONTEXT WakeContext, ULONG TolerableDelay)
     {
@@ -715,7 +715,8 @@
         if(time_left.milliseconds/20>tolerable) // 5%
             tolerable=time_left.milliseconds/20;
         LARGE_INTEGER due_time=get_due_time(target_time);
-        bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,&detail_::default_reason_context,tolerable)!=0;
+        //bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,&detail_::default_reason_context,tolerable)!=0;
+        bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,NULL,tolerable)!=0;
         if(set_time_succeeded)
         {
             timeout_index=handle_count;
@@ -799,7 +800,8 @@
         if(time_left.milliseconds/20>tolerable) // 5%
             tolerable=time_left.milliseconds/20;
         LARGE_INTEGER due_time=get_due_time(target_time);
-        bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,&detail_::default_reason_context,tolerable)!=0;
+        //bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,&detail_::default_reason_context,tolerable)!=0;
+        bool const set_time_succeeded=detail_::SetWaitableTimerEx()(timer_handle,&due_time,0,0,0,NULL,tolerable)!=0;
         if(set_time_succeeded)
         {
             timeout_index=handle_count;
depends/patches/native_biplist/sorted_list.patch (new file, 29 lines)
@@ -0,0 +1,29 @@
--- a/biplist/__init__.py	2014-10-26 19:03:11.000000000 +0000
+++ b/biplist/__init__.py	2016-07-19 19:30:17.663521999 +0000
@@ -541,7 +541,7 @@
             return HashableWrapper(n)
         elif isinstance(root, dict):
             n = {}
-            for key, value in iteritems(root):
+            for key, value in sorted(iteritems(root)):
                 n[self.wrapRoot(key)] = self.wrapRoot(value)
             return HashableWrapper(n)
         elif isinstance(root, list):
@@ -616,7 +616,7 @@
         elif isinstance(obj, dict):
             size = proc_size(len(obj))
             self.incrementByteCount('dictBytes', incr=1+size)
-            for key, value in iteritems(obj):
+            for key, value in sorted(iteritems(obj)):
                 check_key(key)
                 self.computeOffsets(key, asReference=True)
                 self.computeOffsets(value, asReference=True)
@@ -714,7 +714,7 @@
         keys = []
         values = []
         objectsToWrite = []
-        for key, value in iteritems(obj):
+        for key, value in sorted(iteritems(obj)):
             keys.append(key)
             values.append(value)
         for key in keys:
@@ -34,7 +34,7 @@ PROJECT_NAME = Bitcoin
# This could be handy for archiving the generated documentation or
# if some version control system is used.

PROJECT_NUMBER = 0.12.99
PROJECT_NUMBER = 0.13.1

# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer

@@ -1,4 +1,4 @@
Bitcoin Core 0.12.99
Bitcoin Core 0.13.1
=====================

Setup

@@ -22,7 +22,7 @@ These tools inject timestamps by default, which produce non-deterministic
binaries. The ZERO_AR_DATE environment variable is used to disable that.

This version of cctools has been patched to use the current version of clang's
headers and and its libLTO.so rather than those from llvmgcc, as it was
headers and its libLTO.so rather than those from llvmgcc, as it was
originally done in toolchain4.

To complicate things further, all builds must target an Apple SDK. These SDKs

@@ -1,4 +1,4 @@
Bitcoin Core 0.12.99
Bitcoin Core 0.13.1
=====================

Intro

@@ -26,4 +26,9 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.13.0**):
* [`BIP 125`](https://github.com/bitcoin/bips/blob/master/bip-0125.mediawiki): Opt-in full replace-by-fee signaling honoured in mempool and mining as of **v0.12.0** ([PR 6871](https://github.com/bitcoin/bitcoin/pull/6871)).
* [`BIP 130`](https://github.com/bitcoin/bips/blob/master/bip-0130.mediawiki): direct headers announcement is negotiated with peer versions `>=70012` as of **v0.12.0** ([PR 6494](https://github.com/bitcoin/bitcoin/pull/6494)).
* [`BIP 133`](https://github.com/bitcoin/bips/blob/master/bip-0133.mediawiki): feefilter messages are respected and sent for peer versions `>=70013` as of **v0.13.0** ([PR 7542](https://github.com/bitcoin/bitcoin/pull/7542)).
* [`BIP 141`](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki): Segregated Witness (Consensus Layer) as of **v0.13.0** ([PR 8149](https://github.com/bitcoin/bitcoin/pull/8149)), and defined for mainnet as of **v0.13.1** ([PR 8937](https://github.com/bitcoin/bitcoin/pull/8937)).
* [`BIP 143`](https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki): Transaction Signature Verification for Version 0 Witness Program as of **v0.13.0** ([PR 8149](https://github.com/bitcoin/bitcoin/pull/8149)) and defined for mainnet as of **v0.13.1** ([PR 8937](https://github.com/bitcoin/bitcoin/pull/8937)).
* [`BIP 144`](https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki): Segregated Witness as of **0.13.0** ([PR 8149](https://github.com/bitcoin/bitcoin/pull/8149)).
* [`BIP 145`](https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki): getblocktemplate updates for Segregated Witness as of **v0.13.0** ([PR 8149](https://github.com/bitcoin/bitcoin/pull/8149)).
* [`BIP 147`](https://github.com/bitcoin/bips/blob/master/bip-0147.mediawiki): NULLDUMMY softfork as of **v0.13.1** ([PR 8636](https://github.com/bitcoin/bitcoin/pull/8636) and [PR 8937](https://github.com/bitcoin/bitcoin/pull/8937)).
* [`BIP 152`](https://github.com/bitcoin/bips/blob/master/bip-0152.mediawiki): Compact block transfer and related optimizations are used as of **v0.13.0** ([PR 8068](https://github.com/bitcoin/bitcoin/pull/8068)).
@@ -1,6 +1,6 @@
OpenBSD build guide
======================
(updated for OpenBSD 5.7)
(updated for OpenBSD 5.9)

This guide describes how to build bitcoind and command-line utilities on OpenBSD.

@@ -15,11 +15,10 @@ Run the following as root to install the base dependencies for building:
pkg_add gmake libtool libevent
pkg_add autoconf # (select highest version, e.g. 2.69)
pkg_add automake # (select highest version, e.g. 1.15)
pkg_add python # (select version 2.7.x, not 3.x)
ln -sf /usr/local/bin/python2.7 /usr/local/bin/python2
pkg_add python # (select highest version, e.g. 3.5)
```

The default C++ compiler that comes with OpenBSD 5.7 is g++ 4.2. This version is old (from 2007), and is not able to compile the current version of Bitcoin Core. It is possible to patch it up to compile, but with the planned transition to C++11 this is a losing battle. So here we will be installing a newer compiler.
The default C++ compiler that comes with OpenBSD 5.9 is g++ 4.2. This version is old (from 2007), and is not able to compile the current version of Bitcoin Core, primarily as it has no C++11 support, but even before there were issues. So here we will be installing a newer compiler.

GCC
-------
@@ -27,7 +26,7 @@ GCC
You can install a newer version of gcc with:

```bash
pkg_add g++ # (select newest 4.x version, e.g. 4.9.2)
pkg_add g++ # (select newest 4.x version, e.g. 4.9.3)
```

This compiler will not overwrite the system compiler, it will be installed as `egcc` and `eg++` in `/usr/local/bin`.
@@ -49,18 +48,15 @@ BOOST_PREFIX="${BITCOIN_ROOT}/boost"
mkdir -p $BOOST_PREFIX

# Fetch the source and verify that it is not tampered with
wget http://heanet.dl.sourceforge.net/project/boost/boost/1.59.0/boost_1_59_0.tar.bz2
echo '727a932322d94287b62abb1bd2d41723eec4356a7728909e38adb65ca25241ca boost_1_59_0.tar.bz2' | sha256 -c
# MUST output: (SHA256) boost_1_59_0.tar.bz2: OK
tar -xjf boost_1_59_0.tar.bz2
curl -o boost_1_61_0.tar.bz2 http://heanet.dl.sourceforge.net/project/boost/boost/1.61.0/boost_1_61_0.tar.bz2
echo 'a547bd06c2fd9a71ba1d169d9cf0339da7ebf4753849a8f7d6fdb8feee99b640 boost_1_61_0.tar.bz2' | sha256 -c
# MUST output: (SHA256) boost_1_61_0.tar.bz2: OK
tar -xjf boost_1_61_0.tar.bz2

# Boost 1.59 needs two small patches for OpenBSD
cd boost_1_59_0
# Boost 1.61 needs one small patch for OpenBSD
cd boost_1_61_0
# Also here: https://gist.githubusercontent.com/laanwj/bf359281dc319b8ff2e1/raw/92250de8404b97bb99d72ab898f4a8cb35ae1ea3/patch-boost_test_impl_execution_monitor_ipp.patch
patch -p0 < /usr/ports/devel/boost/patches/patch-boost_test_impl_execution_monitor_ipp
# https://github.com/boostorg/filesystem/commit/90517e459681790a091566dce27ca3acabf9a70c
sed 's/__OPEN_BSD__/__OpenBSD__/g' < libs/filesystem/src/path.cpp > libs/filesystem/src/path.cpp.tmp
mv libs/filesystem/src/path.cpp.tmp libs/filesystem/src/path.cpp

# Build w/ minimum configuration necessary for bitcoin
echo 'using gcc : : eg++ : <cxxflags>"-fvisibility=hidden -fPIC" <linkflags>"" <archiver>"ar" <striper>"strip" <ranlib>"ranlib" <rc>"" : ;' > user-config.jam
@@ -84,7 +80,7 @@ BDB_PREFIX="${BITCOIN_ROOT}/db4"
mkdir -p $BDB_PREFIX

# Fetch the source and verify that it is not tampered with
wget 'http://download.oracle.com/berkeley-db/db-4.8.30.NC.tar.gz'
curl -o db-4.8.30.NC.tar.gz 'http://download.oracle.com/berkeley-db/db-4.8.30.NC.tar.gz'
echo '12edc0df75bf9abd7f82f821795bcee50f42cb2e5f76a6a281b85732798364ef db-4.8.30.NC.tar.gz' | sha256 -c
# MUST output: (SHA256) db-4.8.30.NC.tar.gz: OK
tar -xzf db-4.8.30.NC.tar.gz
@@ -93,9 +89,25 @@ tar -xzf db-4.8.30.NC.tar.gz
cd db-4.8.30.NC/build_unix/
# Note: Do a static build so that it can be embedded into the executable, instead of having to find a .so at runtime
../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX CC=egcc CXX=eg++ CPP=ecpp
make install
make install # do NOT use -jX, this is broken
```

### Resource limits

The standard ulimit restrictions in OpenBSD are very strict:

    data(kbytes)         1572864

This is, unfortunately, no longer enough to compile some `.cpp` files in the project,
at least with gcc 4.9.3 (see issue https://github.com/bitcoin/bitcoin/issues/6658).
If your user is in the `staff` group the limit can be raised with:

    ulimit -d 3000000

The change will only affect the current shell and processes spawned by it. To
make the change system-wide, change `datasize-cur` and `datasize-max` in
`/etc/login.conf`, and reboot.

### Building Bitcoin Core

**Important**: use `gmake`, not `make`. The non-GNU `make` will exit with a horrible error.
@@ -123,7 +135,7 @@ To configure without wallet:

Build and run the tests:
```bash
gmake
gmake # can use -jX here for parallelism
gmake check
```
@@ -16,7 +16,7 @@ Then install [Homebrew](http://brew.sh).
Dependencies
----------------------

    brew install automake berkeley-db4 libtool boost --c++11 miniupnpc openssl pkg-config protobuf --c++11 qt5 libevent
    brew install automake berkeley-db4 libtool boost --c++11 miniupnpc openssl pkg-config homebrew/versions/protobuf260 --c++11 qt5 libevent

NOTE: Building with Qt4 is still supported, however, could result in a broken UI. Building with Qt5 is recommended.

@@ -90,6 +90,6 @@ Uncheck everything except Qt Creator during the installation process.
Notes
-----

* Tested on OS X 10.7 through 10.11 on 64-bit Intel processors only.
* Tested on OS X 10.8 through 10.12 on 64-bit Intel processors only.

* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714)
@@ -293,9 +293,10 @@ These steps can be performed on, for example, an Ubuntu VM. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.

First install the toolchain:
Make sure you install the build requirements mentioned above.
Then, install the toolchain and curl:

    sudo apt-get install g++-arm-linux-gnueabihf
    sudo apt-get install g++-arm-linux-gnueabihf curl

To build executables for ARM:

@@ -16,9 +16,11 @@ These steps can be performed on, for example, an Ubuntu VM. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.

First install the toolchains:
Make sure you install the build requirements mentioned in
[build-unix.md](/doc/build-unix.md).
Then, install the toolchains and curl:

    sudo apt-get install g++-mingw-w64-i686 mingw-w64-i686-dev g++-mingw-w64-x86-64 mingw-w64-x86-64-dev
    sudo apt-get install g++-mingw-w64-i686 mingw-w64-i686-dev g++-mingw-w64-x86-64 mingw-w64-x86-64-dev curl

To build executables for Windows 32-bit:
@@ -55,7 +55,7 @@ In the VirtualBox GUI click "New" and choose the following parameters in the wiz

- Memory Size: at least 1024MB, anything less will really slow down the build.
- Memory Size: at least 3000MB, anything less and the build might not complete.

@@ -337,7 +337,7 @@ Getting and building the inputs
--------------------------------

Follow the instructions in [doc/release-process.md](release-process.md#fetch-and-build-inputs-first-time-or-when-dependency-versions-change)
in the bitcoin repository under 'Fetch and build inputs' to install sources which require
in the bitcoin repository under 'Fetch and create inputs' to install sources which require
manual intervention. Also optionally follow the next step: 'Seed the Gitian sources cache
and offline git repositories' which will fetch the remaining files required for building
offline.
Binary file not shown.
Image file changed (size before: 87 KiB, after: 22 KiB)
@@ -19,8 +19,7 @@ This is *not* a hard limit; only a threshold to minimize the outbound
traffic. When the limit is about to be reached, the uploaded data is cut by no
longer serving historic blocks (blocks older than one week).
Keep in mind that new nodes require other nodes that are willing to serve
historic blocks. **The recommended minimum is 144 blocks per day (max. 144MB
per day)**
historic blocks.

Whitelisted peers will never be disconnected, although their traffic counts for
calculating the target.
@@ -1,12 +1,10 @@
(note: this is a temporary file, to be added-to by anybody, and moved to
release-notes at release time)
Bitcoin Core version 0.13.1 is now available from:

Bitcoin Core version *version* is now available from:
<https://bitcoin.org/bin/bitcoin-core-0.13.1/>

<https://bitcoin.org/bin/bitcoin-core-*version*/>

This is a new major version release, including new features, various bugfixes
and performance improvements, as well as updated translations.
This is a new minor version release, including activation parameters for the
segwit softfork, various bugfixes and performance improvements, as well as
updated translations.

Please report bugs using the issue tracker at github:
@@ -31,91 +29,211 @@ libraries such as Qt are no longer being tested on XP.

We do not have time nor resources to provide support for an OS that is
end-of-life. From 0.13.0 on, Windows XP is no longer supported. Users are
suggested to upgrade to a newer verion of Windows, or install an alternative OS
suggested to upgrade to a newer version of Windows, or install an alternative OS
that is supported.

No attempt is made to prevent installing or running the software on Windows XP,
you can still do so at your own risk, but do not expect it to work: do not
report issues about Windows XP to the issue tracker.

From 0.13.1 onwards OS X 10.7 is no longer supported. 0.13.0 was intended to work on 10.7+,
but severe issues with the libc++ version on 10.7.x keep it from running reliably.
0.13.1 now requires 10.8+, and will communicate that to 10.7 users, rather than crashing unexpectedly.

Notable changes
===============

Database cache memory increased
--------------------------------
Segregated witness soft fork
----------------------------

As a result of growth of the UTXO set, performance with the prior default
database cache of 100 MiB has suffered.
For this reason the default was changed to 300 MiB in this release.
Segregated witness (segwit) is a soft fork that, if activated, will
allow transaction-producing software to separate (segregate) transaction
signatures (witnesses) from the part of the data in a transaction that is
covered by the txid. This provides several immediate benefits:

For nodes on low-memory systems, the database cache can be changed back to
100 MiB (or to another value) by either:
- **Elimination of unwanted transaction malleability:** Segregating the witness
  allows both existing and upgraded software to calculate the transaction
  identifier (txid) of transactions without referencing the witness, which can
  sometimes be changed by third-parties (such as miners) or by co-signers in a
  multisig spend. This solves all known cases of unwanted transaction
  malleability, which is a problem that makes programming Bitcoin wallet
  software more difficult and which seriously complicates the design of smart
  contracts for Bitcoin.

- Adding `dbcache=100` in bitcoin.conf
- Changing it in the GUI under `Options → Size of database cache`
- **Capacity increase:** Segwit transactions contain new fields that are not
  part of the data currently used to calculate the size of a block, which
  allows a block containing segwit transactions to hold more data than allowed
  by the current maximum block size. Estimates based on the transactions
  currently found in blocks indicate that if all wallets switch to using
  segwit, the network will be able to support about 70% more transactions. The
  network will also be able to support more of the advanced-style payments
  (such as multisig) than it can support now because of the different weighting
  given to different parts of a transaction after segwit activates (see the
  following section for details).

Note that the database cache setting has the most performance impact
during initial sync of a node, and when catching up after downtime.
- **Weighting data based on how it affects node performance:** Some parts of
  each Bitcoin block need to be stored by nodes in order to validate future
  blocks; other parts of a block can be immediately forgotten (pruned) or used
  only for helping other nodes sync their copy of the block chain. One large
  part of the immediately prunable data are transaction signatures (witnesses),
  and segwit makes it possible to give a different "weight" to segregated
  witnesses to correspond with the lower demands they place on node resources.
  Specifically, each byte of a segregated witness is given a weight of 1, each
  other byte in a block is given a weight of 4, and the maximum allowed weight
  of a block is 4 million. Weighting the data this way better aligns the most
  profitable strategy for creating blocks with the long-term costs of block
  validation.
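As a rough illustration of the weighting rule just described (an editor's sketch, not text from the release notes; the byte counts below are hypothetical examples):

```python
MAX_BLOCK_WEIGHT = 4_000_000  # maximum allowed block weight, as stated above

def block_weight(non_witness_bytes: int, witness_bytes: int) -> int:
    """Weight = 4 per non-witness byte + 1 per segregated-witness byte."""
    return 4 * non_witness_bytes + 1 * witness_bytes

# Hypothetical block: 800,000 non-witness bytes plus 600,000 witness bytes.
w = block_weight(800_000, 600_000)
print(w, w <= MAX_BLOCK_WEIGHT)   # 3800000 True, i.e. it fits under the 4,000,000 limit
```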
bitcoin-cli: arguments privacy
--------------------------------
- **Signature covers value:** A simple improvement in the way signatures are
  generated in segwit simplifies the design of secure signature generators
  (such as hardware wallets), reduces the amount of data the signature
  generator needs to download, and allows the signature generator to operate
  more quickly. This is made possible by having the generator sign the amount
  of bitcoins they think they are spending, and by having full nodes refuse to
  accept those signatures unless the amount of bitcoins being spent is exactly
  the same as was signed. For non-segwit transactions, wallets instead had to
  download the complete previous transactions being spent for every payment
  they made, which could be a slow operation on hardware wallets and in other
  situations where bandwidth or computation speed was constrained.

The RPC command line client gained a new argument, `-stdin`
to read extra arguments from standard input, one per line until EOF/Ctrl-D.
For example:
- **Linear scaling of sighash operations:** In 2015 a block was produced that
  required about 25 seconds to validate on modern hardware because of the way
  transaction signature hashes are performed. Other similar blocks, or blocks
  that could take even longer to validate, can still be produced today. The
  problem that caused this can't be fixed in a soft fork without unwanted
  side-effects, but transactions that opt-in to using segwit will now use a
  different signature method that doesn't suffer from this problem and doesn't
  have any unwanted side-effects.

    $ echo -e "mysecretcode\n120" | src/bitcoin-cli -stdin walletpassphrase
- **Increased security for multisig:** Bitcoin addresses (both P2PKH addresses
  that start with a '1' and P2SH addresses that start with a '3') use a hash
  function known as RIPEMD-160. For P2PKH addresses, this provides about 160
  bits of security---which is beyond what cryptographers believe can be broken
  today. But because P2SH is more flexible, only about 80 bits of security is
  provided per address. Although 80 bits is very strong security, it is within
  the realm of possibility that it can be broken by a powerful adversary.
  Segwit allows advanced transactions to use the SHA256 hash function instead,
  which provides about 128 bits of security (that is 281 trillion times as
  much security as 80 bits and is equivalent to the maximum bits of security
  believed to be provided by Bitcoin's choice of parameters for its Elliptic
  Curve Digital Security Algorithm [ECDSA].)
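The "281 trillion" figure quoted above is simply the ratio of the two security levels, 2^(128-80) = 2^48; a one-line check:

```python
ratio = 2 ** (128 - 80)   # gap between ~128-bit and ~80-bit security levels
print(ratio)              # 281474976710656, i.e. roughly 281 trillion
```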
It is recommended to use this for sensitive information such as wallet
passphrases, as command-line arguments can usually be read from the process
table by any user on the system.
- **More efficient almost-full-node security** Satoshi Nakamoto's original
  Bitcoin paper describes a method for allowing newly-started full nodes to
  skip downloading and validating some data from historic blocks that are
  protected by large amounts of proof of work. Unfortunately, Nakamoto's
  method can't guarantee that a newly-started node using this method will
  produce an accurate copy of Bitcoin's current ledger (called the UTXO set),
  making the node vulnerable to falling out of consensus with other nodes.
  Although the problems with Nakamoto's method can't be fixed in a soft fork,
  Segwit accomplishes something similar to his original proposal: it makes it
  possible for a node to optionally skip downloading some blockchain data
  (specifically, the segregated witnesses) while still ensuring that the node
  can build an accurate copy of the UTXO set for the block chain with the most
  proof of work. Segwit enables this capability at the consensus layer, but
  note that Bitcoin Core does not provide an option to use this capability as
  of this 0.13.1 release.

RPC low-level changes
----------------------
- **Script versioning:** Segwit makes it easy for future soft forks to allow
  Bitcoin users to individually opt-in to almost any change in the Bitcoin
  Script language when those users receive new transactions. Features
  currently being researched by Bitcoin Core contributors that may use this
  capability include support for Schnorr signatures, which can improve the
  privacy and efficiency of multisig transactions (or transactions with
  multiple inputs), and Merklized Abstract Syntax Trees (MAST), which can
  improve the privacy and efficiency of scripts with two or more conditions.
  Other Bitcoin community members are studying several other improvements
  that can be made using script versioning.

- `gettxoutsetinfo` UTXO hash (`hash_serialized`) has changed. There was a divergence between
  32-bit and 64-bit platforms, and the txids were missing in the hashed data. This has been
  fixed, but this means that the output will be different than from previous versions.
Activation for the segwit soft fork is being managed using BIP9
versionbits. Segwit's version bit is bit 1, and nodes will begin
tracking which blocks signal support for segwit at the beginning of the
first retarget period after segwit's start date of 15 November 2016. If
95% of blocks within a 2,016-block retarget period (about two weeks)
signal support for segwit, the soft fork will be locked in. After
another 2,016 blocks, segwit will activate.
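As a quick worked number for the activation rule above (an illustration, not text from the release notes, assuming the 95% share is rounded up to whole blocks): 95% of a 2,016-block period is 1,915.2 blocks, so at least 1,916 blocks must signal within one period.

```python
import math

PERIOD = 2016                     # blocks per retarget period
SHARE = 0.95                      # required signalling share quoted above
min_signalling_blocks = math.ceil(PERIOD * SHARE)
print(min_signalling_blocks)      # 1916
```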
- Full UTF-8 support in the RPC API. Non-ASCII characters in, for example,
  wallet labels have always been malformed because they weren't taken into account
  properly in JSON RPC processing. This is no longer the case. This also affects
  the GUI debug console.
For more information about segwit, please see the [segwit FAQ][], the
[segwit wallet developers guide][] or BIPs [141][BIP141], [143][BIP143],
[144][BIP144], and [145][BIP145]. If you're a miner or mining pool
operator, please see the [versionbits FAQ][] for information about
signaling support for a soft fork.

C++11 and Python 3
[Segwit FAQ]: https://bitcoincore.org/en/2016/01/26/segwit-benefits/
[segwit wallet developers guide]: https://bitcoincore.org/en/segwit_wallet_dev/
[BIP141]: https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki
[BIP143]: https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki
[BIP144]: https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki
[BIP145]: https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki
[versionbits FAQ]: https://bitcoincore.org/en/2016/06/08/version-bits-miners-faq/

Null dummy soft fork
-------------------

Various code modernizations have been done. The Bitcoin Core code base has
started using C++11. This means that a C++11-capable compiler is now needed for
building. Effectively this means GCC 4.7 or higher, or Clang 3.3 or higher.
Combined with the segwit soft fork is an additional change that turns a
long-existing network relay policy into a consensus rule. The
`OP_CHECKMULTISIG` and `OP_CHECKMULTISIGVERIFY` opcodes consume an extra
stack element ("dummy element") after signature validation. The dummy
element is not inspected in any manner, and could be replaced by any
value without invalidating the script.

When cross-compiling for a target that doesn't have C++11 libraries, configure with
`./configure --enable-glibc-back-compat ... LDFLAGS=-static-libstdc++`.
Because any value can be used for this dummy element, it's possible for
a third-party to insert data into other people's transactions, changing
the transaction's txid (called transaction malleability) and possibly
causing other problems.

Since Bitcoin Core 0.10.0, nodes have defaulted to only relaying and
mining transactions whose dummy element was a null value (0x00, also
called OP_0). The null dummy soft fork turns this relay rule into a
consensus rule both for non-segwit transactions and segwit transactions,
so that this method of mutating transactions is permanently eliminated
from the network.

Signaling for the null dummy soft fork is done by signaling support
for segwit, and the null dummy soft fork will activate at the same time
as segwit.

For more information, please see [BIP147][].

[BIP147]: https://github.com/bitcoin/bips/blob/master/bip-0147.mediawiki
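As a small schematic illustration of the rule described above (an editor's sketch, not part of the release notes): in a 2-of-3 CHECKMULTISIG spend the unlocking script pushes the dummy element first, and under BIP147 only the null push is allowed there.

```python
# Schematic stack items for a 2-of-3 OP_CHECKMULTISIG spend; <sig1>/<sig2> stand for real signatures.
script_sig_ok  = ["OP_0", "<sig1>", "<sig2>"]   # null dummy: valid once NULLDUMMY activates
script_sig_bad = ["OP_1", "<sig1>", "<sig2>"]   # same signatures, non-null dummy: rejected
```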
Low-level RPC changes
---------------------

- `importprunedfunds` only accepts two required arguments. Some versions accept
  an optional third arg, which was always ignored. Make sure to never pass more
  than two arguments.

For running the functional tests in `qa/rpc-tests`, Python3.4 or higher is now
required.

Linux ARM builds
------------------
----------------

Due to popular request, Linux ARM builds have been added to the uploaded
executables.
With the 0.13.0 release, pre-built Linux ARM binaries were added to the set of
uploaded executables. Additional detail on the ARM architecture targeted by each
is provided below.

The following extra files can be found in the download directory or torrent:

- `bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz`: Linux binaries for the most
  common 32-bit ARM architecture.
- `bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz`: Linux binaries for the most
  common 64-bit ARM architecture.
- `bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz`: Linux binaries targeting
  the 32-bit ARMv7-A architecture.
- `bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz`: Linux binaries targeting
  the 64-bit ARMv8-A architecture.

ARM builds are still experimental. If you have problems on a certain device or
Linux distribution combination please report them on the bug tracker, it may be
possible to resolve them.
possible to resolve them. Note that the device you use must be (backward)
compatible with the architecture targeted by the binary that you use.
For example, a Raspberry Pi 2 Model B or Raspberry Pi 3 Model B (in its 32-bit
execution state) device, can run the 32-bit ARMv7-A targeted binary. However,
no model of Raspberry Pi 1 device can run either binary because they are all
ARMv6 architecture devices that are not compatible with ARMv7-A or ARMv8-A.

Note that Android is not considered ARM Linux in this context. The executables
are not expected to work out of the box on Android.

0.13.0 Change log

0.13.1 Change log
=================

Detailed release notes follow. This overview includes changes that affect
@@ -123,85 +241,170 @@ behavior, not code moves, refactors and string updates. For convenience in locat
the code changes and accompanying discussion, both the pull request and
git merge commit are mentioned.
### RPC and REST
### Consensus
- #8636 `9dfa0c8` Implement NULLDUMMY softfork (BIP147) (jl2012)
- #8848 `7a34a46` Add NULLDUMMY verify flag in bitcoinconsensus.h (jl2012)
- #8937 `8b66659` Define start and end time for segwit deployment (sipa)

Asm script outputs replacements for OP_NOP2 and OP_NOP3
-------------------------------------------------------

OP_NOP2 has been renamed to OP_CHECKLOCKTIMEVERIFY by [BIP
65](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki)

OP_NOP3 has been renamed to OP_CHECKSEQUENCEVERIFY by [BIP
112](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki)

The following outputs are affected by this change:
- RPC `getrawtransaction` (in verbose mode)
- RPC `decoderawtransaction`
- RPC `decodescript`
- REST `/rest/tx/` (JSON format)
- REST `/rest/block/` (JSON format when including extended tx details)
- `bitcoin-tx -json`

New mempool information RPC calls
---------------------------------

RPC calls have been added to output detailed statistics for individual mempool
entries, as well as to calculate the in-mempool ancestors or descendants of a
transaction: see `getmempoolentry`, `getmempoolancestors`, `getmempooldescendants`.

### ZMQ

Each ZMQ notification now contains an up-counting sequence number that allows
listeners to detect lost notifications.
The sequence number is always the last element in a multi-part ZMQ notification and
therefore backward compatible.
Each message type has its own counter.
(https://github.com/bitcoin/bitcoin/pull/7762)
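A minimal listener sketch for the behaviour described above (an illustration, not part of the release notes; it assumes pyzmq is installed, bitcoind was started with something like `-zmqpubhashblock=tcp://127.0.0.1:28332`, and the sequence number is a 4-byte little-endian counter appended as the final frame):

```python
import struct
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect("tcp://127.0.0.1:28332")
sock.setsockopt_string(zmq.SUBSCRIBE, "hashblock")

last_seq = None
while True:
    topic, body, seq = sock.recv_multipart()   # sequence number is the last frame
    (seq_num,) = struct.unpack("<I", seq)      # per-topic, up-counting counter
    if last_seq is not None and seq_num != last_seq + 1:
        print("missed %d notification(s)" % (seq_num - last_seq - 1))
    last_seq = seq_num
    print(topic.decode(), body.hex(), seq_num)
```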
### Configuration and command-line options
### RPC and other APIs
- #8581 `526d2b0` Drop misleading option in importprunedfunds (MarcoFalke)
- #8699 `a5ec248` Remove createwitnessaddress RPC command (jl2012)
- #8780 `794b007` Deprecate getinfo (MarcoFalke)
- #8832 `83ad563` Throw JSONRPCError when utxo set can not be read (MarcoFalke)
- #8884 `b987348` getblockchaininfo help: pruneheight is the lowest, not highest, block (luke-jr)
- #8858 `3f508ed` rpc: Generate auth cookie in hex instead of base64 (laanwj)
- #8951 `7c2bf4b` RPC/Mining: getblocktemplate: Update and fix formatting of help (luke-jr)

### Block and transaction handling
- #8611 `a9429ca` Reduce default number of blocks to check at startup (sipa)
- #8634 `3e80ab7` Add policy: null signature for failed CHECK(MULTI)SIG (jl2012)
- #8525 `1672225` Do not store witness txn in rejection cache (sipa)
- #8499 `9777fe1` Add several policy limits and disable uncompressed keys for segwit scripts (jl2012)
- #8526 `0027672` Make non-minimal OP_IF/NOTIF argument non-standard for P2WSH (jl2012)
- #8524 `b8c79a0` Precompute sighashes (sipa)
- #8651 `b8c79a0` Predeclare PrecomputedTransactionData as struct (sipa)

### P2P protocol and network code

The p2p alert system has been removed in #7692 and the 'alert' message is no longer supported.

Fee filtering of invs (BIP 133)
------------------------------------

The optional new p2p message "feefilter" is implemented and the protocol
version is bumped to 70013. Upon receiving a feefilter message from a peer,
a node will not send invs for any transactions which do not meet the filter
feerate. [BIP 133](https://github.com/bitcoin/bips/blob/master/bip-0133.mediawiki)
### Validation
|
||||
- #8740 `42ea51a` No longer send local address in addrMe (laanwj)
|
||||
- #8427 `69d1cd2` Ignore `notfound` P2P messages (laanwj)
|
||||
- #8573 `4f84082` Set jonasschnellis dns-seeder filter flag (jonasschnelli)
|
||||
- #8712 `23feab1` Remove maxuploadtargets recommended minimum (jonasschnelli)
|
||||
- #8862 `7ae6242` Fix a few cases where messages were sent after requested disconnect (theuni)
|
||||
- #8393 `fe1975a` Support for compact blocks together with segwit (sipa)
|
||||
- #8282 `2611ad7` Feeler connections to increase online addrs in the tried table (EthanHeilman)
|
||||
- #8612 `2215c22` Check for compatibility with download in FindNextBlocksToDownload (sipa)
|
||||
- #8606 `bbf379b` Fix some locks (sipa)
|
||||
- #8594 `ab295bb` Do not add random inbound peers to addrman (gmaxwell)
|
||||
- #8940 `5b4192b` Add x9 service bit support to dnsseed.bluematt.me, seed.bitcoinstats.com (TheBlueMatt, cdecker)
|
||||
- #8944 `685e4c7` Remove bogus assert on number of outbound connections. (TheBlueMatt)
|
||||
- #8949 `0dbc48a` Be more aggressive in getting connections to peers with relevant services (gmaxwell)
|
||||
|
||||
### Build system
|
||||
|
||||
### Wallet
|
||||
|
||||
Hierarchical Deterministic Key Generation
|
||||
-----------------------------------------
|
||||
Newly created wallets will use hierarchical deterministic key generation
|
||||
according to BIP32 (keypath m/0'/0'/k').
|
||||
Existing wallets will still use traditional key generation.
|
||||
|
||||
Backups of HD wallets, regardless of when they have been created, can
therefore be used to re-generate all possible private keys, even ones
that had not yet been generated at the time of the backup.

HD key generation for new wallets can be disabled by `-usehd=0`. Keep in
mind that this flag only has an effect on newly created wallets.
You can't disable HD key generation once you have created an HD wallet.
|
||||
|
||||
There is no distinction between internal (change) and external keys.
|
||||
|
||||
[Pull request](https://github.com/bitcoin/bitcoin/pull/8035/files), [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)
|
||||
- #8293 `fa5b249` Allow building libbitcoinconsensus without any univalue (luke-jr)
|
||||
- #8492 `8b0bdd3` Allow building bench_bitcoin by itself (luke-jr)
|
||||
- #8563 `147003c` Add configure check for -latomic (ajtowns)
|
||||
- #8626 `ea51b0f` Berkeley DB v6 compatibility fix (netsafe)
|
||||
- #8520 `75f2065` Remove check for `openssl/ec.h` (laanwj)
|
||||
|
||||
### GUI
|
||||
- #8481 `d9f0d4e` Fix minimize and close bugs (adlawren)
|
||||
- #8487 `a37cec5` Persist the datadir after option reset (achow101)
|
||||
- #8697 `41fd852` Fix op order to append first alert (rodasmith)
|
||||
- #8678 `8e03382` Fix UI bug that could result in paying unexpected fee (jonasschnelli)
|
||||
- #8911 `7634d8e` Translate all files, even if wallet disabled (laanwj)
|
||||
- #8540 `1db3352` Fix random segfault when closing "Choose data directory" dialog (laanwj)
|
||||
- #7579 `f1c0d78` Show network/chain errors in the GUI (jonasschnelli)
|
||||
|
||||
### Tests
|
||||
### Wallet
|
||||
- #8443 `464dedd` Trivial cleanup of HD wallet changes (jonasschnelli)
|
||||
- #8539 `cb07f19` CDB: fix debug output (crowning-)
|
||||
- #8664 `091cdeb` Fix segwit-related wallet bug (sdaftuar)
|
||||
- #8693 `c6a6291` Add witness address to address book (instagibbs)
|
||||
- #8765 `6288659` Remove "unused" ThreadFlushWalletDB from removeprunedfunds (jonasschnelli)
|
||||
|
||||
### Tests and QA
|
||||
- #8713 `ae8c7df` create_cache: Delete temp dir when done (MarcoFalke)
|
||||
- #8716 `e34374e` Check legacy wallet as well (MarcoFalke)
|
||||
- #8750 `d6ebe13` Refactor RPCTestHandler to prevent TimeoutExpired (MarcoFalke)
|
||||
- #8652 `63462c2` remove root test directory for RPC tests (yurizhykin)
|
||||
- #8724 `da94272` walletbackup: Sync blocks inside the loop (MarcoFalke)
|
||||
- #8400 `bea02dc` enable rpcbind_test (yurizhykin)
|
||||
- #8417 `f70be14` Add walletdump RPC test (including HD- & encryption-tests) (jonasschnelli)
|
||||
- #8419 `a7aa3cc` Enable size accounting in mining unit tests (sdaftuar)
|
||||
- #8442 `8bb1efd` Rework hd wallet dump test (MarcoFalke)
|
||||
- #8528 `3606b6b` Update p2p-segwit.py to reflect correct behavior (instagibbs)
|
||||
- #8531 `a27cdd8` abandonconflict: Use assert_equal (MarcoFalke)
|
||||
- #8667 `6b07362` Fix SIGHASH_SINGLE bug in test_framework SignatureHash (jl2012)
|
||||
- #8673 `03b0196` Fix obvious assignment/equality error in test (JeremyRubin)
|
||||
- #8739 `cef633c` Fix broken sendcmpct test in p2p-compactblocks.py (sdaftuar)
|
||||
- #8418 `ff893aa` Add tests for compact blocks (sdaftuar)
|
||||
- #8803 `375437c` Ping regularly in p2p-segwit.py to keep connection alive (jl2012)
|
||||
- #8827 `9bbe66e` Split up slow RPC calls to avoid pruning test timeouts (sdaftuar)
|
||||
- #8829 `2a8bca4` Add bitcoin-tx JSON tests (jnewbery)
|
||||
- #8834 `1dd1783` blockstore: Switch to dumb dbm (MarcoFalke)
|
||||
- #8835 `d87227d` nulldummy.py: Don't run unused code (MarcoFalke)
|
||||
- #8836 `eb18cc1` bitcoin-util-test.py should fail if the output file is empty (jnewbery)
|
||||
- #8839 `31ab2f8` Avoid ConnectionResetErrors during RPC tests (laanwj)
|
||||
- #8840 `cbc3fe5` Explicitly set encoding to utf8 when opening text files (laanwj)
|
||||
- #8841 `3e4abb5` Fix nulldummy test (jl2012)
|
||||
- #8854 `624a007` Fix race condition in p2p-compactblocks test (sdaftuar)
|
||||
- #8857 `1f60d45` mininode: Only allow named args in wait_until (MarcoFalke)
|
||||
- #8860 `0bee740` util: Move wait_bitcoinds() into stop_nodes() (MarcoFalke)
|
||||
- #8882 `b73f065` Fix race conditions in p2p-compactblocks.py and sendheaders.py (sdaftuar)
|
||||
- #8904 `cc6f551` Fix compact block shortids for a test case (dagurval)
|
||||
|
||||
### Documentation
|
||||
- #8754 `0e2c6bd` Target protobuf 2.6 in OS X build notes. (fanquake)
|
||||
- #8461 `b17a3f9` Document return value of networkhashps for getmininginfo RPC endpoint (jlopp)
|
||||
- #8512 `156e305` Corrected JSON typo on setban of net.cpp (sevastos)
|
||||
- #8683 `8a7d7ff` Fix incorrect file name bitcoin.qrc (bitcoinsSG)
|
||||
- #8891 `5e0dd9e` Update bips.md for Segregated Witness (fanquake)
|
||||
- #8545 `863ae74` Update git-subtree-check.sh README (MarcoFalke)
|
||||
- #8607 `486650a` Fix doxygen off-by-one comments, fix typos (MarcoFalke)
|
||||
- #8560 `c493f43` Fix two VarInt examples in serialize.h (cbarcenas)
|
||||
- #8737 `084cae9` UndoReadFromDisk works on undo files (rev), not on block files (paveljanik)
|
||||
- #8625 `0a35573` Clarify statement about parallel jobs in rpc-tests.py (isle2983)
|
||||
- #8624 `0e6d753` build: Mention curl (MarcoFalke)
|
||||
- #8604 `b09e13c` build,doc: Update for 0.13.0+ and OpenBSD 5.9 (laanwj)
|
||||
- #8939 `06d15fb` Update implemented bips for 0.13.1 (sipa)
|
||||
|
||||
### Miscellaneous
|
||||
- #8742 `d31ac72` Specify Protobuf version 2 in paymentrequest.proto (fanquake)
|
||||
- #8414,#8558,#8676,#8700,#8701,#8702 Add missing copyright headers (isle2983, kazcw)
|
||||
- #8899 `4ed2627` Fix wake from sleep issue with Boost 1.59.0 (fanquake)
|
||||
- #8817 `bcf3806` update bitcoin-tx to output witness data (jnewbery)
|
||||
- #8513 `4e5fc31` Fix a type error that would not compile on OSX. (JeremyRubin)
|
||||
- #8392 `30eac2d` Fix several node initialization issues (sipa)
|
||||
- #8548 `305d8ac` Use `__func__` to get function name for output printing (MarcoFalke)
|
||||
- #8291 `a987431` [util] CopyrightHolders: Check for untranslated substitution (MarcoFalke)
|
||||
|
||||
Credits
|
||||
=======
|
||||
|
||||
Thanks to everyone who directly contributed to this release:
|
||||
|
||||
- adlawren
|
||||
- Alexey Vesnin
|
||||
- Anders Øyvind Urke-Sætre
|
||||
- Andrew Chow
|
||||
- Anthony Towns
|
||||
- BtcDrak
|
||||
- Chris Stewart
|
||||
- Christian Barcenas
|
||||
- Christian Decker
|
||||
- Cory Fields
|
||||
- crowning-
|
||||
- Dagur Valberg Johannsson
|
||||
- David A. Harding
|
||||
- Eric Lombrozo
|
||||
- Ethan Heilman
|
||||
- fanquake
|
||||
- Gaurav Rana
|
||||
- Gregory Maxwell
|
||||
- instagibbs
|
||||
- isle2983
|
||||
- Jameson Lopp
|
||||
- Jeremy Rubin
|
||||
- jnewbery
|
||||
- Johnson Lau
|
||||
- Jonas Schnelli
|
||||
- jonnynewbs
|
||||
- Justin Camarena
|
||||
- Kaz Wesley
|
||||
- leijurv
|
||||
- Luke Dashjr
|
||||
- MarcoFalke
|
||||
- Marty Jones
|
||||
- Matt Corallo
|
||||
- Micha
|
||||
- Michael Ford
|
||||
- mruddy
|
||||
- Pavel Janík
|
||||
- Pieter Wuille
|
||||
- rodasmith
|
||||
- Sev
|
||||
- Suhas Daftuar
|
||||
- whythat
|
||||
- Wladimir J. van der Laan
|
||||
|
||||
As well as everyone that helped translating on [Transifex](https://www.transifex.com/projects/p/bitcoin/).
|
||||
|
||||
863
doc/release-notes/release-notes-0.13.0.md
Normal file
@@ -0,0 +1,863 @@
Bitcoin Core version 0.13.0 is now available from:

  <https://bitcoin.org/bin/bitcoin-core-0.13.0/>

This is a new major version release, including new features, various bugfixes
and performance improvements, as well as updated translations.

Please report bugs using the issue tracker at github:

  <https://github.com/bitcoin/bitcoin/issues>

To receive security and update notifications, please subscribe to:

  <https://bitcoincore.org/en/list/announcements/join/>

Compatibility
==============

Microsoft ended support for Windows XP on [April 8th, 2014](https://www.microsoft.com/en-us/WindowsForBusiness/end-of-xp-support),
an OS initially released in 2001. This means that not even critical security
updates will be released anymore. Without security updates, using a bitcoin
wallet on an XP machine is irresponsible, to say the least.

In addition to that, with 0.12.x there have been varied reports of Bitcoin Core
randomly crashing on Windows XP. It is [not clear](https://github.com/bitcoin/bitcoin/issues/7681#issuecomment-217439891)
what the source of these crashes is, but it is likely that upstream
libraries such as Qt are no longer being tested on XP.

We have neither the time nor the resources to provide support for an OS that is
end-of-life. From 0.13.0 on, Windows XP is no longer supported. Users are
suggested to upgrade to a newer version of Windows, or install an alternative OS
that is supported.

No attempt is made to prevent installing or running the software on Windows XP;
you can still do so at your own risk, but do not expect it to work. Do not
report issues about Windows XP to the issue tracker.

Notable changes
===============

Database cache memory increased
--------------------------------

As a result of the growth of the UTXO set, performance with the prior default
database cache of 100 MiB has suffered.
For this reason the default was changed to 300 MiB in this release.

For nodes on low-memory systems, the database cache can be changed back to
100 MiB (or to another value) by either:

- Adding `dbcache=100` in bitcoin.conf
- Changing it in the GUI under `Options → Size of database cache`

Note that the database cache setting has the most performance impact
during initial sync of a node, and when catching up after downtime.

bitcoin-cli: arguments privacy
------------------------------

The RPC command line client gained a new argument, `-stdin`,
to read extra arguments from standard input, one per line until EOF/Ctrl-D.
For example:

    $ src/bitcoin-cli -stdin walletpassphrase
    mysecretcode
    120
    ..... press Ctrl-D here to end input
    $

It is recommended to use this for sensitive information such as wallet
passphrases, as command-line arguments can usually be read from the process
table by any user on the system.

C++11 and Python 3
------------------

Various code modernizations have been done. The Bitcoin Core code base has
started using C++11. This means that a C++11-capable compiler is now needed for
building. Effectively this means GCC 4.7 or higher, or Clang 3.3 or higher.

When cross-compiling for a target that doesn't have C++11 libraries, configure with
`./configure --enable-glibc-back-compat ... LDFLAGS=-static-libstdc++`.

For running the functional tests in `qa/rpc-tests`, Python 3.4 or higher is now
required.

Linux ARM builds
----------------

Due to popular request, Linux ARM builds have been added to the uploaded
executables.

The following extra files can be found in the download directory or torrent:

- `bitcoin-${VERSION}-arm-linux-gnueabihf.tar.gz`: Linux binaries for the most
  common 32-bit ARM architecture.
- `bitcoin-${VERSION}-aarch64-linux-gnu.tar.gz`: Linux binaries for the most
  common 64-bit ARM architecture.

ARM builds are still experimental. If you have problems on a certain device or
Linux distribution combination, please report them on the bug tracker; it may be
possible to resolve them.

Note that Android is not considered ARM Linux in this context. The executables
are not expected to work out of the box on Android.

Compact Block support (BIP 152)
-------------------------------

Support for block relay using the Compact Blocks protocol has been implemented
in PR 8068.

The primary goal is reducing the bandwidth spikes at relay time, though in many
cases it also reduces propagation delay. It is automatically enabled between
compatible peers.
[BIP 152](https://github.com/bitcoin/bips/blob/master/bip-0152.mediawiki)

As a side-effect, ordinary non-mining nodes will download and upload blocks
faster if those blocks were produced by miners using similar transaction
filtering policies. This means that a block containing many transactions
discouraged by your node will relay more slowly than one containing only
transactions already in your memory pool. The overall effect of such relay
differences on the network may be that blocks which include widely-discouraged
transactions lose a stale block race, and therefore miners may wish to
configure their node to take common relay policies into consideration.

Hierarchical Deterministic Key Generation
-----------------------------------------

Newly created wallets will use hierarchical deterministic key generation
according to BIP32 (keypath m/0'/0'/k').
Existing wallets will still use traditional key generation.

Backups of HD wallets, regardless of when they have been created, can
therefore be used to re-generate all possible private keys, even ones
that had not yet been generated at the time of the backup.
**Attention:** Encrypting the wallet will create a new seed which requires
a new backup!

Wallet dumps (created using the `dumpwallet` RPC) will contain the deterministic
seed. This is expected to allow future versions to import the seed and all
associated funds, but this is not yet implemented.

HD key generation for new wallets can be disabled by `-usehd=0`. Keep in
mind that this flag only has an effect on newly created wallets.
You can't disable HD key generation once you have created an HD wallet.

There is no distinction between internal (change) and external keys.

HD wallets are incompatible with older versions of Bitcoin Core.

[Pull request](https://github.com/bitcoin/bitcoin/pull/8035/files), [BIP 32](https://github.com/bitcoin/bips/blob/master/bip-0032.mediawiki)

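One way to see the BIP32 keypath in practice is through the `validateaddress`
RPC, which reports HD metadata for wallet keys (added in #8323, listed in the
change log below). A minimal sketch, assuming a running 0.13 node with an HD
wallet and `bitcoin-cli` on PATH:

```python
#!/usr/bin/env python3
"""Sketch: inspect the BIP32 keypath of a freshly generated address."""
import json
import subprocess

def cli(*args):
    # Shell out to bitcoin-cli and return its stdout as text.
    return subprocess.check_output(["bitcoin-cli"] + list(args)).decode().strip()

addr = cli("getnewaddress")
info = json.loads(cli("validateaddress", addr))

# For HD wallet keys, validateaddress includes the derivation path
# (e.g. m/0'/0'/5') and the fingerprint of the master key.
print(addr, info.get("hdkeypath"), info.get("hdmasterkeyid"))
```

Keys that were not derived from an HD seed simply omit these fields.
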
Segregated Witness
------------------

The code preparations for Segregated Witness ("segwit"), as described in [BIP
141](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki), [BIP
143](https://github.com/bitcoin/bips/blob/master/bip-0143.mediawiki), [BIP
144](https://github.com/bitcoin/bips/blob/master/bip-0144.mediawiki), and [BIP
145](https://github.com/bitcoin/bips/blob/master/bip-0145.mediawiki) are
finished and included in this release. However, BIP 141 does not yet specify
activation parameters on mainnet, and so this release does not support segwit
use on mainnet. Testnet use is supported, and after BIP 141 is updated with
proposed parameters, a future release of Bitcoin Core is expected that
implements those parameters for mainnet.

Furthermore, because segwit activation is not yet specified for mainnet,
version 0.13.0 will behave similarly to other pre-segwit releases even after a
future activation of BIP 141 on the network. Upgrading from 0.13.0 will be
required in order to utilize segwit-related features on mainnet (such as
signaling BIP 141 activation, mining segwit blocks, fully validating segwit
blocks, relaying segwit blocks to other segwit nodes, and using segwit
transactions in the wallet).

Mining transaction selection ("Child Pays For Parent")
------------------------------------------------------

The mining transaction selection algorithm has been replaced with an algorithm
that selects transactions based on their feerate inclusive of unconfirmed
ancestor transactions. This means that a low-fee transaction can become more
likely to be selected if a high-fee transaction that spends its outputs is
relayed.

With this change, the `-blockminsize` command line option has been removed.

The command line option `-blockmaxsize` remains an option to specify the
maximum number of serialized bytes in a generated block. In addition, the new
command line option `-blockmaxweight` has been added, which specifies the
maximum "block weight" of a generated block, as defined by
[BIP 141 (Segregated Witness)](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki).

In preparation for Segregated Witness, the mining algorithm has been modified
to optimize transaction selection for a given block weight, rather than a given
number of serialized bytes in a block. In this release, transaction selection
is unaffected by this distinction (as BIP 141 activation is not supported on
mainnet in this release, see above), but in future releases and after BIP 141
activation, these calculations would be expected to differ.

For optimal runtime performance, miners using this release should specify
`-blockmaxweight` on the command line, and not specify `-blockmaxsize`.
Additionally (or only) specifying `-blockmaxsize`, or relying on default
settings for both, may result in performance degradation, as the logic to
support `-blockmaxsize` performs additional computation to ensure that
constraint is met. (Note that for mainnet, in this release, the equivalent
parameter for `-blockmaxweight` would be four times the desired
`-blockmaxsize`. See
[BIP 141](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki)
for additional details.)

In the future, the `-blockmaxsize` option may be removed, as block creation is
no longer optimized for this metric. Feedback is requested on whether to
deprecate or keep this command line option in future releases.

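The four-times relationship mentioned above falls out of the BIP 141 weight
formula. A small illustrative calculation (not Bitcoin Core code; the function
name is made up for the example):

```python
# BIP 141 defines: weight = 3 * base_size + total_size, where base_size is the
# serialized size without witness data. Pre-segwit blocks carry no witness
# data, so total_size == base_size and the weight is exactly 4x the size.

def block_weight(base_size: int, total_size: int) -> int:
    """Block weight per BIP 141, from base and total serialized sizes."""
    return 3 * base_size + total_size

# A 1,000,000-byte block with no witness data weighs 4,000,000 weight units,
# which is why -blockmaxweight = 4 * -blockmaxsize is the stated equivalence.
assert block_weight(1000000, 1000000) == 4000000
```
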
Reindexing changes
------------------

In earlier versions, reindexing did validation while reading through the block
files on disk. These two steps have now been split up, so that all blocks are
known before validation starts. This was necessary to make certain
optimizations that are available during normal synchronizations also available
during reindexing.

The two phases are distinct in the Bitcoin-Qt GUI. During the first one,
"Reindexing blocks on disk" is shown. During the second (slower) one,
"Processing blocks on disk" is shown.

It is possible to only redo validation now, without rebuilding the block index,
using the command line option `-reindex-chainstate` (in addition to
`-reindex` which does both). This new option is useful when the blocks on disk
are assumed to be fine, but the chainstate is still corrupted. It is also
useful for benchmarks.

Removal of internal miner
--------------------------

As CPU mining has been useless for a long time, the internal miner has been
removed in this release, and replaced with a simpler implementation for the
test framework.

The overall result of this is that the `setgenerate` RPC call has been removed,
as well as the `-gen` and `-genproclimit` command-line options.

For testing, the `generate` call can still be used to mine a block, and a new
RPC call `generatetoaddress` has been added to mine to a specific address. This
works even with the wallet disabled.

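A quick regtest sketch of the new call, assuming a node running with
`-regtest` and `bitcoin-cli` on PATH (the helper below exists only for the
example):

```python
#!/usr/bin/env python3
"""Sketch: mine regtest blocks to a chosen address with generatetoaddress."""
import subprocess

def cli(*args):
    # Shell out to bitcoin-cli against the regtest node.
    return subprocess.check_output(["bitcoin-cli", "-regtest"] + list(args)).decode().strip()

# Any valid address works; here we ask the (optional) wallet for one. On a
# wallet-disabled node, substitute an address generated elsewhere.
address = cli("getnewaddress")

# Mine 101 blocks to that address so the first coinbase output matures.
print(cli("generatetoaddress", "101", address))
```
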
New bytespersigop implementation
--------------------------------

The former implementation of the bytespersigop filter accidentally broke bare
multisig (which is meant to be controlled by the `permitbaremultisig` option),
since the consensus protocol always counts these older transaction forms as 20
sigops for backwards compatibility. Simply fixing this bug by counting more
accurately would have reintroduced a vulnerability. It has therefore been
replaced with a new implementation that, rather than filtering such
transactions, treats them (for fee purposes only) as if they were in fact the
size of a transaction actually using all 20 sigops.

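In rough terms, the new behaviour can be thought of as computing an effective
size for fee purposes. The sketch below is illustrative only (the constant and
function are made up for the example, not the exact Bitcoin Core logic):

```python
# Treat a transaction as the larger of its real size and the size implied by
# its sigop count, so sigop-heavy transactions pay fees as if they consumed
# that much block space instead of being rejected outright.

BYTES_PER_SIGOP = 20  # the policy default of this era, tunable via -bytespersigop

def size_for_fee_purposes(tx_size_bytes: int, sigops: int) -> int:
    """Effective size used when computing a transaction's feerate."""
    return max(tx_size_bytes, sigops * BYTES_PER_SIGOP)

# A 200-byte bare multisig counted as 20 legacy sigops is charged as 400 bytes.
assert size_for_fee_purposes(200, 20) == 400
```
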
Low-level P2P changes
----------------------

- The optional new p2p message "feefilter" is implemented and the protocol
  version is bumped to 70013. Upon receiving a feefilter message from a peer,
  a node will not send invs for any transactions which do not meet the filter
  feerate. [BIP 133](https://github.com/bitcoin/bips/blob/master/bip-0133.mediawiki)
  (A sketch of the feefilter payload encoding follows this list.)

- The P2P alert system has been removed in PR #7692 and the `alert` P2P message
  is no longer supported.

- The transaction relay mechanism used to relay one quarter of all transactions
  instantly, while queueing up the rest and sending them out in batch. As
  this resulted in chains of dependent transactions being reordered, it
  systematically hurt transaction relay. The relay code was redesigned in PRs
  #7840 and #8082, and now always batches transaction announcements while also
  sorting them according to dependency order. This significantly reduces orphan
  transactions. To compensate for the removal of instant relay, the frequency of
  batch sending was doubled for outgoing peers.

- Since PR #7840 the BIP35 `mempool` command is also subject to batch processing.
  Also the `mempool` message is no longer handled for non-whitelisted peers when
  `NODE_BLOOM` is disabled through `-peerbloomfilters=0`.

- The maximum size of orphan transactions that are kept in memory until their
  ancestors arrive has been raised in PR #8179 from 5000 to 99999 bytes. They
  are now also removed from memory when they are included in a block, conflict
  with a block, or time out after 20 minutes.

- We respond at most once to a getaddr request during the lifetime of a
  connection since PR #7856.

- Connections to peers who have recently been the first one to give us a valid
  new block or transaction are protected from disconnections since PR #8084.

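For reference, the feefilter payload itself is tiny: per BIP 133 it is a single
8-byte little-endian integer giving the minimum feerate, in satoshis per 1000
bytes, for which the peer wants transaction invs. The sketch below shows only
the payload encoding (message framing with network magic, command and checksum
is omitted, and the function names are made up for the example):

```python
import struct

def encode_feefilter(feerate_sat_per_kb: int) -> bytes:
    # int64, little-endian, as specified by BIP 133.
    return struct.pack("<q", feerate_sat_per_kb)

def decode_feefilter(payload: bytes) -> int:
    (feerate,) = struct.unpack("<q", payload)
    return feerate

payload = encode_feefilter(1000)   # "don't announce txs paying under 1000 sat/kB"
assert decode_feefilter(payload) == 1000
```
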
Low-level RPC changes
----------------------

- RPC calls have been added to output detailed statistics for individual mempool
  entries, as well as to calculate the in-mempool ancestors or descendants of a
  transaction: see `getmempoolentry`, `getmempoolancestors`, `getmempooldescendants`.

- `gettxoutsetinfo` UTXO hash (`hash_serialized`) has changed. There was a divergence between
  32-bit and 64-bit platforms, and the txids were missing in the hashed data. This has been
  fixed, but this means that the output will be different from previous versions.

- Full UTF-8 support in the RPC API. Non-ASCII characters in, for example,
  wallet labels have always been malformed because they weren't taken into account
  properly in JSON RPC processing. This is no longer the case. This also affects
  the GUI debug console.

- Asm script outputs replacements for OP_NOP2 and OP_NOP3:

  - OP_NOP2 has been renamed to OP_CHECKLOCKTIMEVERIFY by [BIP
    65](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki)

  - OP_NOP3 has been renamed to OP_CHECKSEQUENCEVERIFY by [BIP
    112](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki)

  - The following outputs are affected by this change:

    - RPC `getrawtransaction` (in verbose mode)
    - RPC `decoderawtransaction`
    - RPC `decodescript`
    - REST `/rest/tx/` (JSON format)
    - REST `/rest/block/` (JSON format when including extended tx details)
    - `bitcoin-tx -json`

- The sorting of the `getrawmempool` output has changed.

- New RPC commands: `generatetoaddress`, `importprunedfunds`, `removeprunedfunds`, `signmessagewithprivkey`,
  `getmempoolancestors`, `getmempooldescendants`, `getmempoolentry`,
  `createwitnessaddress`, `addwitnessaddress`.

- Removed RPC commands: `setgenerate`, `getgenerate`.

- New options were added to `fundrawtransaction`: `includeWatching`, `changeAddress`, `changePosition` and `feeRate`
  (see the sketch after this list).

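A small usage sketch for the new `fundrawtransaction` options, assuming a
running node with a wallet, `bitcoin-cli` on PATH, and a raw transaction hex
already produced by `createrawtransaction` (the placeholder value below must be
replaced):

```python
#!/usr/bin/env python3
"""Sketch: pass the new fundrawtransaction options as a JSON object."""
import json
import subprocess

def cli(*args):
    return subprocess.check_output(["bitcoin-cli"] + list(args)).decode().strip()

raw_hex = "<hex from createrawtransaction>"  # placeholder

options = {
    "includeWatching": False,
    "changePosition": 1,
    "feeRate": 0.0002,  # BTC per kB
}

funded = json.loads(cli("fundrawtransaction", raw_hex, json.dumps(options)))
print(funded["hex"], funded["fee"], funded["changepos"])
```
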
Low-level ZMQ changes
----------------------

- Each ZMQ notification now contains an up-counting sequence number that allows
  listeners to detect lost notifications.
  The sequence number is always the last element in a multi-part ZMQ notification
  and the change is therefore backward compatible. Each message type has its own
  counter. PR [#7762](https://github.com/bitcoin/bitcoin/pull/7762).
  (A listener sketch follows below.)

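A minimal listener sketch, assuming `pyzmq` is installed and the node was
started with, e.g., `-zmqpubhashblock=tcp://127.0.0.1:28332` (the endpoint is an
arbitrary choice for the example):

```python
#!/usr/bin/env python3
"""Sketch: watch the per-topic ZMQ sequence number for gaps."""
import struct
import zmq

ctx = zmq.Context()
sock = ctx.socket(zmq.SUB)
sock.connect("tcp://127.0.0.1:28332")
sock.setsockopt(zmq.SUBSCRIBE, b"hashblock")

expected = None
while True:
    parts = sock.recv_multipart()
    # The sequence number is the last message part: a little-endian uint32,
    # counted separately for each topic.
    topic, body, seq = parts[0], parts[1], parts[-1]
    (sequence,) = struct.unpack("<I", seq)
    if expected is not None and sequence != expected:
        print("missed %d notification(s)" % (sequence - expected))
    expected = sequence + 1
    print(topic.decode(), body.hex(), sequence)
```
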
0.13.0 Change log
=================

Detailed release notes follow. This overview includes changes that affect
behavior, not code moves, refactors and string updates. For convenience in locating
the code changes and accompanying discussion, both the pull request and
git merge commit are mentioned.

### RPC and other APIs
|
||||
|
||||
- #7156 `9ee02cf` Remove cs_main lock from `createrawtransaction` (laanwj)
|
||||
- #7326 `2cd004b` Fix typo, wrong information in gettxout help text (paveljanik)
|
||||
- #7222 `82429d0` Indicate which transactions are signaling opt-in RBF (sdaftuar)
|
||||
- #7480 `b49a623` Changed getnetworkhps value to double to avoid overflow (instagibbs)
|
||||
- #7550 `8b958ab` Input-from-stdin mode for bitcoin-cli (laanwj)
|
||||
- #7670 `c9a1265` Use cached block hash in blockToJSON() (rat4)
|
||||
- #7726 `9af69fa` Correct importaddress help reference to importpubkey (CypherGrue)
|
||||
- #7766 `16555b6` Register calls where they are defined (laanwj)
|
||||
- #7797 `e662a76` Fix generatetoaddress failing to parse address (mruddy)
|
||||
- #7774 `916b15a` Add versionHex in getblock and getblockheader JSON results (mruddy)
|
||||
- #7863 `72c54e3` Getblockchaininfo: make bip9_softforks an object, not an array (rustyrussell)
|
||||
- #7842 `d97101e` Do not print minping time in getpeerinfo when no ping received yet (paveljanik)
|
||||
- #7518 `be14ca5` Add multiple options to fundrawtransaction (promag)
|
||||
- #7756 `9e47fce` Add cursor to iterate over utxo set, use this in `gettxoutsetinfo` (laanwj)
|
||||
- #7848 `88616d2` Divergence between 32- and 64-bit when hashing >4GB affects `gettxoutsetinfo` (laanwj)
|
||||
- #7827 `4205ad7` Speed up `getchaintips` (mrbandrews)
|
||||
- #7762 `a1eb344` Append a message sequence number to every ZMQ notification (jonasschnelli)
|
||||
- #7688 `46880ed` List solvability in listunspent output and improve help (sipa)
|
||||
- #7926 `5725807` Push back `getaddednodeinfo` dead value (instagibbs)
|
||||
- #7953 `0630353` Create `signmessagewithprivkey` rpc (achow101)
|
||||
- #8049 `c028c7b` Expose information on whether transaction relay is enabled in `getnetworkinfo` (laanwj)
|
||||
- #7967 `8c1e49b` Add feerate option to `fundrawtransaction` (jonasschnelli)
|
||||
- #8118 `9b6a48c` Reduce unnecessary hashing in `signrawtransaction` (jonasnick)
|
||||
- #7957 `79004d4` Add support for transaction sequence number (jonasschnelli)
|
||||
- #8153 `75ec320` `fundrawtransaction` feeRate: Use BTC/kB (MarcoFalke)
|
||||
- #7292 `7ce9ac5` Expose ancestor/descendant information over RPC (sdaftuar)
|
||||
- #8171 `62fcf27` Fix createrawtx sequence number unsigned int parsing (jonasschnelli)
|
||||
- #7892 `9c3d0fa` Add full UTF-8 support to RPC (laanwj)
|
||||
- #8317 `304eff3` Don't use floating point in rpcwallet (MarcoFalke)
|
||||
- #8258 `5a06ebb` Hide softfork in `getblockchaininfo` if timeout is 0 (jl2012)
|
||||
- #8244 `1922e5a` Remove unnecessary LOCK(cs_main) in getrawmempool (dcousens)
|
||||
|
||||
### Block and transaction handling
|
||||
|
||||
- #7056 `6a07208` Save last db read (morcos)
|
||||
- #6842 `0192806` Limitfreerelay edge case bugfix (ptschip)
|
||||
- #7084 `11d74f6` Replace maxFeeRate of 10000*minRelayTxFee with maxTxFee in mempool (MarcoFalke)
|
||||
- #7539 `9f33dba` Add tags to mempool's mapTx indices (sdaftuar)
|
||||
- #7592 `26a2a72` Re-remove ERROR logging for mempool rejects (laanwj)
|
||||
- #7187 `14d6324` Keep reorgs fast for SequenceLocks checks (morcos)
|
||||
- #7594 `01f4267` Mempool: Add tracking of ancestor packages (sdaftuar)
|
||||
- #7904 `fc9e334` Txdb: Fix assert crash in new UTXO set cursor (laanwj)
|
||||
- #7927 `f9c2ac7` Minor changes to dbwrapper to simplify support for other databases (laanwj)
|
||||
- #7933 `e26b620` Fix OOM when deserializing UTXO entries with invalid length (sipa)
|
||||
- #8020 `5e374f7` Use SipHash-2-4 for various non-cryptographic hashes (sipa)
|
||||
- #8076 `d720980` VerifyDB: don't check blocks that have been pruned (sdaftuar)
|
||||
- #8080 `862fd24` Do not use mempool for GETDATA for tx accepted after the last mempool req (gmaxwell)
|
||||
- #7997 `a82f033` Replace mapNextTx with slimmer setSpends (kazcw)
|
||||
- #8220 `1f86d64` Stop trimming when mapTx is empty (sipa)
|
||||
- #8273 `396f9d6` Bump `-dbcache` default to 300MiB (laanwj)
|
||||
- #7225 `eb33179` Eliminate unnecessary call to CheckBlock (sdaftuar)
|
||||
- #7907 `006cdf6` Optimize and Cleanup CScript::FindAndDelete (pstratem)
|
||||
- #7917 `239d419` Optimize reindex (sipa)
|
||||
- #7763 `3081fb9` Put hex-encoded version in UpdateTip (sipa)
|
||||
- #8149 `d612837` Testnet-only segregated witness (sipa)
|
||||
- #8305 `3730393` Improve handling of unconnecting headers (sdaftuar)
|
||||
- #8363 `fca1a41` Rename "block cost" to "block weight" (sdaftuar)
|
||||
- #8381 `f84ee3d` Make witness v0 outputs non-standard (jl2012)
|
||||
- #8364 `3f65ba2` Treat high-sigop transactions as larger rather than rejecting them (sipa)
|
||||
|
||||
### P2P protocol and network code
|
||||
|
||||
- #6589 `dc0305d` Log bytes recv/sent per command (jonasschnelli)
|
||||
- #7164 `3b43cad` Do not download transactions during initial blockchain sync (ptschip)
|
||||
- #7458 `898fedf` peers.dat, banlist.dat recreated when missing (kirkalx)
|
||||
- #7637 `3da5d1b` Fix memleak in TorController (laanwj, jonasschnelli)
|
||||
- #7553 `9f14e5a` Remove vfReachable and modify IsReachable to only use vfLimited (pstratem)
|
||||
- #7708 `9426632` De-neuter NODE_BLOOM (pstratem)
|
||||
- #7692 `29b2be6` Remove P2P alert system (btcdrak)
|
||||
- #7542 `c946a15` Implement "feefilter" P2P message (morcos)
|
||||
- #7573 `352fd57` Add `-maxtimeadjustment` command line option (mruddy)
|
||||
- #7570 `232592a` Add IPv6 Link-Local Address Support (mruddy)
|
||||
- #7874 `e6a4d48` Improve AlreadyHave (morcos)
|
||||
- #7856 `64e71b3` Only send one GetAddr response per connection (gmaxwell)
|
||||
- #7868 `7daa3ad` Split DNS resolving functionality out of net structures (theuni)
|
||||
- #7919 `7617682` Fix headers announcements edge case (sdaftuar)
|
||||
- #7514 `d9594bf` Fix IsInitialBlockDownload for testnet (jmacwhyte)
|
||||
- #7959 `03cf6e8` fix race that could fail to persist a ban (kazcw)
|
||||
- #7840 `3b9a0bf` Several performance and privacy improvements to inv/mempool handling (sipa)
|
||||
- #8011 `65aecda` Don't run ThreadMessageHandler at lowered priority (kazcw)
|
||||
- #7696 `5c3f8dd` Fix de-serialization bug where AddrMan is left corrupted (EthanHeilman)
|
||||
- #7932 `ed749bd` CAddrMan::Deserialize handle corrupt serializations better (pstratem)
|
||||
- #7906 `83121cc` Prerequisites for p2p encapsulation changes (theuni)
|
||||
- #8033 `18436d8` Fix Socks5() connect failures to be less noisy and less unnecessarily scary (wtogami)
|
||||
- #8082 `01d8359` Defer inserting into maprelay until just before relaying (gmaxwell)
|
||||
- #7960 `6a22373` Only use AddInventoryKnown for transactions (sdaftuar)
|
||||
- #8078 `2156fa2` Disable the mempool P2P command when bloom filters disabled (petertodd)
|
||||
- #8065 `67c91f8` Addrman offline attempts (gmaxwell)
|
||||
- #7703 `761cddb` Tor: Change auth order to only use password auth if -torpassword (laanwj)
|
||||
- #8083 `cd0c513` Add support for dnsseeds with option to filter by servicebits (jonasschnelli)
|
||||
- #8173 `4286f43` Use SipHash for node eviction (sipa)
|
||||
- #8154 `1445835` Drop vAddrToSend after sending big addr message (kazcw)
|
||||
- #7749 `be9711e` Enforce expected outbound services (sipa)
|
||||
- #8208 `0a64777` Do not set extra flags for unfiltered DNS seed results (sipa)
|
||||
- #8084 `e4bb4a8` Add recently accepted blocks and txn to AttemptToEvictConnection (gmaxwell)
|
||||
- #8113 `3f89a53` Rework addnode behaviour (sipa)
|
||||
- #8179 `94ab58b` Evict orphans which are included or precluded by accepted blocks (gmaxwell)
|
||||
- #8068 `e9d76a1` Compact Blocks (TheBlueMatt)
|
||||
- #8204 `0833894` Update petertodd's testnet seed (petertodd)
|
||||
- #8247 `5cd35d3` Mark my dnsseed as supporting filtering (sipa)
|
||||
- #8275 `042c323` Remove bad chain alert partition check (btcdrak)
|
||||
- #8271 `1bc9c80` Do not send witnesses in cmpctblock (sipa)
|
||||
- #8312 `ca40ef6` Fix mempool DoS vulnerability from malleated transactions (sdaftuar)
|
||||
- #7180 `16ccb74` Account for `sendheaders` `verack` messages (laanwj)
|
||||
- #8102 `425278d` Bugfix: use global ::fRelayTxes instead of CNode in version send (sipa)
|
||||
- #8408 `b7e2011` Prevent fingerprinting, disk-DoS with compact blocks (sdaftuar)
|
||||
|
||||
### Build system
|
||||
|
||||
- #7302 `41f1a3e` C++11 build/runtime fixes (theuni)
|
||||
- #7322 `fd9356b` c++11: add scoped enum fallbacks to CPPFLAGS rather than defining them locally (theuni)
|
||||
- #7441 `a6771fc` Use Debian 8.3 in gitian build guide (fanquake)
|
||||
- #7349 `152a821` Build against system UniValue when available (luke-jr)
|
||||
- #7520 `621940e` LibreSSL doesn't define OPENSSL_VERSION, use LIBRESSL_VERSION_TEXT instead (paveljanik)
|
||||
- #7528 `9b9bfce` autogen.sh: warn about needing autoconf if autoreconf is not found (knocte)
|
||||
- #7504 `19324cf` Crystal clean make clean (paveljanik)
|
||||
- #7619 `18b3f1b` Add missing sudo entry in gitian VM setup (btcdrak)
|
||||
- #7616 `639ec58` [depends] Delete unused patches (MarcoFalke)
|
||||
- #7658 `c15eb28` Add curl to Gitian setup instructions (btcdrak)
|
||||
- #7710 `909b72b` [Depends] Bump miniupnpc and config.guess+sub (fanquake)
|
||||
- #7723 `5131005` build: python 3 compatibility (laanwj)
|
||||
- #7477 `28ad4d9` Fix quoting of copyright holders in configure.ac (domob1812)
|
||||
- #7711 `a67bc5e` [build-aux] Update Boost & check macros to latest serials (fanquake)
|
||||
- #7788 `4dc1b3a` Use relative paths instead of absolute paths in protoc calls (paveljanik)
|
||||
- #7809 `bbd210d` depends: some base fixes/changes (theuni)
|
||||
- #7603 `73fc922` Build System: Use PACKAGE_TARNAME in NSIS script (JeremyRand)
|
||||
- #7905 `187186b` test: move accounting_tests and rpc_wallet_tests to wallet/test (laanwj)
|
||||
- #7911 `351abf9` leveldb: integrate leveldb into our buildsystem (theuni)
|
||||
- #7944 `a407807` Re-instate TARGET_OS=linux in configure.ac. Removed by 351abf9e035 (randy-waterhouse)
|
||||
- #7920 `c3e3cfb` Switch Travis to Trusty (theuni)
|
||||
- #7954 `08b37c5` build: quiet annoying warnings without adding new ones (theuni)
|
||||
- #7165 `06162f1` build: Enable C++11 in build, require C++11 compiler (laanwj)
|
||||
- #7982 `559fbae` build: No need to check for leveldb atomics (theuni)
|
||||
- #8002 `f9b4582` [depends] Add -stdlib=libc++ to darwin CXX flags (fanquake)
|
||||
- #7993 `6a034ed` [depends] Bump Freetype, ccache, ZeroMQ, miniupnpc, expat (fanquake)
|
||||
- #8167 `19ea173` Ship debug tarballs/zips with debug symbols (theuni)
|
||||
- #8175 `f0299d8` Add --disable-bench to config flags for windows (laanwj)
|
||||
- #7283 `fd9881a` [gitian] Default reference_datetime to commit author date (MarcoFalke)
|
||||
- #8181 `9201ce8` Get rid of `CLIENT_DATE` (laanwj)
|
||||
- #8133 `fde0ac4` Finish up out-of-tree changes (theuni)
|
||||
- #8188 `65a9d7d` Add armhf/aarch64 gitian builds (theuni)
|
||||
- #8194 `cca1c8c` [gitian] set correct PATH for wrappers (MarcoFalke)
|
||||
- #8198 `5201614` Sync ax_pthread with upstream draft4 (fanquake)
|
||||
- #8210 `12a541e` [Qt] Bump to Qt5.6.1 (jonasschnelli)
|
||||
- #8285 `da50997` windows: Add testnet link to installer (laanwj)
|
||||
- #8304 `0cca2fe` [travis] Update SDK_URL (MarcoFalke)
|
||||
- #8310 `6ae20df` Require boost for bench (theuni)
|
||||
- #8315 `2e51590` Don't require sudo for Linux (theuni)
|
||||
- #8314 `67caef6` Fix pkg-config issues for 0.13 (theuni)
|
||||
- #8373 `1fe7f40` Fix OSX non-deterministic dmg (theuni)
|
||||
- #8358 `cfd1280` Gbuild: Set memory explicitly (default is too low) (MarcoFalke)
|
||||
|
||||
### GUI
|
||||
|
||||
- #7154 `00b4b8d` Add InMempool() info to transaction details (jonasschnelli)
|
||||
- #7068 `5f3c670` [RPC-Tests] add simple way to run rpc test over QT clients (jonasschnelli)
|
||||
- #7218 `a1c185b` Fix misleading translation (MarcoFalke)
|
||||
- #7214 `be9a9a3` qt5: Use the fixed font the system recommends (MarcoFalke)
|
||||
- #7256 `08ab906` Add note to coin control dialog QT5 workaround (fanquake)
|
||||
- #7255 `e289807` Replace some instances of formatWithUnit with formatHtmlWithUnit (fanquake)
|
||||
- #7317 `3b57e9c` Fix RPCTimerInterface ordering issue (jonasschnelli)
|
||||
- #7327 `c079d79` Transaction View: LastMonth calculation fixed (crowning-)
|
||||
- #7334 `e1060c5` coincontrol workaround is still needed in qt5.4 (fixed in qt5.5) (MarcoFalke)
|
||||
- #7383 `ae2db67` Rename "amount" to "requested amount" in receive coins table (jonasschnelli)
|
||||
- #7396 `cdcbc59` Add option to increase/decrease font size in the console window (jonasschnelli)
|
||||
- #7437 `9645218` Disable tab navigation for peers tables (Kefkius)
|
||||
- #7604 `354b03d` build: Remove spurious dollar sign. Fixes #7189 (dooglus)
|
||||
- #7605 `7f001bd` Remove openssl info from init/log and from Qt debug window (jonasschnelli)
|
||||
- #7628 `87d6562` Add 'copy full transaction details' option (ericshawlinux)
|
||||
- #7613 `3798e5d` Add autocomplete to bitcoin-qt's console window (GamerSg)
|
||||
- #7668 `b24266c` Fix history deletion bug after font size change (achow101)
|
||||
- #7680 `41d2dfa` Remove reflection from `about` icon (laanwj)
|
||||
- #7686 `f034bce` Remove 0-fee from send dialog (MarcoFalke)
|
||||
- #7506 `b88e0b0` Use CCoinControl selection in CWallet::FundTransaction (promag)
|
||||
- #7732 `0b98dd7` Debug window: replace "Build date" with "Datadir" (jonasschnelli)
|
||||
- #7761 `60db51d` remove trailing output-index from transaction-id (jonasschnelli)
|
||||
- #7772 `6383268` Clear the input line after activating autocomplete (paveljanik)
|
||||
- #7925 `f604bf6` Fix out-of-tree GUI builds (laanwj)
|
||||
- #7939 `574ddc6` Make it possible to show details for multiple transactions (laanwj)
|
||||
- #8012 `b33824b` Delay user confirmation of send (Tyler-Hardin)
|
||||
- #8006 `7c8558d` Add option to disable the system tray icon (Tyler-Hardin)
|
||||
- #8046 `169d379` Fix Cmd-Q / Menu Quit shutdown on OSX (jonasschnelli)
|
||||
- #8042 `6929711` Don't allow to open the debug window during splashscreen & verification state (jonasschnelli)
|
||||
- #8014 `77b49ac` Sort transactions by date (Tyler-Hardin)
|
||||
- #8073 `eb2f6f7` askpassphrasedialog: Clear pass fields on accept (rat4)
|
||||
- #8129 `ee1533e` Fix RPC console auto completer (UdjinM6)
|
||||
- #7636 `fb0ac48` Add bitcoin address label to request payment QR code (makevoid)
|
||||
- #8231 `760a6c7` Fix a bug where the SplashScreen will not be hidden during startup (jonasschnelli)
|
||||
- #8256 `af2421c` BUG: bitcoin-qt crash (fsb4000)
|
||||
- #8257 `ff03c50` Do not ask a UI question from bitcoind (sipa)
|
||||
- #8288 `91abb77` Network-specific example address (laanwj)
|
||||
- #7707 `a914968` UI support for abandoned transactions (jonasschnelli)
|
||||
- #8207 `f7a403b` Add a link to the Bitcoin-Core repository and website to the About Dialog (MarcoFalke)
|
||||
- #8281 `6a87eb0` Remove client name from debug window (laanwj)
|
||||
- #8407 `45eba4b` Add dbcache migration path (jonasschnelli)
|
||||
|
||||
### Wallet
|
||||
|
||||
- #7262 `fc08994` Reduce inefficiency of GetAccountAddress() (dooglus)
|
||||
- #7537 `78e81b0` Warn on unexpected EOF while salvaging wallet (laanwj)
|
||||
- #7521 `3368895` Don't resend wallet txs that aren't in our own mempool (morcos)
|
||||
- #7576 `86a1ec5` Move wallet help string creation to CWallet (jonasschnelli)
|
||||
- #7577 `5b3b5a7` Move "load wallet phase" to CWallet (jonasschnelli)
|
||||
- #7608 `0735c0c` Move hardcoded file name out of log messages (MarcoFalke)
|
||||
- #7649 `4900641` Prevent multiple calls to CWallet::AvailableCoins (promag)
|
||||
- #7646 `e5c3511` Fix lockunspent help message (promag)
|
||||
- #7558 `b35a591` Add import/removeprunedfunds rpc call (instagibbs)
|
||||
- #6215 `48c5adf` add bip32 pub key serialization (jonasschnelli)
|
||||
- #7913 `bafd075` Fix for incorrect locking in GetPubKey() (keystore.cpp) (yurizhykin)
|
||||
- #8036 `41138f9` init: Move berkeleydb version reporting to wallet (laanwj)
|
||||
- #8028 `373b50d` Fix insanity of CWalletDB::WriteTx and CWalletTx::WriteToDisk (pstratem)
|
||||
- #8061 `f6b7df3` Improve Wallet encapsulation (pstratem)
|
||||
- #7891 `950be19` Always require OS randomness when generating secret keys (sipa)
|
||||
- #7689 `b89ef13` Replace OpenSSL AES with ctaes-based version (sipa)
|
||||
- #7825 `f972b04` Prevent multiple calls to ExtractDestination (pedrobranco)
|
||||
- #8137 `243ac0c` Improve CWallet API with new AccountMove function (pstratem)
|
||||
- #8142 `52c3f34` Improve CWallet API with new GetAccountPubkey function (pstratem)
|
||||
- #8035 `b67a472` Add simplest BIP32/deterministic key generation implementation (jonasschnelli)
|
||||
- #7687 `a6ddb19` Stop treating importaddress'ed scripts as change (sipa)
|
||||
- #8298 `aef3811` wallet: Revert input selection post-pruning (laanwj)
|
||||
- #8324 `bc94b87` Keep HD seed during salvagewallet (jonasschnelli)
|
||||
- #8323 `238300b` Add HD keypath to CKeyMetadata, report metadata in validateaddress (jonasschnelli)
|
||||
- #8367 `3b38a6a` Ensure <0.13 clients can't open HD wallets (jonasschnelli)
|
||||
- #8378 `ebea651` Move SetMinVersion for FEATURE_HD to SetHDMasterKey (pstratem)
|
||||
- #8390 `73adfe3` Correct hdmasterkeyid/masterkeyid name confusion (jonasschnelli)
|
||||
- #8206 `18b8ee1` Add HD xpriv to dumpwallet (jonasschnelli)
|
||||
- #8389 `c3c82c4` Create a new HD seed after encrypting the wallet (jonasschnelli)
|
||||
|
||||
### Tests and QA
|
||||
|
||||
- #7320 `d3dfc6d` Test walletpassphrase timeout (MarcoFalke)
|
||||
- #7208 `47c5ed1` Make max tip age an option instead of chainparam (laanwj)
|
||||
- #7372 `21376af` Trivial: [qa] wallet: Print maintenance (MarcoFalke)
|
||||
- #7280 `668906f` [travis] Fail when documentation is outdated (MarcoFalke)
|
||||
- #7177 `93b0576` [qa] Change default block priority size to 0 (MarcoFalke)
|
||||
- #7236 `02676c5` Use createrawtx locktime parm in txn_clone (dgenr8)
|
||||
- #7212 `326ffed` Adds unittests for CAddrMan and CAddrinfo, removes source of non-determinism (EthanHeilman)
|
||||
- #7490 `d007511` tests: Remove May15 test (laanwj)
|
||||
- #7531 `18cb2d5` Add bip68-sequence.py to extended rpc tests (btcdrak)
|
||||
- #7536 `ce5fc02` test: test leading spaces for ParseHex (laanwj)
|
||||
- #7620 `1b68de3` [travis] Only run check-doc.py once (MarcoFalke)
|
||||
- #7455 `7f96671` [travis] Exit early when check-doc.py fails (MarcoFalke)
|
||||
- #7667 `56d2c4e` Move GetTempPath() to testutil (musalbas)
|
||||
- #7517 `f1ca891` test: script_error checking in script_invalid tests (laanwj)
|
||||
- #7684 `3d0dfdb` Extend tests (MarcoFalke)
|
||||
- #7697 `622fe6c` Tests: make prioritise_transaction.py more robust (sdaftuar)
|
||||
- #7709 `efde86b` Tests: fix missing import in mempool_packages (sdaftuar)
|
||||
- #7702 `29e1131` Add tests verifychain, lockunspent, getbalance, listsinceblock (MarcoFalke)
|
||||
- #7720 `3b4324b` rpc-test: Normalize assert() (MarcoFalke)
|
||||
- #7757 `26794d4` wallet: Wait for reindex to catch up (MarcoFalke)
|
||||
- #7764 `a65b36c` Don't run pruning.py twice (MarcoFalke)
|
||||
- #7773 `7c80e72` Fix comments in tests (btcdrak)
|
||||
- #7489 `e9723cb` tests: Make proxy_test work on travis servers without IPv6 (laanwj)
|
||||
- #7801 `70ac71b` Remove misleading "errorString syntax" (MarcoFalke)
|
||||
- #7803 `401c65c` maxblocksinflight: Actually enable test (MarcoFalke)
|
||||
- #7802 `3bc71e1` httpbasics: Actually test second connection (MarcoFalke)
|
||||
- #7849 `ab8586e` tests: add varints_bitpatterns test (laanwj)
|
||||
- #7846 `491171f` Clean up lockorder data of destroyed mutexes (sipa)
|
||||
- #7853 `6ef5e00` py2: Unfiddle strings into bytes explicitly (MarcoFalke)
|
||||
- #7878 `53adc83` [test] bctest.py: Revert faa41ee (MarcoFalke)
|
||||
- #7798 `cabba24` [travis] Print the commit which was evaluated (MarcoFalke)
|
||||
- #7833 `b1bf511` tests: Check Content-Type header returned from RPC server (laanwj)
|
||||
- #7851 `fa9d86f` pull-tester: Don't mute zmq ImportError (MarcoFalke)
|
||||
- #7822 `0e6fd5e` Add listunspent() test for spendable/unspendable UTXO (jpdffonseca)
|
||||
- #7912 `59ad568` Tests: Fix deserialization of reject messages (sdaftuar)
|
||||
- #7941 `0ea3941` Fixing comment in script_test.json test case (Christewart)
|
||||
- #7807 `0ad1041` Fixed miner test values, gave constants for less error-prone values (instagibbs)
|
||||
- #7980 `88b77c7` Smartfees: Properly use ordered dict (MarcoFalke)
|
||||
- #7814 `77b637f` Switch to py3 (MarcoFalke)
|
||||
- #8030 `409a8a1` Revert fatal-ness of missing python-zmq (laanwj)
|
||||
- #8018 `3e90fe6` Autofind rpc tests --srcdir (jonasschnelli)
|
||||
- #8016 `5767e80` Fix multithread CScheduler and reenable test (paveljanik)
|
||||
- #7972 `423ca30` pull-tester: Run rpc test in parallel (MarcoFalke)
|
||||
- #8039 `69b3a6d` Bench: Add crypto hash benchmarks (laanwj)
|
||||
- #8041 `5b736dd` Fix bip9-softforks blockstore issue (MarcoFalke)
|
||||
- #7994 `1f01443` Add op csv tests to script_tests.json (Christewart)
|
||||
- #8038 `e2bf830` Various minor fixes (MarcoFalke)
|
||||
- #8072 `1b87e5b` Travis: 'make check' in parallel and verbose (theuni)
|
||||
- #8056 `8844ef1` Remove hardcoded "4 nodes" from test_framework (MarcoFalke)
|
||||
- #8047 `37f9a1f` Test_framework: Set wait-timeout for bitcoind procs (MarcoFalke)
|
||||
- #8095 `6700cc9` Test framework: only cleanup on successful test runs (sdaftuar)
|
||||
- #8098 `06bd4f6` Test_framework: Append portseed to tmpdir (MarcoFalke)
|
||||
- #8104 `6ff2c8d` Add timeout to sync_blocks() and sync_mempools() (sdaftuar)
|
||||
- #8111 `61b8684` Benchmark SipHash (sipa)
|
||||
- #8107 `52b803e` Bench: Added base58 encoding/decoding benchmarks (yurizhykin)
|
||||
- #8115 `0026e0e` Avoid integer division in the benchmark inner-most loop (gmaxwell)
|
||||
- #8090 `a2df115` Adding P2SH(p2pkh) script test case (Christewart)
|
||||
- #7992 `ec45cc5` Extend #7956 with one more test (TheBlueMatt)
|
||||
- #8139 `ae5575b` Fix interrupted HTTP RPC connection workaround for Python 3.5+ (sipa)
|
||||
- #8164 `0f24eaf` [Bitcoin-Tx] fix missing test fixtures, fix 32bit atoi issue (jonasschnelli)
|
||||
- #8166 `0b5279f` Src/test: Do not shadow local variables (paveljanik)
|
||||
- #8141 `44c1b1c` Continuing port of java comparison tool (mrbandrews)
|
||||
- #8201 `36b7400` fundrawtransaction: Fix race, assert amounts (MarcoFalke)
|
||||
- #8214 `ed2cd59` Mininode: fail on send_message instead of silent return (MarcoFalke)
|
||||
- #8215 `a072d1a` Don't use floating point in wallet tests (MarcoFalke)
|
||||
- #8066 `65c2058` Test_framework: Use different rpc_auth_pair for each node (MarcoFalke)
|
||||
- #8216 `0d41d70` Assert 'changePosition out of bounds' (MarcoFalke)
|
||||
- #8222 `961893f` Enable mempool consistency checks in unit tests (sipa)
|
||||
- #7751 `84370d5` test_framework: python3.4 authproxy compat (laanwj)
|
||||
- #7744 `d8e862a` test_framework: detect failure of bitcoind startup (laanwj)
|
||||
- #8280 `115735d` Increase sync_blocks() timeouts in pruning.py (MarcoFalke)
|
||||
- #8340 `af9b7a9` Solve trivial merge conflict in p2p-segwit.py (MarcoFalke)
|
||||
- #8067 `3e4cf8f` Travis: use slim generic image, and some fixups (theuni)
|
||||
- #7951 `5c7df70` Test_framework: Properly print exception (MarcoFalke)
|
||||
- #8070 `7771aa5` Remove non-determinism which is breaking net_tests #8069 (EthanHeilman)
|
||||
- #8309 `bb2646a` Add wallet-hd test (MarcoFalke)
|
||||
- #8444 `cd0910b` Fix p2p-feefilter.py for changed tx relay behavior (sdaftuar)
|
||||
|
||||
### Mining
|
||||
|
||||
- #7507 `11c7699` Remove internal miner (Leviathn)
|
||||
- #7663 `c87f51e` Make the generate RPC call function for non-regtest (sipa)
|
||||
- #7671 `e2ebd25` Add generatetoaddress RPC to mine to an address (achow101)
|
||||
- #7935 `66ed450` Versionbits: GBT support (luke-jr)
|
||||
- #7600 `66db2d6` Select transactions using feerate-with-ancestors (sdaftuar)
|
||||
- #8295 `f5660d3` Mining-related fixups for 0.13.0 (sdaftuar)
|
||||
- #7796 `536b75e` Add support for negative fee rates, fixes `prioritizetransaction` (MarcoFalke)
|
||||
- #8362 `86edc20` Scale legacy sigop count in CreateNewBlock (sdaftuar)
|
||||
- #8489 `8b0eee6` Bugfix: Use pre-BIP141 sigops until segwit activates (GBT) (luke-jr)
|
||||
|
||||
### Documentation and miscellaneous
|
||||
|
||||
- #7423 `69e2a40` Add example for building with constrained resources (jarret)
|
||||
- #8254 `c2c69ed` Add OSX ZMQ requirement to QA readme (fanquake)
|
||||
- #8203 `377d131` Clarify documentation for running a tor node (nathaniel-mahieu)
|
||||
- #7428 `4b12266` Add example for listing ./configure flags (nathaniel-mahieu)
|
||||
- #7847 `3eae681` Add arch linux build example (mruddy)
|
||||
- #7968 `ff69aaf` Fedora build requirements (wtogami)
|
||||
- #8013 `fbedc09` Fedora build requirements, add gcc-c++ and fix typo (wtogami)
|
||||
- #8009 `fbd8478` Fixed invalid example paths in gitian-building.md (JeremyRand)
|
||||
- #8240 `63fbdbc` Mention Windows XP end of support in release notes (laanwj)
|
||||
- #8303 `5077d2c` Update bips.md for CSV softfork (fanquake)
- #7789 `e0b3e19` Add note about using the Qt official binary installer (paveljanik)
- #7791 `e30a5b0` Change Precise to Trusty in gitian-building.md (JeremyRand)
- #7838 `8bb5d3d` Update gitian build guide to debian 8.4.0 (fanquake)
- #7855 `b778e59` Replace precise with trusty (MarcoFalke)
- #7975 `fc23fee` Update bitcoin-core GitHub links (MarcoFalke)
- #8034 `e3a8207` Add basic git squash workflow (fanquake)
- #7813 `214ec0b` Update port in tor.md (MarcoFalke)
- #8193 `37c9830` Use Debian 8.5 in the gitian-build guide (fanquake)
- #8261 `3685e0c` Clarify help for `getblockchaininfo` (paveljanik)
- #7185 `ea0f5a2` Note that reviewers should mention the id of the commits they reviewed (pstratem)
- #7290 `c851d8d` [init] Add missing help for args (MarcoFalke)
- #7281 `f9fd4c2` Improve CheckInputs() comment about sig verification (petertodd)
- #7417 `1e06bab` Minor improvements to the release process (PRabahy)
- #7444 `4cdbd42` Improve block validity/ConnectBlock() comments (petertodd)
- #7527 `db2e1c0` Fix and cleanup listreceivedbyX documentation (instagibbs)
- #7541 `b6e00af` Clarify description of blockindex (pinheadmz)
- #7590 `f06af57` Improving wording related to Boost library requirements [updated] (jonathancross)
- #7635 `0fa88ef` Add dependency info to test docs (elliotolds)
- #7609 `3ba07bd` RPM spec file project (AliceWonderMiscreations)
- #7850 `229a17c` Removed call to `TryCreateDirectory` from `GetDefaultDataDir` in `src/util.cpp` (alexreg)
- #7888 `ec870e1` Prevector: fix 2 bugs in currently unreached code paths (kazcw)
- #7922 `90653bc` CBase58Data::SetString: cleanse the full vector (kazcw)
- #7881 `c4e8390` Update release process (laanwj)
- #7952 `a9c8b74` Log invalid block hash to make debugging easier (paveljanik)
- #7974 `8206835` More comments on the design of AttemptToEvictConnection (gmaxwell)
- #7795 `47a7cfb` UpdateTip: log only one line at most per block (laanwj)
- #8110 `e7e25ea` Add benchmarking notes (fanquake)
- #8121 `58f0c92` Update implemented BIPs list (fanquake)
- #8029 `58725ba` Simplify OS X build notes (fanquake)
- #8143 `d46b8b5` comment nit: miners don't vote (instagibbs)
- #8136 `22e0b35` Log/report in 10% steps during VerifyDB (jonasschnelli)
- #8168 `d366185` util: Add ParseUInt32 and ParseUInt64 (laanwj)
- #8178 `f7b1bfc` Add git and github tips and tricks to developer notes (sipa)
- #8177 `67db011` developer notes: updates for C++11 (kazcw)
- #8229 `8ccdac1` [Doc] Update OS X build notes for 10.11 SDK (fanquake)
- #8233 `9f1807a` Mention Linux ARM executables in release process and notes (laanwj)
- #7540 `ff46dd4` Rename OP_NOP3 to OP_CHECKSEQUENCEVERIFY (btcdrak)
- #8289 `26316ff` bash-completion: Adapt for 0.12 and 0.13 (roques)
- #7453 `3dc3149` Missing patches from 0.12 (MarcoFalke)
- #7113 `54a550b` Switch to a more efficient rolling Bloom filter (sipa)
- #7257 `de9e5ea` Combine common error strings for different options so translations can be shared and reused (luke-jr)
- #7304 `b8f485c` [contrib] Add clang-format-diff.py (MarcoFalke)
- #7378 `e6f97ef` devtools: replace github-merge with python version (laanwj)
- #7395 `0893705` devtools: show pull and commit information in github-merge (laanwj)
- #7402 `6a5932b` devtools: github-merge get toplevel dir without extra whitespace (achow101)
- #7425 `20a408c` devtools: Fix utf-8 support in messages for github-merge (laanwj)
- #7632 `409f843` Delete outdated test-patches reference (Lewuathe)
- #7662 `386f438` remove unused NOBLKS_VERSION_{START,END} constants (rat4)
- #7737 `aa0d2b2` devtools: make github-merge.py use py3 (laanwj)
- #7781 `55db5f0` devtools: Auto-set branch to merge to in github-merge (laanwj)
- #7934 `f17032f` Improve rolling bloom filter performance and benchmark (sipa)
- #8004 `2efe38b` signal handling: fReopenDebugLog and fRequestShutdown should be type sig_atomic_t (catilac)
- #7713 `f6598df` Fixes for verify-commits script (petertodd)
- #8412 `8360d5b` libconsensus: Expose a flag for BIP112 (jtimon)
Credits
=======

Thanks to everyone who directly contributed to this release:

- 21E14
- accraze
- Adam Brown
- Alexander Regueiro
- Alex Morcos
- Alfie John
- Alice Wonder
- AlSzacrel
- Andrew Chow
- Andrés G. Aragoneses
- Bob McElrath
- BtcDrak
- calebogden
- Cédric Félizard
- Chirag Davé
- Chris Moore
- Chris Stewart
- Christian von Roques
- Chris Wheeler
- Cory Fields
- crowning-
- Daniel Cousens
- Daniel Kraft
- Denis Lukianov
- Elias Rohrer
- Elliot Olds
- Eric Shaw
- error10
- Ethan Heilman
- face
- fanquake
- Francesco 'makevoid' Canessa
- fsb4000
- Gavin Andresen
- gladoscc
- Gregory Maxwell
- Gregory Sanders
- instagibbs
- James O'Beirne
- Jannes Faber
- Jarret Dyrbye
- Jeremy Rand
- jloughry
- jmacwhyte
- Joao Fonseca
- Johnson Lau
- Jonas Nick
- Jonas Schnelli
- Jonathan Cross
- João Barbosa
- Jorge Timón
- Kaz Wesley
- Kefkius
- kirkalx
- Krzysztof Jurewicz
- Leviathn
- lewuathe
- Luke Dashjr
- Luv Khemani
- Marcel Krüger
- Marco Falke
- Mark Friedenbach
- Matt
- Matt Bogosian
- Matt Corallo
- Matthew English
- Matthew Zipkin
- mb300sd
- Mitchell Cash
- mrbandrews
- mruddy
- Murch
- Mustafa
- Nathaniel Mahieu
- Nicolas Dorier
- Patrick Strateman
- Paul Rabahy
- paveljanik
- Pavel Janík
- Pavel Vasin
- Pedro Branco
- Peter Todd
- Philip Kaufmann
- Pieter Wuille
- Prayag Verma
- ptschip
- Puru
- randy-waterhouse
- R E Broadley
- Rusty Russell
- Suhas Daftuar
- Suriyaa Kudo
- TheLazieR Yip
- Thomas Kerin
- Tom Harding
- Tyler Hardin
- UdjinM6
- Warren Togami
- Will Binns
- Wladimir J. van der Laan
- Yuri Zhykin

As well as everyone that helped translating on [Transifex](https://www.transifex.com/projects/p/bitcoin/).
@@ -112,16 +112,16 @@ The gbuild invocations below <b>DO NOT DO THIS</b> by default.

### Build and sign Bitcoin Core for Linux, Windows, and OS X:

    pushd ./gitian-builder
    ./bin/gbuild --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
    ./bin/gbuild --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
    ./bin/gsign --signer $SIGNER --release ${VERSION}-linux --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-linux.yml
    mv build/out/bitcoin-*.tar.gz build/out/src/bitcoin-*.tar.gz ../

    ./bin/gbuild --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
    ./bin/gbuild --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
    ./bin/gsign --signer $SIGNER --release ${VERSION}-win-unsigned --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-win.yml
    mv build/out/bitcoin-*-win-unsigned.tar.gz inputs/bitcoin-win-unsigned.tar.gz
    mv build/out/bitcoin-*.zip build/out/bitcoin-*.exe ../

    ./bin/gbuild --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
    ./bin/gbuild --memory 3000 --commit bitcoin=v${VERSION} ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
    ./bin/gsign --signer $SIGNER --release ${VERSION}-osx-unsigned --destination ../gitian.sigs/ ../bitcoin/contrib/gitian-descriptors/gitian-osx.yml
    mv build/out/bitcoin-*-osx-unsigned.tar.gz inputs/bitcoin-osx-unsigned.tar.gz
    mv build/out/bitcoin-*.tar.gz build/out/bitcoin-*.dmg ../
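For signers who repeat this sequence every release candidate, the three build/sign pairs can be scripted. The following is a minimal sketch in Python; it assumes the same layout as above (a `gitian-builder/` checkout next to `bitcoin/` and `gitian.sigs/`) and the same `VERSION` and `SIGNER` environment variables, and it intentionally leaves out the `mv` steps.

```python
#!/usr/bin/env python3
# Sketch only: drive the gbuild/gsign invocations shown above from Python.
# Assumes the directory layout from this guide (gitian-builder/, bitcoin/,
# gitian.sigs/ as siblings) and that VERSION and SIGNER are exported.
import os
import subprocess

VERSION = os.environ["VERSION"]
SIGNER = os.environ["SIGNER"]

BUILDS = [
    ("gitian-linux.yml", VERSION + "-linux"),
    ("gitian-win.yml", VERSION + "-win-unsigned"),
    ("gitian-osx.yml", VERSION + "-osx-unsigned"),
]

os.chdir("gitian-builder")
for descriptor, release in BUILDS:
    descriptor_path = "../bitcoin/contrib/gitian-descriptors/" + descriptor
    subprocess.check_call(["./bin/gbuild", "--memory", "3000",
                           "--commit", "bitcoin=v" + VERSION, descriptor_path])
    subprocess.check_call(["./bin/gsign", "--signer", SIGNER,
                           "--release", release,
                           "--destination", "../gitian.sigs/", descriptor_path])
```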
@@ -30,12 +30,17 @@ The interface is defined in the C header `bitcoinconsensus.h` located in `src/s
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_NONE`
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_P2SH` - Evaluate P2SH ([BIP16](https://github.com/bitcoin/bips/blob/master/bip-0016.mediawiki)) subscripts
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_DERSIG` - Enforce strict DER ([BIP66](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki)) compliance
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_NULLDUMMY` - Enforce NULLDUMMY ([BIP147](https://github.com/bitcoin/bips/blob/master/bip-0147.mediawiki))
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY` - Enable CHECKLOCKTIMEVERIFY ([BIP65](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki))
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY` - Enable CHECKSEQUENCEVERIFY ([BIP112](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki))
- `bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS` - Enable WITNESS ([BIP141](https://github.com/bitcoin/bips/blob/master/bip-0141.mediawiki))

##### Errors
- `bitcoinconsensus_ERR_OK` - No errors with input parameters *(see the return value of `bitcoinconsensus_verify_script` for the verification status)*
- `bitcoinconsensus_ERR_TX_INDEX` - An invalid index for `txTo`
- `bitcoinconsensus_ERR_TX_SIZE_MISMATCH` - `txToLen` did not match with the size of `txTo`
- `bitcoinconsensus_ERR_DESERIALIZE` - An error deserializing `txTo`
- `bitcoinconsensus_ERR_AMOUNT_REQUIRED` - Input amount is required if WITNESS is used

### Example Implementations
- [NBitcoin](https://github.com/NicolasDorier/NBitcoin/blob/master/NBitcoin/Script.cs#L814) (.NET Bindings)
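Because the library exposes a plain C ABI, the flags and error codes above can be exercised from any language with an FFI. Below is a minimal, hedged sketch using Python's `ctypes`; the shared library name and the numeric flag values are assumptions here (the flags are taken to mirror the bit positions defined in `bitcoinconsensus.h`) and should be verified against the installed header before use.

```python
# Sketch: call libbitcoinconsensus' verify_script via ctypes.
# Assumptions: the library is installed as libbitcoinconsensus.so and the
# constants below carry the bit values from bitcoinconsensus.h.
import ctypes

lib = ctypes.CDLL("libbitcoinconsensus.so")

bitcoinconsensus_SCRIPT_FLAGS_VERIFY_P2SH = 1 << 0
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_DERSIG = 1 << 2
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY = 1 << 9
bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY = 1 << 10

def verify_script(script_pubkey: bytes, tx_to: bytes, input_index: int, flags: int) -> bool:
    """Return True if input `input_index` of serialized tx `tx_to` validly
    spends `script_pubkey` under the given verification flags."""
    err = ctypes.c_int(0)
    result = lib.bitcoinconsensus_verify_script(
        script_pubkey, ctypes.c_uint(len(script_pubkey)),
        tx_to, ctypes.c_uint(len(tx_to)),
        ctypes.c_uint(input_index), ctypes.c_uint(flags),
        ctypes.byref(err))
    # err receives one of the bitcoinconsensus_ERR_* codes listed above when
    # the *inputs* were malformed; result is the script verification status.
    return result == 1

# Example flag combination: P2SH + strict DER + CLTV + CSV.
flags = (bitcoinconsensus_SCRIPT_FLAGS_VERIFY_P2SH
         | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_DERSIG
         | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKLOCKTIMEVERIFY
         | bitcoinconsensus_SCRIPT_FLAGS_VERIFY_CHECKSEQUENCEVERIFY)
```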
@@ -6,7 +6,7 @@ The Bitcoin-Core project has been designed to support multiple localisations. Th
### Helping to translate (using Transifex)
Transifex is setup to monitor the Github repo for updates, and when code containing new translations is found, Transifex will process any changes. It may take several hours after a pull-request has been merged, to appear in the Transifex web interface.

Multiple language support is critical in assisting Bitcoin’s global adoption, and growth. One of Bitcoin’s greatest strengths is cross-boarder money transfers, any help making that easier is greatly appreciated.
Multiple language support is critical in assisting Bitcoin’s global adoption, and growth. One of Bitcoin’s greatest strengths is cross-border money transfers, any help making that easier is greatly appreciated.

See the [Transifex Bitcoin project](https://www.transifex.com/projects/p/bitcoin/) to assist in translations. You should also join the translation mailing list for announcements - see details below.

@@ -94,7 +94,7 @@ When new plurals are added to the source file, it's important to do the followin
7. Save the source file

### Translating a new language
To create a new language template, you will need to edit the languages manifest file `src/qt/bitcoin.qrc` and add a new entry. Below is an example of the english language entry.
To create a new language template, you will need to edit the languages manifest file `src/qt/bitcoin_locale.qrc` and add a new entry. Below is an example of the English language entry.

```xml
<qresource prefix="/translations">
@@ -41,8 +41,8 @@ Run all possible tests with

    qa/pull-tester/rpc-tests.py -extended

By default, tests will be run in parallel if you want to specify how many
tests should be run in parallel, append `-parallel=n` (default n=4).
By default, tests will be run in parallel. To specify how many jobs to run,
append `-parallel=n` (default n=4).

If you want to create a basic coverage report for the rpc test suite, append `--coverage`.
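As a usage example, a wrapper that runs the extended suite with eight parallel jobs and a coverage report could look like the sketch below (assuming it is started from the repository root after a successful build).

```python
# Sketch: invoke the RPC test runner with the options described above.
import subprocess

subprocess.check_call([
    "qa/pull-tester/rpc-tests.py",
    "-extended",     # also run the extended (slower) tests
    "-parallel=8",   # run up to 8 test scripts at once (default n=4)
    "--coverage",    # write a basic RPC coverage report
])
```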
@@ -94,12 +94,12 @@ if not (ENABLE_WALLET == 1 and ENABLE_UTILS == 1 and ENABLE_BITCOIND == 1):
|
||||
if ENABLE_ZMQ:
|
||||
try:
|
||||
import zmq
|
||||
except ImportError as e:
|
||||
print("WARNING: \"import zmq\" failed. Set ENABLE_ZMQ=0 or " \
|
||||
"to run zmq tests, see dependency info in /qa/README.md.")
|
||||
ENABLE_ZMQ=0
|
||||
except ImportError:
|
||||
print("ERROR: \"import zmq\" failed. Set ENABLE_ZMQ=0 or "
|
||||
"to run zmq tests, see dependency info in /qa/README.md.")
|
||||
# ENABLE_ZMQ=0
|
||||
raise
|
||||
|
||||
#Tests
|
||||
testScripts = [
|
||||
# longest test should go first, to favor running tests in parallel
|
||||
'p2p-fullblocktest.py',
|
||||
@@ -107,6 +107,7 @@ testScripts = [
|
||||
'bip68-112-113-p2p.py',
|
||||
'wallet.py',
|
||||
'wallet-hd.py',
|
||||
'wallet-dump.py',
|
||||
'listtransactions.py',
|
||||
'receivedby.py',
|
||||
'mempool_resurrect_test.py',
|
||||
@@ -141,6 +142,8 @@ testScripts = [
|
||||
'segwit.py',
|
||||
'importprunedfunds.py',
|
||||
'signmessages.py',
|
||||
'p2p-compactblocks.py',
|
||||
'nulldummy.py',
|
||||
]
|
||||
if ENABLE_ZMQ:
|
||||
testScripts.append('zmq_test.py')
|
||||
@@ -158,7 +161,7 @@ testScriptsExt = [
|
||||
'txn_clone.py --mineblock',
|
||||
'forknotify.py',
|
||||
'invalidateblock.py',
|
||||
# 'rpcbind_test.py', #temporary, bug in libevent, see #6655
|
||||
'rpcbind_test.py',
|
||||
'smartfees.py',
|
||||
'maxblocksinflight.py',
|
||||
'p2p-acceptblock.py',
|
||||
@@ -248,21 +251,27 @@ class RPCTestHandler:
|
||||
self.num_running += 1
|
||||
t = self.test_list.pop(0)
|
||||
port_seed = ["--portseed=%s" % len(self.test_list)]
|
||||
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
|
||||
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
|
||||
self.jobs.append((t,
|
||||
time.time(),
|
||||
subprocess.Popen((RPC_TESTS_DIR + t).split() + self.flags + port_seed,
|
||||
universal_newlines=True,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE)))
|
||||
stdout=log_stdout,
|
||||
stderr=log_stderr),
|
||||
log_stdout,
|
||||
log_stderr))
|
||||
if not self.jobs:
|
||||
raise IndexError('pop from empty list')
|
||||
while True:
|
||||
# Return first proc that finishes
|
||||
time.sleep(.5)
|
||||
for j in self.jobs:
|
||||
(name, time0, proc) = j
|
||||
(name, time0, proc, log_out, log_err) = j
|
||||
if proc.poll() is not None:
|
||||
(stdout, stderr) = proc.communicate(timeout=3)
|
||||
log_out.seek(0), log_err.seek(0)
|
||||
[stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)]
|
||||
log_out.close(), log_err.close()
|
||||
passed = stderr == "" and proc.returncode == 0
|
||||
self.num_running -= 1
|
||||
self.jobs.remove(j)
|
||||
|
||||
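The hunk above switches the per-test stdout/stderr from `subprocess.PIPE` to `tempfile.SpooledTemporaryFile`, so a verbose test can no longer fill the OS pipe buffer and stall the runner; output is only read back after the process exits. A self-contained sketch of that pattern (the command list is hypothetical, not the real test scripts):

```python
import subprocess
import tempfile
import time

def run_in_parallel(commands):
    """Start all commands at once, buffering their output in spooled
    temporary files, and collect (command, returncode, stdout, stderr)."""
    jobs = []
    for cmd in commands:
        log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
        log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
        proc = subprocess.Popen(cmd, stdout=log_stdout, stderr=log_stderr)
        jobs.append((cmd, proc, log_stdout, log_stderr))

    results = []
    while jobs:
        time.sleep(0.5)
        for job in jobs:
            cmd, proc, log_out, log_err = job
            if proc.poll() is not None:
                log_out.seek(0), log_err.seek(0)
                stdout, stderr = [f.read().decode('utf-8') for f in (log_out, log_err)]
                log_out.close(), log_err.close()
                results.append((cmd, proc.returncode, stdout, stderr))
                jobs.remove(job)
                break  # the list was mutated; rescan from the start
    return results

# Example (hypothetical commands):
# run_in_parallel([["python3", "wallet.py"], ["python3", "listtransactions.py"]])
```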
@@ -68,7 +68,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
|
||||
# In mempool txs from self should increase balance from change
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance - Decimal("30") + Decimal("24.9996"))
|
||||
assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
|
||||
balance = newbalance
|
||||
|
||||
# Restart the node with a higher min relay fee so the parent tx is no longer in mempool
|
||||
@@ -78,16 +78,16 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
|
||||
|
||||
# Verify txs no longer in mempool
|
||||
assert(len(self.nodes[0].getrawmempool()) == 0)
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
|
||||
# Not in mempool txs from self should only reduce balance
|
||||
# inputs are still spent, but change not received
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance - Decimal("24.9996"))
|
||||
assert_equal(newbalance, balance - Decimal("24.9996"))
|
||||
# Unconfirmed received funds that are not in mempool, also shouldn't show
|
||||
# up in unconfirmed balance
|
||||
unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
|
||||
assert(unconfbalance == newbalance)
|
||||
assert_equal(unconfbalance, newbalance)
|
||||
# Also shouldn't show up in listunspent
|
||||
assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
|
||||
balance = newbalance
|
||||
@@ -96,35 +96,35 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
# including that the child tx was also abandoned
|
||||
self.nodes[0].abandontransaction(txAB1)
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance + Decimal("30"))
|
||||
assert_equal(newbalance, balance + Decimal("30"))
|
||||
balance = newbalance
|
||||
|
||||
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
|
||||
stop_node(self.nodes[0],0)
|
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
|
||||
assert(len(self.nodes[0].getrawmempool()) == 0)
|
||||
assert(self.nodes[0].getbalance() == balance)
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
assert_equal(self.nodes[0].getbalance(), balance)
|
||||
|
||||
# But if its received again then it is unabandoned
|
||||
# And since now in mempool, the change is available
|
||||
# But its child tx remains abandoned
|
||||
self.nodes[0].sendrawtransaction(signed["hex"])
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance - Decimal("20") + Decimal("14.99998"))
|
||||
assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
|
||||
balance = newbalance
|
||||
|
||||
# Send child tx again so its unabandoned
|
||||
self.nodes[0].sendrawtransaction(signed2["hex"])
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
|
||||
assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
|
||||
balance = newbalance
|
||||
|
||||
# Remove using high relay fee again
|
||||
stop_node(self.nodes[0],0)
|
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
|
||||
assert(len(self.nodes[0].getrawmempool()) == 0)
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance - Decimal("24.9996"))
|
||||
assert_equal(newbalance, balance - Decimal("24.9996"))
|
||||
balance = newbalance
|
||||
|
||||
# Create a double spend of AB1 by spending again from only A's 10 output
|
||||
@@ -143,7 +143,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
|
||||
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
assert(newbalance == balance + Decimal("20"))
|
||||
assert_equal(newbalance, balance + Decimal("20"))
|
||||
balance = newbalance
|
||||
|
||||
# There is currently a minor bug around this and so this test doesn't work. See Issue #7315
|
||||
@@ -151,7 +151,7 @@ class AbandonConflictTest(BitcoinTestFramework):
|
||||
# Don't think C's should either
|
||||
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
|
||||
newbalance = self.nodes[0].getbalance()
|
||||
#assert(newbalance == balance - Decimal("10"))
|
||||
#assert_equal(newbalance, balance - Decimal("10"))
|
||||
print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
|
||||
print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
|
||||
print(str(balance) + " -> " + str(newbalance) + " ?")
|
||||
|
||||
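These hunks replace bare `assert(a == b)` checks with `assert_equal(a, b)` from `test_framework/util.py`, so a failing balance check reports both values instead of a bare `AssertionError`. A minimal sketch of the idea (not the framework's exact implementation):

```python
from decimal import Decimal

def assert_equal(actual, expected):
    # On failure, show both operands; a bare assert would only say
    # "AssertionError" and hide the wallet balances being compared.
    if actual != expected:
        raise AssertionError("not(%s == %s)" % (actual, expected))

assert_equal(Decimal("24.9996"), Decimal("24.9996"))   # passes
# assert_equal(Decimal("30"), Decimal("24.9996"))      # would report both values
```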
@@ -195,7 +195,6 @@ class BIP9SoftForksTest(ComparisonTestFramework):
|
||||
# Restart all
|
||||
self.test.block_store.close()
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
shutil.rmtree(self.options.tmpdir)
|
||||
self.setup_chain()
|
||||
self.setup_network()
|
||||
|
||||
@@ -12,9 +12,15 @@ from test_framework.test_framework import BitcoinTestFramework
|
||||
|
||||
class CreateCache(BitcoinTestFramework):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
# Test network and test nodes are not required:
|
||||
self.num_nodes = 0
|
||||
self.nodes = []
|
||||
|
||||
def setup_network(self):
|
||||
# Don't setup any test nodes
|
||||
self.options.noshutdown = True
|
||||
pass
|
||||
|
||||
def run_test(self):
|
||||
pass
|
||||
|
||||
@@ -22,7 +22,7 @@ class ForkNotifyTest(BitcoinTestFramework):
|
||||
def setup_network(self):
|
||||
self.nodes = []
|
||||
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
|
||||
with open(self.alert_filename, 'w') as f:
|
||||
with open(self.alert_filename, 'w', encoding='utf8') as f:
|
||||
pass # Just open then close to create zero-length file
|
||||
self.nodes.append(start_node(0, self.options.tmpdir,
|
||||
["-blockversion=2", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]))
|
||||
@@ -44,7 +44,7 @@ class ForkNotifyTest(BitcoinTestFramework):
|
||||
self.nodes[1].generate(1)
|
||||
self.sync_all()
|
||||
|
||||
with open(self.alert_filename, 'r') as f:
|
||||
with open(self.alert_filename, 'r', encoding='utf8') as f:
|
||||
alert_text = f.read()
|
||||
|
||||
if len(alert_text) == 0:
|
||||
@@ -56,7 +56,7 @@ class ForkNotifyTest(BitcoinTestFramework):
|
||||
self.nodes[1].generate(1)
|
||||
self.sync_all()
|
||||
|
||||
with open(self.alert_filename, 'r') as f:
|
||||
with open(self.alert_filename, 'r', encoding='utf8') as f:
|
||||
alert_text2 = f.read()
|
||||
|
||||
if alert_text != alert_text2:
|
||||
|
||||
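The change above adds an explicit `encoding='utf8'` to every `open()` of the `-alertnotify` output file, so the write and the later read agree on an encoding regardless of the platform's default locale. A standalone illustration of the pattern (file path and message text are illustrative):

```python
# Sketch: create the notification file empty, append to it, and read it
# back, always with an explicit encoding.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    alert_filename = os.path.join(tmpdir, "alert.txt")
    with open(alert_filename, 'w', encoding='utf8') as f:
        pass  # zero-length file, as in setup_network() above
    with open(alert_filename, 'a', encoding='utf8') as f:
        f.write("Warning: unknown new rules activated\n")
    with open(alert_filename, 'r', encoding='utf8') as f:
        assert "unknown new rules" in f.read()
```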
@@ -470,7 +470,6 @@ class RawTransactionsTest(BitcoinTestFramework):
|
||||
self.nodes[1].encryptwallet("test")
|
||||
self.nodes.pop(1)
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
|
||||
# This test is not meant to test fee estimation and we'd like
|
||||
|
||||
@@ -20,14 +20,10 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
|
||||
self.is_network_split=False
|
||||
self.sync_all()
|
||||
|
||||
def run_test (self):
|
||||
import time
|
||||
begintime = int(time.time())
|
||||
|
||||
def run_test(self):
|
||||
print("Mining blocks...")
|
||||
self.nodes[0].generate(101)
|
||||
|
||||
# sync
|
||||
self.sync_all()
|
||||
|
||||
# address
|
||||
@@ -72,7 +68,6 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
|
||||
rawtxn2 = self.nodes[0].gettransaction(txnid2)['hex']
|
||||
proof2 = self.nodes[0].gettxoutproof([txnid2])
|
||||
|
||||
|
||||
txnid3 = self.nodes[0].sendtoaddress(address3, 0.025)
|
||||
self.nodes[0].generate(1)
|
||||
rawtxn3 = self.nodes[0].gettransaction(txnid3)['hex']
|
||||
@@ -82,28 +77,27 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
|
||||
|
||||
#Import with no affiliated address
|
||||
try:
|
||||
result1 = self.nodes[1].importprunedfunds(rawtxn1, proof1, "")
|
||||
self.nodes[1].importprunedfunds(rawtxn1, proof1)
|
||||
except JSONRPCException as e:
|
||||
assert('No addresses' in e.error['message'])
|
||||
else:
|
||||
assert(False)
|
||||
|
||||
|
||||
balance1 = self.nodes[1].getbalance("", 0, True)
|
||||
assert_equal(balance1, Decimal(0))
|
||||
|
||||
#Import with affiliated address with no rescan
|
||||
self.nodes[1].importaddress(address2, "", False)
|
||||
result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2, "")
|
||||
balance2 = Decimal(self.nodes[1].getbalance("", 0, True))
|
||||
self.nodes[1].importaddress(address2, "add2", False)
|
||||
result2 = self.nodes[1].importprunedfunds(rawtxn2, proof2)
|
||||
balance2 = self.nodes[1].getbalance("add2", 0, True)
|
||||
assert_equal(balance2, Decimal('0.05'))
|
||||
|
||||
#Import with private key with no rescan
|
||||
self.nodes[1].importprivkey(address3_privkey, "", False)
|
||||
result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3, "")
|
||||
balance3 = Decimal(self.nodes[1].getbalance("", 0, False))
|
||||
self.nodes[1].importprivkey(address3_privkey, "add3", False)
|
||||
result3 = self.nodes[1].importprunedfunds(rawtxn3, proof3)
|
||||
balance3 = self.nodes[1].getbalance("add3", 0, False)
|
||||
assert_equal(balance3, Decimal('0.025'))
|
||||
balance3 = Decimal(self.nodes[1].getbalance("", 0, True))
|
||||
balance3 = self.nodes[1].getbalance("*", 0, True)
|
||||
assert_equal(balance3, Decimal('0.075'))
|
||||
|
||||
#Addresses Test - after import
|
||||
@@ -118,7 +112,6 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
|
||||
assert_equal(address_info['ismine'], True)
|
||||
|
||||
#Remove transactions
|
||||
|
||||
try:
|
||||
self.nodes[1].removeprunedfunds(txnid1)
|
||||
except JSONRPCException as e:
|
||||
@@ -126,18 +119,16 @@ class ImportPrunedFundsTest(BitcoinTestFramework):
|
||||
else:
|
||||
assert(False)
|
||||
|
||||
|
||||
balance1 = Decimal(self.nodes[1].getbalance("", 0, True))
|
||||
balance1 = self.nodes[1].getbalance("*", 0, True)
|
||||
assert_equal(balance1, Decimal('0.075'))
|
||||
|
||||
|
||||
self.nodes[1].removeprunedfunds(txnid2)
|
||||
balance2 = Decimal(self.nodes[1].getbalance("", 0, True))
|
||||
balance2 = self.nodes[1].getbalance("*", 0, True)
|
||||
assert_equal(balance2, Decimal('0.025'))
|
||||
|
||||
self.nodes[1].removeprunedfunds(txnid3)
|
||||
balance3 = Decimal(self.nodes[1].getbalance("", 0, True))
|
||||
balance3 = self.nodes[1].getbalance("*", 0, True)
|
||||
assert_equal(balance3, Decimal('0.0'))
|
||||
|
||||
if __name__ == '__main__':
|
||||
ImportPrunedFundsTest ().main ()
|
||||
ImportPrunedFundsTest().main()
|
||||
|
||||
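With the label/account argument dropped from `importprunedfunds`, the flow exercised above is: fetch the raw transaction and its `gettxoutproof` from a node that has the block, import the address without rescan on the receiving wallet, then import the proven funds. A condensed sketch against two running nodes (labels and the amount are illustrative, not taken from the test):

```python
def import_pruned_payment(node_full, node_pruned, address, label):
    """Sketch: receive a payment on node_full, then make it visible to the
    wallet on node_pruned without a rescan."""
    txid = node_full.sendtoaddress(address, 0.05)
    node_full.generate(1)
    rawtx = node_full.gettransaction(txid)['hex']
    proof = node_full.gettxoutproof([txid])

    # Watch the address first (no rescan), then import the proven funds.
    node_pruned.importaddress(address, label, False)
    node_pruned.importprunedfunds(rawtx, proof)
    return node_pruned.getbalance(label, 0, True)
```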
@@ -12,6 +12,11 @@ class KeyPoolTest(BitcoinTestFramework):
|
||||
|
||||
def run_test(self):
|
||||
nodes = self.nodes
|
||||
addr_before_encrypting = nodes[0].getnewaddress()
|
||||
addr_before_encrypting_data = nodes[0].validateaddress(addr_before_encrypting)
|
||||
wallet_info_old = nodes[0].getwalletinfo()
|
||||
assert(addr_before_encrypting_data['hdmasterkeyid'] == wallet_info_old['hdmasterkeyid'])
|
||||
|
||||
# Encrypt wallet and wait to terminate
|
||||
nodes[0].encryptwallet('test')
|
||||
bitcoind_processes[0].wait()
|
||||
@@ -19,6 +24,11 @@ class KeyPoolTest(BitcoinTestFramework):
|
||||
nodes[0] = start_node(0, self.options.tmpdir)
|
||||
# Keep creating keys
|
||||
addr = nodes[0].getnewaddress()
|
||||
addr_data = nodes[0].validateaddress(addr)
|
||||
wallet_info = nodes[0].getwalletinfo()
|
||||
assert(addr_before_encrypting_data['hdmasterkeyid'] != wallet_info['hdmasterkeyid'])
|
||||
assert(addr_data['hdmasterkeyid'] == wallet_info['hdmasterkeyid'])
|
||||
|
||||
try:
|
||||
addr = nodes[0].getnewaddress()
|
||||
raise AssertionError('Keypool should be exhausted after one address')
|
||||
|
||||
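The lines added above assert that `encryptwallet` rotates the HD master key: an address created before encryption reports the old `hdmasterkeyid`, and addresses created after the restart report the new one. A compact sketch of the same check (the node must be restarted after `encryptwallet`, as the test does with `bitcoind_processes[0].wait()` and `start_node`; the helper names below are illustrative):

```python
def check_hd_master_key_rotation(node, restart_node):
    """node: RPC proxy before encryption; restart_node: callable returning
    a fresh proxy once bitcoind has restarted (sketch, not framework code)."""
    old_id = node.getwalletinfo()['hdmasterkeyid']
    assert node.validateaddress(node.getnewaddress())['hdmasterkeyid'] == old_id

    node.encryptwallet('test')      # bitcoind shuts itself down after this call
    node = restart_node()           # e.g. start_node(0, tmpdir) in the test

    new_id = node.getwalletinfo()['hdmasterkeyid']
    assert new_id != old_id
    assert node.validateaddress(node.getnewaddress())['hdmasterkeyid'] == new_id
```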
@@ -75,7 +75,7 @@ class TestNode(NodeConnCB):
|
||||
def received_pong():
|
||||
return (self.last_pong.nonce == self.ping_counter)
|
||||
self.connection.send_message(msg_ping(nonce=self.ping_counter))
|
||||
success = wait_until(received_pong, timeout)
|
||||
success = wait_until(received_pong, timeout=timeout)
|
||||
self.ping_counter += 1
|
||||
return success
|
||||
|
||||
|
||||
@@ -26,7 +26,7 @@ class HTTPBasicsTest (BitcoinTestFramework):
|
||||
#Append rpcauth to bitcoin.conf before initialization
|
||||
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
|
||||
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
|
||||
with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a') as f:
|
||||
with open(os.path.join(self.options.tmpdir+"/node0", "bitcoin.conf"), 'a', encoding='utf8') as f:
|
||||
f.write(rpcauth+"\n")
|
||||
f.write(rpcauth2+"\n")
|
||||
|
||||
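The `rpcauth=` lines appended to `bitcoin.conf` above have the form `user:salt$hash`, where the hash is an HMAC-SHA256 of the password keyed with the hex salt; this mirrors the scheme produced by the helper script under `share/rpcuser/` (treat that location as an assumption and check the repository). A minimal sketch of generating such a line:

```python
# Sketch: build an rpcauth= credential line of the form user:salt$hmac.
import hmac
import os

def make_rpcauth_line(username, password):
    # 16 random bytes rendered as hex serve as both the salt and the HMAC key.
    salt = os.urandom(16).hex()
    digest = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256').hexdigest()
    return "rpcauth=%s:%s$%s" % (username, salt, digest)

print(make_rpcauth_line("rt", "correct horse battery staple"))
```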
|
||||
qa/rpc-tests/nulldummy.py (new executable file, 148 lines)
@@ -0,0 +1,148 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2016 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import *
|
||||
from test_framework.mininode import CTransaction, NetworkThread
|
||||
from test_framework.blocktools import create_coinbase, create_block, add_witness_commitment
|
||||
from test_framework.script import CScript
|
||||
from io import BytesIO
|
||||
import time
|
||||
|
||||
NULLDUMMY_ERROR = "64: non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
|
||||
|
||||
def trueDummy(tx):
|
||||
scriptSig = CScript(tx.vin[0].scriptSig)
|
||||
newscript = []
|
||||
for i in scriptSig:
|
||||
if (len(newscript) == 0):
|
||||
assert(len(i) == 0)
|
||||
newscript.append(b'\x51')
|
||||
else:
|
||||
newscript.append(i)
|
||||
tx.vin[0].scriptSig = CScript(newscript)
|
||||
tx.rehash()
|
||||
|
||||
'''
|
||||
This test is meant to exercise NULLDUMMY softfork.
|
||||
Connect to a single node.
|
||||
Generate 2 blocks (save the coinbases for later).
|
||||
Generate 427 more blocks.
|
||||
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in the 430th block.
|
||||
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
|
||||
[Consensus] Check that the new NULLDUMMY rules are not enforced on the 431st block.
|
||||
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on the 432nd block.
|
||||
'''
|
||||
|
||||
class NULLDUMMYTest(BitcoinTestFramework):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.num_nodes = 1
|
||||
self.setup_clean_chain = True
|
||||
|
||||
def setup_network(self):
|
||||
# Must set the blockversion for this test
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
|
||||
extra_args=[['-debug', '-whitelist=127.0.0.1', '-walletprematurewitness']])
|
||||
|
||||
def run_test(self):
|
||||
self.address = self.nodes[0].getnewaddress()
|
||||
self.ms_address = self.nodes[0].addmultisigaddress(1,[self.address])
|
||||
self.wit_address = self.nodes[0].addwitnessaddress(self.address)
|
||||
self.wit_ms_address = self.nodes[0].addwitnessaddress(self.ms_address)
|
||||
|
||||
NetworkThread().start() # Start up network handling in another thread
|
||||
self.coinbase_blocks = self.nodes[0].generate(2) # Block 2
|
||||
coinbase_txid = []
|
||||
for i in self.coinbase_blocks:
|
||||
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
|
||||
self.nodes[0].generate(427) # Block 429
|
||||
self.lastblockhash = self.nodes[0].getbestblockhash()
|
||||
self.tip = int("0x" + self.lastblockhash, 0)
|
||||
self.lastblockheight = 429
|
||||
self.lastblocktime = int(time.time()) + 429
|
||||
|
||||
print ("Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [430]")
|
||||
test1txs = [self.create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, 49)]
|
||||
txid1 = self.tx_submit(self.nodes[0], test1txs[0])
|
||||
test1txs.append(self.create_transaction(self.nodes[0], txid1, self.ms_address, 48))
|
||||
txid2 = self.tx_submit(self.nodes[0], test1txs[1])
|
||||
test1txs.append(self.create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, 49))
|
||||
txid3 = self.tx_submit(self.nodes[0], test1txs[2])
|
||||
self.block_submit(self.nodes[0], test1txs, False, True)
|
||||
|
||||
print ("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
|
||||
test2tx = self.create_transaction(self.nodes[0], txid2, self.ms_address, 48)
|
||||
trueDummy(test2tx)
|
||||
txid4 = self.tx_submit(self.nodes[0], test2tx, NULLDUMMY_ERROR)
|
||||
|
||||
print ("Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [431]")
|
||||
self.block_submit(self.nodes[0], [test2tx], False, True)
|
||||
|
||||
print ("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
|
||||
test4tx = self.create_transaction(self.nodes[0], txid4, self.address, 47)
|
||||
test6txs=[CTransaction(test4tx)]
|
||||
trueDummy(test4tx)
|
||||
self.tx_submit(self.nodes[0], test4tx, NULLDUMMY_ERROR)
|
||||
self.block_submit(self.nodes[0], [test4tx])
|
||||
|
||||
print ("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
|
||||
test5tx = self.create_transaction(self.nodes[0], txid3, self.wit_address, 48)
|
||||
test6txs.append(CTransaction(test5tx))
|
||||
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
|
||||
self.tx_submit(self.nodes[0], test5tx, NULLDUMMY_ERROR)
|
||||
self.block_submit(self.nodes[0], [test5tx], True)
|
||||
|
||||
print ("Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [432]")
|
||||
for i in test6txs:
|
||||
self.tx_submit(self.nodes[0], i)
|
||||
self.block_submit(self.nodes[0], test6txs, True, True)
|
||||
|
||||
|
||||
def create_transaction(self, node, txid, to_address, amount):
|
||||
inputs = [{ "txid" : txid, "vout" : 0}]
|
||||
outputs = { to_address : amount }
|
||||
rawtx = node.createrawtransaction(inputs, outputs)
|
||||
signresult = node.signrawtransaction(rawtx)
|
||||
tx = CTransaction()
|
||||
f = BytesIO(hex_str_to_bytes(signresult['hex']))
|
||||
tx.deserialize(f)
|
||||
return tx
|
||||
|
||||
|
||||
def tx_submit(self, node, tx, msg = ""):
|
||||
tx.rehash()
|
||||
try:
|
||||
node.sendrawtransaction(bytes_to_hex_str(tx.serialize_with_witness()), True)
|
||||
except JSONRPCException as exp:
|
||||
assert_equal(exp.error["message"], msg)
|
||||
else:
|
||||
assert_equal('', msg)
|
||||
return tx.hash
|
||||
|
||||
|
||||
def block_submit(self, node, txs, witness = False, accept = False):
|
||||
block = create_block(self.tip, create_coinbase(self.lastblockheight + 1), self.lastblocktime + 1)
|
||||
block.nVersion = 4
|
||||
for tx in txs:
|
||||
tx.rehash()
|
||||
block.vtx.append(tx)
|
||||
block.hashMerkleRoot = block.calc_merkle_root()
|
||||
witness and add_witness_commitment(block)
|
||||
block.rehash()
|
||||
block.solve()
|
||||
node.submitblock(bytes_to_hex_str(block.serialize(True)))
|
||||
if (accept):
|
||||
assert_equal(node.getbestblockhash(), block.hash)
|
||||
self.tip = block.sha256
|
||||
self.lastblockhash = block.hash
|
||||
self.lastblocktime += 1
|
||||
self.lastblockheight += 1
|
||||
else:
|
||||
assert_equal(node.getbestblockhash(), self.lastblockhash)
|
||||
|
||||
if __name__ == '__main__':
|
||||
NULLDUMMYTest().main()
|
||||
qa/rpc-tests/p2p-compactblocks.py (new executable file, 852 lines)
@@ -0,0 +1,852 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2016 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
from test_framework.mininode import *
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import *
|
||||
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
|
||||
from test_framework.siphash import siphash256
|
||||
from test_framework.script import CScript, OP_TRUE
|
||||
|
||||
'''
|
||||
CompactBlocksTest -- test compact blocks (BIP 152)
|
||||
|
||||
Version 1 compact blocks are pre-segwit (txids)
|
||||
Version 2 compact blocks are post-segwit (wtxids)
|
||||
'''
|
||||
|
||||
# TestNode: A peer we use to send messages to bitcoind, and store responses.
|
||||
class TestNode(SingleNodeConnCB):
|
||||
def __init__(self):
|
||||
SingleNodeConnCB.__init__(self)
|
||||
self.last_sendcmpct = []
|
||||
self.last_headers = None
|
||||
self.last_inv = None
|
||||
self.last_cmpctblock = None
|
||||
self.block_announced = False
|
||||
self.last_getdata = None
|
||||
self.last_getblocktxn = None
|
||||
self.last_block = None
|
||||
self.last_blocktxn = None
|
||||
# Store the hashes of blocks we've seen announced.
|
||||
# This is for synchronizing the p2p message traffic,
|
||||
# so we can eg wait until a particular block is announced.
|
||||
self.set_announced_blockhashes = set()
|
||||
|
||||
def on_sendcmpct(self, conn, message):
|
||||
self.last_sendcmpct.append(message)
|
||||
|
||||
def on_block(self, conn, message):
|
||||
self.last_block = message
|
||||
|
||||
def on_cmpctblock(self, conn, message):
|
||||
self.last_cmpctblock = message
|
||||
self.block_announced = True
|
||||
self.last_cmpctblock.header_and_shortids.header.calc_sha256()
|
||||
self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)
|
||||
|
||||
def on_headers(self, conn, message):
|
||||
self.last_headers = message
|
||||
self.block_announced = True
|
||||
for x in self.last_headers.headers:
|
||||
x.calc_sha256()
|
||||
self.set_announced_blockhashes.add(x.sha256)
|
||||
|
||||
def on_inv(self, conn, message):
|
||||
self.last_inv = message
|
||||
for x in self.last_inv.inv:
|
||||
if x.type == 2:
|
||||
self.block_announced = True
|
||||
self.set_announced_blockhashes.add(x.hash)
|
||||
|
||||
def on_getdata(self, conn, message):
|
||||
self.last_getdata = message
|
||||
|
||||
def on_getblocktxn(self, conn, message):
|
||||
self.last_getblocktxn = message
|
||||
|
||||
def on_blocktxn(self, conn, message):
|
||||
self.last_blocktxn = message
|
||||
|
||||
# Requires caller to hold mininode_lock
|
||||
def received_block_announcement(self):
|
||||
return self.block_announced
|
||||
|
||||
def clear_block_announcement(self):
|
||||
with mininode_lock:
|
||||
self.block_announced = False
|
||||
self.last_inv = None
|
||||
self.last_headers = None
|
||||
self.last_cmpctblock = None
|
||||
|
||||
def get_headers(self, locator, hashstop):
|
||||
msg = msg_getheaders()
|
||||
msg.locator.vHave = locator
|
||||
msg.hashstop = hashstop
|
||||
self.connection.send_message(msg)
|
||||
|
||||
def send_header_for_blocks(self, new_blocks):
|
||||
headers_message = msg_headers()
|
||||
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
|
||||
self.send_message(headers_message)
|
||||
|
||||
def request_headers_and_sync(self, locator, hashstop=0):
|
||||
self.clear_block_announcement()
|
||||
self.get_headers(locator, hashstop)
|
||||
assert(wait_until(self.received_block_announcement, timeout=30))
|
||||
assert(self.received_block_announcement())
|
||||
self.clear_block_announcement()
|
||||
|
||||
# Block until a block announcement for a particular block hash is
|
||||
# received.
|
||||
def wait_for_block_announcement(self, block_hash, timeout=30):
|
||||
def received_hash():
|
||||
return (block_hash in self.set_announced_blockhashes)
|
||||
return wait_until(received_hash, timeout=timeout)
|
||||
|
||||
class CompactBlocksTest(BitcoinTestFramework):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.setup_clean_chain = True
|
||||
# Node0 = pre-segwit, node1 = segwit-aware
|
||||
self.num_nodes = 2
|
||||
self.utxos = []
|
||||
|
||||
def setup_network(self):
|
||||
self.nodes = []
|
||||
|
||||
# Start up node0 to be a version 1, pre-segwit node.
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
|
||||
[["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"],
|
||||
["-debug", "-logtimemicros", "-txindex"]])
|
||||
connect_nodes(self.nodes[0], 1)
|
||||
|
||||
def build_block_on_tip(self, node, segwit=False):
|
||||
height = node.getblockcount()
|
||||
tip = node.getbestblockhash()
|
||||
mtp = node.getblockheader(tip)['mediantime']
|
||||
block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
|
||||
block.nVersion = 4
|
||||
if segwit:
|
||||
add_witness_commitment(block)
|
||||
block.solve()
|
||||
return block
|
||||
|
||||
# Create 10 more anyone-can-spend utxo's for testing.
|
||||
def make_utxos(self):
|
||||
# Doesn't matter which node we use, just use node0.
|
||||
block = self.build_block_on_tip(self.nodes[0])
|
||||
self.test_node.send_and_ping(msg_block(block))
|
||||
assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
|
||||
self.nodes[0].generate(100)
|
||||
|
||||
total_value = block.vtx[0].vout[0].nValue
|
||||
out_value = total_value // 10
|
||||
tx = CTransaction()
|
||||
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
|
||||
for i in range(10):
|
||||
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
|
||||
tx.rehash()
|
||||
|
||||
block2 = self.build_block_on_tip(self.nodes[0])
|
||||
block2.vtx.append(tx)
|
||||
block2.hashMerkleRoot = block2.calc_merkle_root()
|
||||
block2.solve()
|
||||
self.test_node.send_and_ping(msg_block(block2))
|
||||
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
|
||||
self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
|
||||
return
|
||||
|
||||
# Test "sendcmpct" (between peers preferring the same version):
|
||||
# - No compact block announcements unless sendcmpct is sent.
|
||||
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
|
||||
# - If sendcmpct is sent with boolean 0, then block announcements are not
|
||||
# made with compact blocks.
|
||||
# - If sendcmpct is then sent with boolean 1, then new block announcements
|
||||
# are made with compact blocks.
|
||||
# If old_node is passed in, request compact blocks with version=preferred-1
|
||||
# and verify that it receives block announcements via compact block.
|
||||
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
|
||||
# Make sure we get a SENDCMPCT message from our peer
|
||||
def received_sendcmpct():
|
||||
return (len(test_node.last_sendcmpct) > 0)
|
||||
got_message = wait_until(received_sendcmpct, timeout=30)
|
||||
assert(received_sendcmpct())
|
||||
assert(got_message)
|
||||
with mininode_lock:
|
||||
# Check that the first version received is the preferred one
|
||||
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
|
||||
# And that we receive versions down to 1.
|
||||
assert_equal(test_node.last_sendcmpct[-1].version, 1)
|
||||
test_node.last_sendcmpct = []
|
||||
|
||||
tip = int(node.getbestblockhash(), 16)
|
||||
|
||||
def check_announcement_of_new_block(node, peer, predicate):
|
||||
peer.clear_block_announcement()
|
||||
node.generate(1)
|
||||
got_message = wait_until(lambda: peer.block_announced, timeout=30)
|
||||
assert(peer.block_announced)
|
||||
assert(got_message)
|
||||
with mininode_lock:
|
||||
assert(predicate(peer))
|
||||
|
||||
# We shouldn't get any block announcements via cmpctblock yet.
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
|
||||
|
||||
# Try one more time, this time after requesting headers.
|
||||
test_node.request_headers_and_sync(locator=[tip])
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
|
||||
|
||||
# Test a few ways of using sendcmpct that should NOT
|
||||
# result in compact block announcements.
|
||||
# Before each test, sync the headers chain.
|
||||
test_node.request_headers_and_sync(locator=[tip])
|
||||
|
||||
# Now try a SENDCMPCT message with too-high version
|
||||
sendcmpct = msg_sendcmpct()
|
||||
sendcmpct.version = preferred_version+1
|
||||
sendcmpct.announce = True
|
||||
test_node.send_and_ping(sendcmpct)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
|
||||
|
||||
# Headers sync before next test.
|
||||
test_node.request_headers_and_sync(locator=[tip])
|
||||
|
||||
# Now try a SENDCMPCT message with valid version, but announce=False
|
||||
sendcmpct.version = preferred_version
|
||||
sendcmpct.announce = False
|
||||
test_node.send_and_ping(sendcmpct)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
|
||||
|
||||
# Headers sync before next test.
|
||||
test_node.request_headers_and_sync(locator=[tip])
|
||||
|
||||
# Finally, try a SENDCMPCT message with announce=True
|
||||
sendcmpct.version = preferred_version
|
||||
sendcmpct.announce = True
|
||||
test_node.send_and_ping(sendcmpct)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
|
||||
|
||||
# Try one more time (no headers sync should be needed!)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
|
||||
|
||||
# Try one more time, after turning on sendheaders
|
||||
test_node.send_and_ping(msg_sendheaders())
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
|
||||
|
||||
# Try one more time, after sending a version-1, announce=false message.
|
||||
sendcmpct.version = preferred_version-1
|
||||
sendcmpct.announce = False
|
||||
test_node.send_and_ping(sendcmpct)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
|
||||
|
||||
# Now turn off announcements
|
||||
sendcmpct.version = preferred_version
|
||||
sendcmpct.announce = False
|
||||
test_node.send_and_ping(sendcmpct)
|
||||
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
|
||||
|
||||
if old_node is not None:
|
||||
# Verify that a peer using an older protocol version can receive
|
||||
# announcements from this node.
|
||||
sendcmpct.version = preferred_version-1
|
||||
sendcmpct.announce = True
|
||||
old_node.send_and_ping(sendcmpct)
|
||||
# Header sync
|
||||
old_node.request_headers_and_sync(locator=[tip])
|
||||
check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
|
||||
|
||||
# This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
|
||||
def test_invalid_cmpctblock_message(self):
|
||||
self.nodes[0].generate(101)
|
||||
block = self.build_block_on_tip(self.nodes[0])
|
||||
|
||||
cmpct_block = P2PHeaderAndShortIDs()
|
||||
cmpct_block.header = CBlockHeader(block)
|
||||
cmpct_block.prefilled_txn_length = 1
|
||||
# This index will be too high
|
||||
prefilled_txn = PrefilledTransaction(1, block.vtx[0])
|
||||
cmpct_block.prefilled_txn = [prefilled_txn]
|
||||
self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
|
||||
assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
|
||||
|
||||
# Compare the generated shortids to what we expect based on BIP 152, given
|
||||
# bitcoind's choice of nonce.
|
||||
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
|
||||
# Generate a bunch of transactions.
|
||||
node.generate(101)
|
||||
num_transactions = 25
|
||||
address = node.getnewaddress()
|
||||
if use_witness_address:
|
||||
# Want at least one segwit spend, so move all funds to
|
||||
# a witness address.
|
||||
address = node.addwitnessaddress(address)
|
||||
value_to_send = node.getbalance()
|
||||
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
|
||||
node.generate(1)
|
||||
|
||||
segwit_tx_generated = False
|
||||
for i in range(num_transactions):
|
||||
txid = node.sendtoaddress(address, 0.1)
|
||||
hex_tx = node.gettransaction(txid)["hex"]
|
||||
tx = FromHex(CTransaction(), hex_tx)
|
||||
if not tx.wit.is_null():
|
||||
segwit_tx_generated = True
|
||||
|
||||
if use_witness_address:
|
||||
assert(segwit_tx_generated) # check that our test is not broken
|
||||
|
||||
# Wait until we've seen the block announcement for the resulting tip
|
||||
tip = int(self.nodes[0].getbestblockhash(), 16)
|
||||
assert(self.test_node.wait_for_block_announcement(tip))
|
||||
|
||||
# Now mine a block, and look at the resulting compact block.
|
||||
test_node.clear_block_announcement()
|
||||
block_hash = int(node.generate(1)[0], 16)
|
||||
|
||||
# Store the raw block in our internal format.
|
||||
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
|
||||
[tx.calc_sha256() for tx in block.vtx]
|
||||
block.rehash()
|
||||
|
||||
# Don't care which type of announcement came back for this test; just
|
||||
# request the compact block if we didn't get one yet.
|
||||
wait_until(test_node.received_block_announcement, timeout=30)
|
||||
assert(test_node.received_block_announcement())
|
||||
|
||||
with mininode_lock:
|
||||
if test_node.last_cmpctblock is None:
|
||||
test_node.clear_block_announcement()
|
||||
inv = CInv(4, block_hash) # 4 == "CompactBlock"
|
||||
test_node.send_message(msg_getdata([inv]))
|
||||
|
||||
wait_until(test_node.received_block_announcement, timeout=30)
|
||||
assert(test_node.received_block_announcement())
|
||||
|
||||
# Now we should have the compactblock
|
||||
header_and_shortids = None
|
||||
with mininode_lock:
|
||||
assert(test_node.last_cmpctblock is not None)
|
||||
# Convert the on-the-wire representation to absolute indexes
|
||||
header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
|
||||
|
||||
# Check that we got the right block!
|
||||
header_and_shortids.header.calc_sha256()
|
||||
assert_equal(header_and_shortids.header.sha256, block_hash)
|
||||
|
||||
# Make sure the prefilled_txn appears to have included the coinbase
|
||||
assert(len(header_and_shortids.prefilled_txn) >= 1)
|
||||
assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
|
||||
|
||||
# Check that all prefilled_txn entries match what's in the block.
|
||||
for entry in header_and_shortids.prefilled_txn:
|
||||
entry.tx.calc_sha256()
|
||||
# This checks the non-witness parts of the tx agree
|
||||
assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
|
||||
|
||||
# And this checks the witness
|
||||
wtxid = entry.tx.calc_sha256(True)
|
||||
if version == 2:
|
||||
assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
|
||||
else:
|
||||
# Shouldn't have received a witness
|
||||
assert(entry.tx.wit.is_null())
|
||||
|
||||
# Check that the cmpctblock message announced all the transactions.
|
||||
assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
|
||||
|
||||
# And now check that all the shortids are as expected as well.
|
||||
# Determine the siphash keys to use.
|
||||
[k0, k1] = header_and_shortids.get_siphash_keys()
|
||||
|
||||
index = 0
|
||||
while index < len(block.vtx):
|
||||
if (len(header_and_shortids.prefilled_txn) > 0 and
|
||||
header_and_shortids.prefilled_txn[0].index == index):
|
||||
# Already checked prefilled transactions above
|
||||
header_and_shortids.prefilled_txn.pop(0)
|
||||
else:
|
||||
tx_hash = block.vtx[index].sha256
|
||||
if version == 2:
|
||||
tx_hash = block.vtx[index].calc_sha256(True)
|
||||
shortid = calculate_shortid(k0, k1, tx_hash)
|
||||
assert_equal(shortid, header_and_shortids.shortids[0])
|
||||
header_and_shortids.shortids.pop(0)
|
||||
index += 1
|
||||
|
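# For reference, the short IDs verified above follow BIP 152: the siphash
# keys are derived from SHA256(block header || 8-byte little-endian nonce),
# with k0/k1 being the first two little-endian uint64s of that hash (what
# get_siphash_keys() returns), and each short ID is SipHash-2-4 of the txid
# (version 1) or wtxid (version 2) truncated to its low 48 bits. A
# standalone sketch using the siphash256/ser_uint256 helpers imported at the
# top of this file (the name below is illustrative, not framework code):
def bip152_shortid(k0, k1, tx_hash):
    return siphash256(k0, k1, ser_uint256(tx_hash)) & 0x0000ffffffffffff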
||||
# Test that bitcoind requests compact blocks when we announce new blocks
|
||||
# via header or inv, and that responding to getblocktxn causes the block
|
||||
# to be successfully reconstructed.
|
||||
# Post-segwit: upgraded nodes would only make this request of cb-version-2,
|
||||
# NODE_WITNESS peers. Unupgraded nodes would still make this request of
|
||||
# any cb-version-1-supporting peer.
|
||||
def test_compactblock_requests(self, node, test_node, version, segwit):
|
||||
# Try announcing a block with an inv or header, expect a compactblock
|
||||
# request
|
||||
for announce in ["inv", "header"]:
|
||||
block = self.build_block_on_tip(node, segwit=segwit)
|
||||
with mininode_lock:
|
||||
test_node.last_getdata = None
|
||||
|
||||
if announce == "inv":
|
||||
test_node.send_message(msg_inv([CInv(2, block.sha256)]))
|
||||
else:
|
||||
test_node.send_header_for_blocks([block])
|
||||
success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
|
||||
assert(success)
|
||||
assert_equal(len(test_node.last_getdata.inv), 1)
|
||||
assert_equal(test_node.last_getdata.inv[0].type, 4)
|
||||
assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
|
||||
|
||||
# Send back a compactblock message that omits the coinbase
|
||||
comp_block = HeaderAndShortIDs()
|
||||
comp_block.header = CBlockHeader(block)
|
||||
comp_block.nonce = 0
|
||||
[k0, k1] = comp_block.get_siphash_keys()
|
||||
coinbase_hash = block.vtx[0].sha256
|
||||
if version == 2:
|
||||
coinbase_hash = block.vtx[0].calc_sha256(True)
|
||||
comp_block.shortids = [
|
||||
calculate_shortid(k0, k1, coinbase_hash) ]
|
||||
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
|
||||
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
|
||||
# Expect a getblocktxn message.
|
||||
with mininode_lock:
|
||||
assert(test_node.last_getblocktxn is not None)
|
||||
absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
|
||||
assert_equal(absolute_indexes, [0]) # should be a coinbase request
|
||||
|
||||
# Send the coinbase, and verify that the tip advances.
|
||||
if version == 2:
|
||||
msg = msg_witness_blocktxn()
|
||||
else:
|
||||
msg = msg_blocktxn()
|
||||
msg.block_transactions.blockhash = block.sha256
|
||||
msg.block_transactions.transactions = [block.vtx[0]]
|
||||
test_node.send_and_ping(msg)
|
||||
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
|
||||
|
||||
# Create a chain of transactions from given utxo, and add to a new block.
|
||||
def build_block_with_transactions(self, node, utxo, num_transactions):
|
||||
block = self.build_block_on_tip(node)
|
||||
|
||||
for i in range(num_transactions):
|
||||
tx = CTransaction()
|
||||
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
|
||||
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
|
||||
tx.rehash()
|
||||
utxo = [tx.sha256, 0, tx.vout[0].nValue]
|
||||
block.vtx.append(tx)
|
||||
|
||||
block.hashMerkleRoot = block.calc_merkle_root()
|
||||
block.solve()
|
||||
return block
|
||||
|
||||
# Test that we only receive getblocktxn requests for transactions that the
|
||||
# node needs, and that responding to them causes the block to be
|
||||
# reconstructed.
|
||||
def test_getblocktxn_requests(self, node, test_node, version):
|
||||
with_witness = (version==2)
|
||||
|
||||
def test_getblocktxn_response(compact_block, peer, expected_result):
|
||||
msg = msg_cmpctblock(compact_block.to_p2p())
|
||||
peer.send_and_ping(msg)
|
||||
with mininode_lock:
|
||||
assert(peer.last_getblocktxn is not None)
|
||||
absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
|
||||
assert_equal(absolute_indexes, expected_result)
|
||||
|
||||
def test_tip_after_message(node, peer, msg, tip):
|
||||
peer.send_and_ping(msg)
|
||||
assert_equal(int(node.getbestblockhash(), 16), tip)
|
||||
|
||||
# First try announcing compactblocks that won't reconstruct, and verify
|
||||
# that we receive getblocktxn messages back.
|
||||
utxo = self.utxos.pop(0)
|
||||
|
||||
block = self.build_block_with_transactions(node, utxo, 5)
|
||||
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
|
||||
comp_block = HeaderAndShortIDs()
|
||||
comp_block.initialize_from_block(block, use_witness=with_witness)
|
||||
|
||||
test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
|
||||
|
||||
msg_bt = msg_blocktxn()
|
||||
if with_witness:
|
||||
msg_bt = msg_witness_blocktxn() # serialize with witnesses
|
||||
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
|
||||
test_tip_after_message(node, test_node, msg_bt, block.sha256)
|
||||
|
||||
utxo = self.utxos.pop(0)
|
||||
block = self.build_block_with_transactions(node, utxo, 5)
|
||||
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
|
||||
|
||||
# Now try interspersing the prefilled transactions
|
||||
comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
|
||||
test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
|
||||
msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
|
||||
test_tip_after_message(node, test_node, msg_bt, block.sha256)
|
||||
|
||||
# Now try giving one transaction ahead of time.
|
||||
utxo = self.utxos.pop(0)
|
||||
block = self.build_block_with_transactions(node, utxo, 5)
|
||||
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
|
||||
test_node.send_and_ping(msg_tx(block.vtx[1]))
|
||||
assert(block.vtx[1].hash in node.getrawmempool())
|
||||
|
||||
# Prefill 4 out of the 6 transactions, and verify that only the one
|
||||
# that was not in the mempool is requested.
|
||||
comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
|
||||
test_getblocktxn_response(comp_block, test_node, [5])
|
||||
|
||||
msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
|
||||
test_tip_after_message(node, test_node, msg_bt, block.sha256)
|
||||
|
||||
# Now provide all transactions to the node before the block is
|
||||
# announced and verify reconstruction happens immediately.
|
||||
utxo = self.utxos.pop(0)
|
||||
block = self.build_block_with_transactions(node, utxo, 10)
|
||||
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
|
||||
for tx in block.vtx[1:]:
|
||||
test_node.send_message(msg_tx(tx))
|
||||
test_node.sync_with_ping()
|
||||
# Make sure all transactions were accepted.
|
||||
mempool = node.getrawmempool()
|
||||
for tx in block.vtx[1:]:
|
||||
assert(tx.hash in mempool)
|
||||
|
||||
# Clear out last request.
|
||||
with mininode_lock:
|
||||
test_node.last_getblocktxn = None
|
||||
|
||||
# Send compact block
|
||||
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
|
||||
test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
|
||||
with mininode_lock:
|
||||
# Shouldn't have gotten a request for any transaction
|
||||
assert(test_node.last_getblocktxn is None)
|
||||
|
||||
# Incorrectly responding to a getblocktxn shouldn't cause the block to be
|
||||
# permanently failed.
|
||||
def test_incorrect_blocktxn_response(self, node, test_node, version):
|
||||
if (len(self.utxos) == 0):
|
||||
self.make_utxos()
|
||||
utxo = self.utxos.pop(0)
|
||||
|
||||
block = self.build_block_with_transactions(node, utxo, 10)
|
||||
self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
|
||||
# Relay the first 5 transactions from the block in advance
|
||||
for tx in block.vtx[1:6]:
|
||||
test_node.send_message(msg_tx(tx))
|
||||
test_node.sync_with_ping()
|
||||
# Make sure all transactions were accepted.
|
||||
mempool = node.getrawmempool()
|
||||
for tx in block.vtx[1:6]:
|
||||
assert(tx.hash in mempool)
|
||||
|
||||
# Send compact block
|
||||
comp_block = HeaderAndShortIDs()
|
||||
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
|
||||
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
|
||||
absolute_indexes = []
|
||||
with mininode_lock:
|
||||
assert(test_node.last_getblocktxn is not None)
|
||||
absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
|
||||
assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
|
||||
|
||||
# Now give an incorrect response.
|
||||
# Note that it's possible for bitcoind to be smart enough to know we're
|
||||
# lying, since it could check to see if the shortid matches what we're
|
||||
# sending, and eg disconnect us for misbehavior. If that behavior
|
||||
# change were made, we could just modify this test by having a
|
||||
# different peer provide the block further down, so that we're still
|
||||
# verifying that the block isn't marked bad permanently. This is good
|
||||
# enough for now.
|
||||
msg = msg_blocktxn()
|
||||
if version==2:
|
||||
msg = msg_witness_blocktxn()
|
||||
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
|
||||
test_node.send_and_ping(msg)
|
||||
|
||||
# Tip should not have updated
|
||||
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
|
||||
|
||||
# We should receive a getdata request
|
||||
success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
|
||||
assert(success)
|
||||
assert_equal(len(test_node.last_getdata.inv), 1)
|
||||
assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG)
|
||||
assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
|
||||
|
||||
# Deliver the block
if version==2:
test_node.send_and_ping(msg_witness_block(block))
else:
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
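# --- Illustrative sketch (not part of the test) -----------------------------
# The "check the shortid" idea mentioned above refers to BIP 152 short
# transaction IDs: keys are derived from the block header plus the cmpctblock
# nonce, and each id is a truncated SipHash of the (w)txid. A minimal sketch,
# assuming a siphash256(k0, k1, uint256) helper like the one provided by the
# test framework (the helper name and import path are assumptions, not taken
# from this diff):
def bip152_shortid_sketch(header, nonce, wtxid):
    import hashlib, struct
    key_material = hashlib.sha256(header.serialize() + struct.pack("<Q", nonce)).digest()
    k0 = struct.unpack("<Q", key_material[0:8])[0]
    k1 = struct.unpack("<Q", key_material[8:16])[0]
    # Keep only the low 6 bytes of the SipHash-2-4 output.
    return siphash256(k0, k1, wtxid) & 0x0000ffffffffffff
# -----------------------------------------------------------------------------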
|
||||
|
||||
def test_getblocktxn_handler(self, node, test_node, version):
|
||||
# bitcoind won't respond for blocks whose height is more than 15 blocks
|
||||
# deep.
|
||||
MAX_GETBLOCKTXN_DEPTH = 15
|
||||
chain_height = node.getblockcount()
|
||||
current_height = chain_height
|
||||
while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
|
||||
block_hash = node.getblockhash(current_height)
|
||||
block = FromHex(CBlock(), node.getblock(block_hash, False))
|
||||
|
||||
msg = msg_getblocktxn()
|
||||
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
|
||||
num_to_request = random.randint(1, len(block.vtx))
|
||||
msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
|
||||
test_node.send_message(msg)
|
||||
success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
|
||||
assert(success)
|
||||
|
||||
[tx.calc_sha256() for tx in block.vtx]
|
||||
with mininode_lock:
|
||||
assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
|
||||
all_indices = msg.block_txn_request.to_absolute()
|
||||
for index in all_indices:
|
||||
tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
|
||||
tx.calc_sha256()
|
||||
assert_equal(tx.sha256, block.vtx[index].sha256)
|
||||
if version == 1:
|
||||
# Witnesses should have been stripped
|
||||
assert(tx.wit.is_null())
|
||||
else:
|
||||
# Check that the witness matches
|
||||
assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
|
||||
test_node.last_blocktxn = None
|
||||
current_height -= 1
|
||||
|
||||
# Next request should be ignored, as we're past the allowed depth.
block_hash = node.getblockhash(current_height)
msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
test_node.send_and_ping(msg)
with mininode_lock:
assert_equal(test_node.last_blocktxn, None)
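# --- Illustrative sketch (not part of the test) -----------------------------
# The from_absolute()/to_absolute() calls above rely on BIP 152's differential
# index encoding for getblocktxn: the first index is sent as-is and every later
# index is sent as (current - previous - 1). A standalone sketch of both
# directions (helper names below are placeholders, not framework functions):
def to_differential(absolute_indexes):
    diffs, last = [], -1
    for idx in sorted(absolute_indexes):
        diffs.append(idx - last - 1)
        last = idx
    return diffs

def to_absolute_from_differential(diffs):
    absolute, last = [], -1
    for d in diffs:
        last += d + 1
        absolute.append(last)
    return absolute

assert to_differential([6, 7, 8, 9, 10]) == [6, 0, 0, 0, 0]
assert to_absolute_from_differential([6, 0, 0, 0, 0]) == [6, 7, 8, 9, 10]
# -----------------------------------------------------------------------------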
|
||||
|
||||
def test_compactblocks_not_at_tip(self, node, test_node):
|
||||
# Test that requesting old compactblocks doesn't work.
|
||||
MAX_CMPCTBLOCK_DEPTH = 11
|
||||
new_blocks = []
|
||||
for i in range(MAX_CMPCTBLOCK_DEPTH):
|
||||
test_node.clear_block_announcement()
|
||||
new_blocks.append(node.generate(1)[0])
|
||||
wait_until(test_node.received_block_announcement, timeout=30)
|
||||
|
||||
test_node.clear_block_announcement()
|
||||
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
|
||||
success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
|
||||
assert(success)
|
||||
|
||||
test_node.clear_block_announcement()
|
||||
node.generate(1)
|
||||
wait_until(test_node.received_block_announcement, timeout=30)
|
||||
test_node.clear_block_announcement()
|
||||
test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
|
||||
success = wait_until(lambda: test_node.last_block is not None, timeout=30)
|
||||
assert(success)
|
||||
with mininode_lock:
|
||||
test_node.last_block.block.calc_sha256()
|
||||
assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))
|
||||
|
||||
# Generate an old compactblock, and verify that it's not accepted.
|
||||
cur_height = node.getblockcount()
|
||||
hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
|
||||
block = self.build_block_on_tip(node)
|
||||
block.hashPrevBlock = hashPrevBlock
|
||||
block.solve()
|
||||
|
||||
comp_block = HeaderAndShortIDs()
|
||||
comp_block.initialize_from_block(block)
|
||||
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
|
||||
|
||||
tips = node.getchaintips()
|
||||
found = False
|
||||
for x in tips:
|
||||
if x["hash"] == block.hash:
|
||||
assert_equal(x["status"], "headers-only")
|
||||
found = True
|
||||
break
|
||||
assert(found)
|
||||
|
||||
# Requesting this block via getblocktxn should silently fail
|
||||
# (to avoid fingerprinting attacks).
|
||||
msg = msg_getblocktxn()
|
||||
msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
|
||||
with mininode_lock:
|
||||
test_node.last_blocktxn = None
|
||||
test_node.send_and_ping(msg)
|
||||
with mininode_lock:
|
||||
assert(test_node.last_blocktxn is None)
|
||||
|
||||
def activate_segwit(self, node):
node.generate(144*3)
assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
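# --- Illustrative sketch (not part of the test) -----------------------------
# On regtest the BIP9 retarget period is 144 blocks, so generating 144*3 blocks
# walks the "segwit" deployment through its remaining states to "active"
# (generated blocks signal by default). A sketch that makes the per-period
# state visible, reusing the get_bip9_status helper used above; the exact
# intermediate states depend on where the chain sits in the current period, so
# treat the printed values as informational only:
def activate_segwit_verbose_sketch(node):
    for _ in range(3):
        node.generate(144)
        print(get_bip9_status(node, "segwit")["status"])
    assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
# -----------------------------------------------------------------------------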
|
||||
|
||||
def test_end_to_end_block_relay(self, node, listeners):
|
||||
utxo = self.utxos.pop(0)
|
||||
|
||||
block = self.build_block_with_transactions(node, utxo, 10)
|
||||
|
||||
[l.clear_block_announcement() for l in listeners]
|
||||
|
||||
# ToHex() won't serialize with witness, but this block has no witnesses
|
||||
# anyway. TODO: repeat this test with witness tx's to a segwit node.
|
||||
node.submitblock(ToHex(block))
|
||||
|
||||
for l in listeners:
|
||||
wait_until(lambda: l.received_block_announcement(), timeout=30)
|
||||
with mininode_lock:
|
||||
for l in listeners:
|
||||
assert(l.last_cmpctblock is not None)
|
||||
l.last_cmpctblock.header_and_shortids.header.calc_sha256()
|
||||
assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
|
||||
|
||||
# Helper for enabling cb announcements
# Send the sendcmpct request and sync headers
def request_cb_announcements(self, peer, node, version):
tip = node.getbestblockhash()
peer.get_headers(locator=[int(tip, 16)], hashstop=0)

msg = msg_sendcmpct()
msg.version = version
msg.announce = True
peer.send_and_ping(msg)
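# --- Illustrative usage (mirrors the calls made later in run_test) -----------
# Each peer opts in to compact block announcements at the protocol version it
# supports; version 2 (segwit) announcements are only requested from node1:
#   self.request_cb_announcements(self.test_node, self.nodes[0], 1)
#   self.request_cb_announcements(self.old_node, self.nodes[1], 1)
#   self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
# -----------------------------------------------------------------------------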
|
||||
|
||||
|
||||
def run_test(self):
|
||||
# Setup the p2p connections and start up the network thread.
|
||||
self.test_node = TestNode()
|
||||
self.segwit_node = TestNode()
|
||||
self.old_node = TestNode() # version 1 peer <--> segwit node
|
||||
|
||||
connections = []
|
||||
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
|
||||
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
|
||||
self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
|
||||
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
|
||||
self.old_node, services=NODE_NETWORK))
|
||||
self.test_node.add_connection(connections[0])
|
||||
self.segwit_node.add_connection(connections[1])
|
||||
self.old_node.add_connection(connections[2])
|
||||
|
||||
NetworkThread().start() # Start up network handling in another thread
|
||||
|
||||
# Test logic begins here
|
||||
self.test_node.wait_for_verack()
|
||||
|
||||
# We will need UTXOs to construct transactions in later tests.
|
||||
self.make_utxos()
|
||||
|
||||
print("Running tests, pre-segwit activation:")
|
||||
|
||||
print("\tTesting SENDCMPCT p2p message... ")
|
||||
self.test_sendcmpct(self.nodes[0], self.test_node, 1)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting compactblock construction...")
|
||||
self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting compactblock requests... ")
|
||||
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting getblocktxn requests...")
|
||||
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting getblocktxn handler...")
|
||||
self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
|
||||
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting compactblock requests/announcements not at chain tip...")
|
||||
self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
|
||||
self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting handling of incorrect blocktxn responses...")
|
||||
self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
|
||||
sync_blocks(self.nodes)
|
||||
self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
# End-to-end block relay tests
|
||||
print("\tTesting end-to-end block relay...")
|
||||
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
|
||||
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
|
||||
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
|
||||
self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
|
||||
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
|
||||
|
||||
# Advance to segwit activation
|
||||
print ("\nAdvancing to segwit activation\n")
|
||||
self.activate_segwit(self.nodes[1])
|
||||
print ("Running tests, post-segwit activation...")
|
||||
|
||||
print("\tTesting compactblock construction...")
|
||||
self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
|
||||
self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting compactblock requests (unupgraded node)... ")
|
||||
self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
|
||||
|
||||
print("\tTesting getblocktxn requests (unupgraded node)...")
|
||||
self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
|
||||
|
||||
# Need to manually sync node0 and node1, because post-segwit activation,
|
||||
# node1 will not download blocks from node0.
|
||||
print("\tSyncing nodes...")
|
||||
assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
|
||||
while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
|
||||
block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
|
||||
self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
|
||||
assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
|
||||
|
||||
print("\tTesting compactblock requests (segwit node)... ")
|
||||
self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
|
||||
|
||||
print("\tTesting getblocktxn requests (segwit node)...")
|
||||
self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
print("\tTesting getblocktxn handler (segwit node should return witnesses)...")
|
||||
self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
|
||||
self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
|
||||
|
||||
# Test that if we submitblock to node1, we'll get a compact block
|
||||
# announcement to all peers.
|
||||
# (Post-segwit activation, blocks won't propagate from node0 to node1
|
||||
# automatically, so don't bother testing a block announced to node0.)
|
||||
print("\tTesting end-to-end block relay...")
|
||||
self.request_cb_announcements(self.test_node, self.nodes[0], 1)
|
||||
self.request_cb_announcements(self.old_node, self.nodes[1], 1)
|
||||
self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
|
||||
self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
|
||||
|
||||
print("\tTesting invalid index in cmpctblock message...")
|
||||
self.test_invalid_cmpctblock_message()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
CompactBlocksTest().main()
|
||||
@@ -62,6 +62,7 @@ class FeeFilterTest(BitcoinTestFramework):
|
||||
|
||||
def run_test(self):
|
||||
node1 = self.nodes[1]
|
||||
node0 = self.nodes[0]
|
||||
# Get out of IBD
|
||||
node1.generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
@@ -91,8 +92,17 @@ class FeeFilterTest(BitcoinTestFramework):
|
||||
node1.settxfee(Decimal("0.00010000"))
[node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
sync_mempools(self.nodes) # must be sure node 0 has received all txs
time.sleep(10) # wait 10 secs to be sure it doesn't relay any of them
assert(allInvsMatch([], test_node))

# Send one transaction from node0 that should be received, so that we
# can sync the test on receipt (if node1's txs were relayed, they'd
# be received by the time this node0 tx is received). This is
# unfortunately reliant on the current relay behavior where we batch up
# to 35 entries in an inv, which means that when this next transaction
# is eligible for relay, the prior transactions from node1 are eligible
# as well.
node0.settxfee(Decimal("0.00020000"))
txids = [node0.sendtoaddress(node0.getnewaddress(), 1)]
assert(allInvsMatch(txids, test_node))
|
||||
test_node.clear_invs()
|
||||
|
||||
# Remove fee filter and check that txs are received again
|
||||
|
||||
@@ -63,7 +63,7 @@ class TestNode(NodeConnCB):
|
||||
def received_pong():
|
||||
return (self.last_pong.nonce == self.ping_counter)
|
||||
self.connection.send_message(msg_ping(nonce=self.ping_counter))
|
||||
success = wait_until(received_pong, timeout)
|
||||
success = wait_until(received_pong, timeout=timeout)
|
||||
self.ping_counter += 1
|
||||
return success
|
||||
|
||||
|
||||
@@ -166,6 +166,17 @@ class UTXO(object):
self.n = n
self.nValue = nValue

# Helper for getting the script associated with a P2PKH
def GetP2PKHScript(pubkeyhash):
return CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])

# Add signature for a P2PK witness program.
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
txTo.rehash()
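# --- Illustrative usage (not part of the test) -------------------------------
# sign_P2PK_witness_input() is called further down like this: the witness
# program is a bare <pubkey> OP_CHECKSIG script, and the BIP 143 digest commits
# to the value of the output being spent. Names below are placeholders for
# objects the surrounding tests construct:
#   witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
#   sign_P2PK_witness_input(witness_program, spending_tx, 0, SIGHASH_ALL,
#                           prev_output_value, key)
# After the call, spending_tx.wit.vtxinwit[0].scriptWitness.stack holds
# [signature, witness_program].
# -----------------------------------------------------------------------------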
|
||||
|
||||
|
||||
class SegWitTest(BitcoinTestFramework):
|
||||
def setup_chain(self):
|
||||
@@ -302,13 +313,18 @@ class SegWitTest(BitcoinTestFramework):
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
# We'll add an unnecessary witness to this transaction that would cause
|
||||
# it to be too large according to IsStandard.
|
||||
# it to be non-standard, to test that violating policy with a witness before
|
||||
# segwit activation doesn't blind a node to a transaction. Transactions
|
||||
# rejected for having a witness before segwit activation shouldn't be added
|
||||
# to the rejection cache.
|
||||
tx3 = CTransaction()
|
||||
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
|
||||
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptPubKey))
|
||||
tx3.wit.vtxinwit.append(CTxInWitness())
|
||||
tx3.wit.vtxinwit[0].scriptWitness.stack = [b'a'*400000]
|
||||
tx3.rehash()
|
||||
# Note that this should be rejected for the premature witness reason,
|
||||
# rather than a policy check, since segwit hasn't activated yet.
|
||||
self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
|
||||
|
||||
# If we send without witness, it should be accepted.
|
||||
@@ -904,14 +920,6 @@ class SegWitTest(BitcoinTestFramework):
|
||||
# But eliminating the witness should fix it
|
||||
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
|
||||
|
||||
# Verify that inv's to test_node come with getdata's for non-witness tx's
|
||||
# Just tweak the transaction, announce it, and verify we get a getdata
|
||||
# for a normal tx
|
||||
tx.vout[0].scriptPubKey = CScript([OP_TRUE, OP_TRUE])
|
||||
tx.rehash()
|
||||
self.test_node.announce_tx_and_wait_for_getdata(tx)
|
||||
assert(self.test_node.last_getdata.inv[0].type == 1)
|
||||
|
||||
# Cleanup: mine the first transaction and update utxo
|
||||
self.nodes[0].generate(1)
|
||||
assert_equal(len(self.nodes[0].getrawmempool()), 0)
|
||||
@@ -946,8 +954,7 @@ class SegWitTest(BitcoinTestFramework):
|
||||
self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
|
||||
|
||||
# Verify that removing the witness succeeds.
|
||||
# Re-announcing won't result in a getdata for ~2.5 minutes, so just
|
||||
# deliver the modified transaction.
|
||||
self.test_node.announce_tx_and_wait_for_getdata(tx)
|
||||
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
|
||||
|
||||
# Now try to add extra witness data to a valid witness tx.
|
||||
@@ -961,8 +968,24 @@ class SegWitTest(BitcoinTestFramework):
|
||||
|
||||
tx3 = CTransaction()
|
||||
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
|
||||
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE])))
|
||||
tx3.wit.vtxinwit.append(CTxInWitness())
|
||||
|
||||
# Add too-large for IsStandard witness and check that it does not enter reject filter
|
||||
p2sh_program = CScript([OP_TRUE])
|
||||
p2sh_pubkey = hash160(p2sh_program)
|
||||
witness_program2 = CScript([b'a'*400000])
|
||||
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])))
|
||||
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
|
||||
tx3.rehash()
|
||||
|
||||
# Node will not be blinded to the transaction
|
||||
self.std_node.announce_tx_and_wait_for_getdata(tx3)
|
||||
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
|
||||
self.std_node.announce_tx_and_wait_for_getdata(tx3)
|
||||
self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
|
||||
|
||||
# Remove witness stuffing, instead add extra witness push on stack
|
||||
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
|
||||
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
|
||||
tx3.rehash()
|
||||
|
||||
@@ -1002,7 +1025,7 @@ class SegWitTest(BitcoinTestFramework):
|
||||
def test_block_relay(self, segwit_activated):
|
||||
print("\tTesting block relay")
|
||||
|
||||
blocktype = 2|MSG_WITNESS_FLAG if segwit_activated else 2
|
||||
blocktype = 2|MSG_WITNESS_FLAG
|
||||
|
||||
# test_node has set NODE_WITNESS, so all getdata requests should be for
|
||||
# witness blocks.
|
||||
@@ -1065,12 +1088,12 @@ class SegWitTest(BitcoinTestFramework):
|
||||
assert_equal(wit_block.serialize(False), non_wit_block.serialize())
|
||||
assert_equal(wit_block.serialize(True), block.serialize(True))
|
||||
|
||||
# Test size, vsize, cost
# Test size, vsize, weight
rpc_details = self.nodes[0].getblock(block.hash, True)
assert_equal(rpc_details["size"], len(block.serialize(True)))
assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
cost = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["cost"], cost)
weight = 3*len(block.serialize(False)) + len(block.serialize(True))
assert_equal(rpc_details["weight"], weight)

# Upgraded node should not ask for blocks from unupgraded
block4 = self.build_next_block(nVersion=4)
@@ -1086,6 +1109,82 @@ class SegWitTest(BitcoinTestFramework):
self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
assert(block4.sha256 not in self.old_node.getdataset)
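# --- Illustrative sketch (not part of the test) -----------------------------
# The "weight" assertion a few lines up is the BIP 141 block weight formula:
# weight = 3 * stripped_size + total_size, and virtual size is the weight
# divided by 4, rounded up. A standalone sketch of both quantities:
def block_weight_and_vsize(stripped_size, total_size):
    weight = 3 * stripped_size + total_size
    vsize = (weight + 3) // 4  # ceil(weight / 4)
    return weight, vsize

# A block with no witness data serializes to the same length with and without
# witnesses, so its weight is 4x its size and its vsize equals its size:
assert block_weight_and_vsize(1000, 1000) == (4000, 1000)
# -----------------------------------------------------------------------------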
|
||||
|
||||
# V0 segwit outputs should be standard after activation, but not before.
|
||||
def test_standardness_v0(self, segwit_activated):
|
||||
print("\tTesting standardness of v0 outputs (%s activation)" % ("after" if segwit_activated else "before"))
|
||||
assert(len(self.utxo))
|
||||
|
||||
witness_program = CScript([OP_TRUE])
|
||||
witness_hash = sha256(witness_program)
|
||||
scriptPubKey = CScript([OP_0, witness_hash])
|
||||
|
||||
p2sh_pubkey = hash160(witness_program)
|
||||
p2sh_scriptPubKey = CScript([OP_HASH160, p2sh_pubkey, OP_EQUAL])
|
||||
|
||||
# First prepare a p2sh output (so that spending it will pass standardness)
|
||||
p2sh_tx = CTransaction()
|
||||
p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
|
||||
p2sh_tx.vout = [CTxOut(self.utxo[0].nValue-1000, p2sh_scriptPubKey)]
|
||||
p2sh_tx.rehash()
|
||||
|
||||
# Mine it on test_node to create the confirmed output.
|
||||
self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
# Now test standardness of v0 P2WSH outputs.
|
||||
# Start by creating a transaction with two outputs.
|
||||
tx = CTransaction()
|
||||
tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
|
||||
tx.vout = [CTxOut(p2sh_tx.vout[0].nValue-10000, scriptPubKey)]
|
||||
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
|
||||
tx.rehash()
|
||||
|
||||
self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
|
||||
|
||||
# Now create something that looks like a P2PKH output. This won't be spendable.
|
||||
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
|
||||
tx2 = CTransaction()
|
||||
if segwit_activated:
|
||||
# if tx was accepted, then we spend the second output.
|
||||
tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
|
||||
tx2.vout = [CTxOut(7000, scriptPubKey)]
|
||||
tx2.wit.vtxinwit.append(CTxInWitness())
|
||||
tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
|
||||
else:
|
||||
# if tx wasn't accepted, we just re-spend the p2sh output we started with.
|
||||
tx2.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
|
||||
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
|
||||
tx2.rehash()
|
||||
|
||||
self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
|
||||
|
||||
# Now update self.utxo for later tests.
|
||||
tx3 = CTransaction()
|
||||
if segwit_activated:
|
||||
# tx and tx2 were both accepted. Don't bother trying to reclaim the
|
||||
# P2PKH output; just send tx's first output back to an anyone-can-spend.
|
||||
sync_mempools(self.nodes)
|
||||
tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
|
||||
tx3.vout = [CTxOut(tx.vout[0].nValue-1000, CScript([OP_TRUE]))]
|
||||
tx3.wit.vtxinwit.append(CTxInWitness())
|
||||
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
|
||||
tx3.rehash()
|
||||
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
|
||||
else:
|
||||
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
|
||||
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
|
||||
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
|
||||
tx3.rehash()
|
||||
self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
|
||||
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
self.utxo.pop(0)
|
||||
self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
|
||||
assert_equal(len(self.nodes[1].getrawmempool()), 0)
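# --- Illustrative sketch (not part of the test) -----------------------------
# The two output templates exercised above, built with the same primitives the
# test already imports (CScript, sha256, hash160); OP_TRUE stands in for a real
# script and the variable names are placeholders:
example_program = CScript([OP_TRUE])
# Native v0 P2WSH: witness version byte 0 followed by the SHA256 of the script.
example_p2wsh_spk = CScript([OP_0, sha256(example_program)])
# Plain P2SH: HASH160 of the redeem script, spent by pushing the script in scriptSig.
example_p2sh_spk = CScript([OP_HASH160, hash160(example_program), OP_EQUAL])
# -----------------------------------------------------------------------------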
|
||||
|
||||
|
||||
# Verify that future segwit upgraded transactions are non-standard,
|
||||
# but valid in blocks. Can run this before and after segwit activation.
|
||||
def test_segwit_versions(self):
|
||||
@@ -1232,13 +1331,6 @@ class SegWitTest(BitcoinTestFramework):
|
||||
sync_blocks(self.nodes)
|
||||
self.utxo.pop(0)
|
||||
|
||||
# Add signature for a P2PK witness program.
|
||||
def sign_P2PK_witness_input(script, txTo, inIdx, hashtype, value, key):
|
||||
tx_hash = SegwitVersion1SignatureHash(script, txTo, inIdx, hashtype, value)
|
||||
signature = key.sign(tx_hash) + chr(hashtype).encode('latin-1')
|
||||
txTo.wit.vtxinwit[inIdx].scriptWitness.stack = [signature, script]
|
||||
txTo.rehash()
|
||||
|
||||
# Test each hashtype
|
||||
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
|
||||
for sigflag in [ 0, SIGHASH_ANYONECANPAY ]:
|
||||
@@ -1293,6 +1385,9 @@ class SegWitTest(BitcoinTestFramework):
|
||||
block = self.build_next_block()
|
||||
used_sighash_single_out_of_bounds = False
|
||||
for i in range(NUM_TESTS):
|
||||
# Ping regularly to keep the connection alive
|
||||
if (not i % 100):
|
||||
self.test_node.sync_with_ping()
|
||||
# Choose random number of inputs to use.
|
||||
num_inputs = random.randint(1, 10)
|
||||
# Create a slight bias for producing more utxos
|
||||
@@ -1349,7 +1444,7 @@ class SegWitTest(BitcoinTestFramework):
|
||||
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
|
||||
tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
|
||||
|
||||
script = CScript([CScriptOp(OP_DUP), CScriptOp(OP_HASH160), pubkeyhash, CScriptOp(OP_EQUALVERIFY), CScriptOp(OP_CHECKSIG)])
|
||||
script = GetP2PKHScript(pubkeyhash)
|
||||
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
|
||||
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
|
||||
|
||||
@@ -1612,6 +1707,211 @@ class SegWitTest(BitcoinTestFramework):
|
||||
assert(block_version & (1 << VB_WITNESS_BIT) != 0)
|
||||
self.nodes[0].setmocktime(0) # undo mocktime
|
||||
|
||||
# Uncompressed pubkeys are no longer supported in default relay policy,
|
||||
# but (for now) are still valid in blocks.
|
||||
def test_uncompressed_pubkey(self):
|
||||
print("\tTesting uncompressed pubkeys")
|
||||
# Segwit transactions using uncompressed pubkeys are not accepted
|
||||
# under default policy, but should still pass consensus.
|
||||
key = CECKey()
|
||||
key.set_secretbytes(b"9")
|
||||
key.set_compressed(False)
|
||||
pubkey = CPubKey(key.get_pubkey())
|
||||
assert_equal(len(pubkey), 65) # This should be an uncompressed pubkey
|
||||
|
||||
assert(len(self.utxo) > 0)
|
||||
utxo = self.utxo.pop(0)
|
||||
|
||||
# Test 1: P2WPKH
|
||||
# First create a P2WPKH output that uses an uncompressed pubkey
|
||||
pubkeyhash = hash160(pubkey)
|
||||
scriptPKH = CScript([OP_0, pubkeyhash])
|
||||
tx = CTransaction()
|
||||
tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
|
||||
tx.vout.append(CTxOut(utxo.nValue-1000, scriptPKH))
|
||||
tx.rehash()
|
||||
|
||||
# Confirm it in a block.
|
||||
block = self.build_next_block()
|
||||
self.update_witness_block_with_transactions(block, [tx])
|
||||
self.test_node.test_witness_block(block, accepted=True)
|
||||
|
||||
# Now try to spend it. Send it to a P2WSH output, which we'll
|
||||
# use in the next test.
|
||||
witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
|
||||
witness_hash = sha256(witness_program)
|
||||
scriptWSH = CScript([OP_0, witness_hash])
|
||||
|
||||
tx2 = CTransaction()
|
||||
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
|
||||
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptWSH))
|
||||
script = GetP2PKHScript(pubkeyhash)
|
||||
sig_hash = SegwitVersion1SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
|
||||
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
|
||||
tx2.wit.vtxinwit.append(CTxInWitness())
|
||||
tx2.wit.vtxinwit[0].scriptWitness.stack = [ signature, pubkey ]
|
||||
tx2.rehash()
|
||||
|
||||
# Should fail policy test.
|
||||
self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
|
||||
# But passes consensus.
|
||||
block = self.build_next_block()
|
||||
self.update_witness_block_with_transactions(block, [tx2])
|
||||
self.test_node.test_witness_block(block, accepted=True)
|
||||
|
||||
# Test 2: P2WSH
|
||||
# Try to spend the P2WSH output created in last test.
|
||||
# Send it to a P2SH(P2WSH) output, which we'll use in the next test.
|
||||
p2sh_witness_hash = hash160(scriptWSH)
|
||||
scriptP2SH = CScript([OP_HASH160, p2sh_witness_hash, OP_EQUAL])
|
||||
scriptSig = CScript([scriptWSH])
|
||||
|
||||
tx3 = CTransaction()
|
||||
tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
|
||||
tx3.vout.append(CTxOut(tx2.vout[0].nValue-1000, scriptP2SH))
|
||||
tx3.wit.vtxinwit.append(CTxInWitness())
|
||||
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
|
||||
|
||||
# Should fail policy test.
|
||||
self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
|
||||
# But passes consensus.
|
||||
block = self.build_next_block()
|
||||
self.update_witness_block_with_transactions(block, [tx3])
|
||||
self.test_node.test_witness_block(block, accepted=True)
|
||||
|
||||
# Test 3: P2SH(P2WSH)
|
||||
# Try to spend the P2SH output created in the last test.
|
||||
# Send it to a P2PKH output, which we'll use in the next test.
|
||||
scriptPubKey = GetP2PKHScript(pubkeyhash)
|
||||
tx4 = CTransaction()
|
||||
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), scriptSig))
|
||||
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, scriptPubKey))
|
||||
tx4.wit.vtxinwit.append(CTxInWitness())
|
||||
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
|
||||
|
||||
# Should fail policy test.
|
||||
self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
|
||||
block = self.build_next_block()
|
||||
self.update_witness_block_with_transactions(block, [tx4])
|
||||
self.test_node.test_witness_block(block, accepted=True)
|
||||
|
||||
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
|
||||
# transactions.
|
||||
tx5 = CTransaction()
|
||||
tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
|
||||
tx5.vout.append(CTxOut(tx4.vout[0].nValue-1000, CScript([OP_TRUE])))
|
||||
(sig_hash, err) = SignatureHash(scriptPubKey, tx5, 0, SIGHASH_ALL)
|
||||
signature = key.sign(sig_hash) + b'\x01' # 0x1 is SIGHASH_ALL
|
||||
tx5.vin[0].scriptSig = CScript([signature, pubkey])
|
||||
tx5.rehash()
|
||||
# Should pass policy and consensus.
|
||||
self.test_node.test_transaction_acceptance(tx5, True, True)
|
||||
block = self.build_next_block()
|
||||
self.update_witness_block_with_transactions(block, [tx5])
|
||||
self.test_node.test_witness_block(block, accepted=True)
|
||||
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
|
||||
|
||||
def test_non_standard_witness(self):
|
||||
print("\tTesting detection of non-standard P2WSH witness")
|
||||
pad = chr(1).encode('latin-1')
|
||||
|
||||
# Create scripts for tests
|
||||
scripts = []
|
||||
scripts.append(CScript([OP_DROP] * 100))
|
||||
scripts.append(CScript([OP_DROP] * 99))
|
||||
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
|
||||
scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))
|
||||
|
||||
p2wsh_scripts = []
|
||||
|
||||
assert(len(self.utxo))
|
||||
tx = CTransaction()
|
||||
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
|
||||
|
||||
# For each script, generate a pair of P2WSH and P2SH-P2WSH output.
|
||||
outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
|
||||
for i in scripts:
|
||||
p2wsh = CScript([OP_0, sha256(i)])
|
||||
p2sh = hash160(p2wsh)
|
||||
p2wsh_scripts.append(p2wsh)
|
||||
tx.vout.append(CTxOut(outputvalue, p2wsh))
|
||||
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
|
||||
tx.rehash()
|
||||
txid = tx.sha256
|
||||
self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
|
||||
|
||||
self.nodes[0].generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
# Creating transactions for tests
|
||||
p2wsh_txs = []
|
||||
p2sh_txs = []
|
||||
for i in range(len(scripts)):
|
||||
p2wsh_tx = CTransaction()
|
||||
p2wsh_tx.vin.append(CTxIn(COutPoint(txid,i*2)))
|
||||
p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
|
||||
p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
|
||||
p2wsh_tx.rehash()
|
||||
p2wsh_txs.append(p2wsh_tx)
|
||||
p2sh_tx = CTransaction()
|
||||
p2sh_tx.vin.append(CTxIn(COutPoint(txid,i*2+1), CScript([p2wsh_scripts[i]])))
|
||||
p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(hex_str_to_bytes(""))])))
|
||||
p2sh_tx.wit.vtxinwit.append(CTxInWitness())
|
||||
p2sh_tx.rehash()
|
||||
p2sh_txs.append(p2sh_tx)
|
||||
|
||||
# Testing native P2WSH
|
||||
# Witness stack size, excluding witnessScript, over 100 is non-standard
|
||||
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
|
||||
self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
|
||||
# Non-standard nodes should accept
|
||||
self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
|
||||
|
||||
# Stack element size over 80 bytes is non-standard
|
||||
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
|
||||
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
|
||||
# Non-standard nodes should accept
|
||||
self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
|
||||
# Standard nodes should accept if element size is not over 80 bytes
|
||||
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
|
||||
self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
|
||||
|
||||
# witnessScript size at 3600 bytes is standard
|
||||
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
|
||||
self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
|
||||
self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
|
||||
|
||||
# witnessScript size at 3601 bytes is non-standard
|
||||
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
|
||||
self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
|
||||
# Non-standard nodes should accept
|
||||
self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
|
||||
|
||||
# Repeating the same tests with P2SH-P2WSH
|
||||
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
|
||||
self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
|
||||
self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
|
||||
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
|
||||
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
|
||||
self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
|
||||
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
|
||||
self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
|
||||
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
|
||||
self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
|
||||
self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
|
||||
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
|
||||
self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
|
||||
self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
|
||||
|
||||
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
sync_blocks(self.nodes)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)

self.utxo.pop(0)
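# --- Illustrative summary (not part of the test) -----------------------------
# The policy limits probed above, expressed as constants. The values come
# straight from the comments in this test; the names mirror (but are not
# guaranteed to match) the constants in the node's policy code:
MAX_STANDARD_P2WSH_STACK_ITEMS = 100      # witness stack items, excluding the witnessScript
MAX_STANDARD_P2WSH_STACK_ITEM_SIZE = 80   # bytes per witness stack element
MAX_STANDARD_P2WSH_SCRIPT_SIZE = 3600     # bytes of witnessScript
# Exceeding any of these makes a P2WSH (or P2SH-P2WSH) spend non-standard
# ('bad-witness-nonstandard'); it remains consensus-valid inside a block.
# -----------------------------------------------------------------------------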
|
||||
|
||||
|
||||
def run_test(self):
|
||||
# Setup the p2p connections and start up the network thread.
|
||||
self.test_node = TestNode() # sets NODE_WITNESS|NODE_NETWORK
|
||||
@@ -1658,6 +1958,7 @@ class SegWitTest(BitcoinTestFramework):
|
||||
self.test_witness_tx_relay_before_segwit_activation()
|
||||
self.test_block_relay(segwit_activated=False)
|
||||
self.test_p2sh_witness(segwit_activated=False)
|
||||
self.test_standardness_v0(segwit_activated=False)
|
||||
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
@@ -1679,9 +1980,12 @@ class SegWitTest(BitcoinTestFramework):
|
||||
self.test_witness_input_length()
|
||||
self.test_block_relay(segwit_activated=True)
|
||||
self.test_tx_relay_after_segwit_activation()
|
||||
self.test_standardness_v0(segwit_activated=True)
|
||||
self.test_segwit_versions()
|
||||
self.test_premature_coinbase_witness_spend()
|
||||
self.test_uncompressed_pubkey()
|
||||
self.test_signature_version_1()
|
||||
self.test_non_standard_witness()
|
||||
sync_blocks(self.nodes)
|
||||
if self.test_upgrade:
|
||||
self.test_upgrade_after_activation(self.nodes[2], 2)
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
from test_framework.mininode import *
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import *
|
||||
import re
|
||||
import time
|
||||
from test_framework.blocktools import create_block, create_coinbase
|
||||
|
||||
@@ -21,6 +22,10 @@ VB_THRESHOLD = 108 # versionbits activation threshold for regtest
VB_TOP_BITS = 0x20000000
VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment

WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible unknown rules are in effect"
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
VB_PATTERN = re.compile("^Warning.*versionbit")
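# --- Illustrative sketch (not part of the test) -----------------------------
# Blocks that signal the unknown deployment combine the BIP9 "top bits" pattern
# with the chosen bit; this is the kind of nVersion value fed to
# send_blocks_with_version() below (the helper here is a sketch, not part of
# the test file):
def unknown_signalling_version():
    return VB_TOP_BITS | (1 << VB_UNKNOWN_BIT)

assert unknown_signalling_version() == 0x28000000
# -----------------------------------------------------------------------------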
|
||||
|
||||
# TestNode: bare-bones "peer". Used mostly as a conduit for a test to sending
|
||||
# p2p messages to a node, generating the messages in the main testing logic.
|
||||
class TestNode(NodeConnCB):
|
||||
@@ -65,16 +70,12 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
self.num_nodes = 1
|
||||
|
||||
def setup_network(self):
|
||||
self.nodes = []
|
||||
self.alert_filename = os.path.join(self.options.tmpdir, "alert.txt")
|
||||
# Open and close to create zero-length file
|
||||
with open(self.alert_filename, 'w') as f:
|
||||
with open(self.alert_filename, 'w', encoding='utf8') as _:
|
||||
pass
|
||||
self.node_options = ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]
|
||||
self.nodes.append(start_node(0, self.options.tmpdir, self.node_options))
|
||||
|
||||
import re
|
||||
self.vb_pattern = re.compile("^Warning.*versionbit")
|
||||
self.extra_args = [["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""]]
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
|
||||
|
||||
# Send numblocks blocks via peer with nVersionToUse set.
|
||||
def send_blocks_with_version(self, peer, numblocks, nVersionToUse):
|
||||
@@ -83,7 +84,7 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
block_time = self.nodes[0].getblockheader(tip)["time"]+1
|
||||
tip = int(tip, 16)
|
||||
|
||||
for i in range(numblocks):
|
||||
for _ in range(numblocks):
|
||||
block = create_block(tip, create_coinbase(height+1), block_time)
|
||||
block.nVersion = nVersionToUse
|
||||
block.solve()
|
||||
@@ -94,9 +95,9 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
peer.sync_with_ping()
|
||||
|
||||
def test_versionbits_in_alert_file(self):
|
||||
with open(self.alert_filename, 'r') as f:
|
||||
with open(self.alert_filename, 'r', encoding='utf8') as f:
|
||||
alert_text = f.read()
|
||||
assert(self.vb_pattern.match(alert_text))
|
||||
assert(VB_PATTERN.match(alert_text))
|
||||
|
||||
def run_test(self):
|
||||
# Setup the p2p connection and start up the network thread.
|
||||
@@ -122,8 +123,10 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
# Fill rest of period with regular version blocks
|
||||
self.nodes[0].generate(VB_PERIOD - VB_THRESHOLD + 1)
|
||||
# Check that we're not getting any versionbit-related errors in
|
||||
# getinfo()
|
||||
assert(not self.vb_pattern.match(self.nodes[0].getinfo()["errors"]))
|
||||
# get*info()
|
||||
assert(not VB_PATTERN.match(self.nodes[0].getinfo()["errors"]))
|
||||
assert(not VB_PATTERN.match(self.nodes[0].getmininginfo()["errors"]))
|
||||
assert(not VB_PATTERN.match(self.nodes[0].getnetworkinfo()["warnings"]))
|
||||
|
||||
# 3. Now build one period of blocks with >= VB_THRESHOLD blocks signaling
|
||||
# some unknown bit
|
||||
@@ -132,30 +135,31 @@ class VersionBitsWarningTest(BitcoinTestFramework):
|
||||
# Might not get a versionbits-related alert yet, as we should
|
||||
# have gotten a different alert due to more than 51/100 blocks
|
||||
# being of unexpected version.
|
||||
# Check that getinfo() shows some kind of error.
|
||||
assert(len(self.nodes[0].getinfo()["errors"]) != 0)
|
||||
# Check that get*info() shows some kind of error.
|
||||
assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getinfo()["errors"])
|
||||
assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getmininginfo()["errors"])
|
||||
assert(WARN_UNKNOWN_RULES_MINED in self.nodes[0].getnetworkinfo()["warnings"])
|
||||
|
||||
# Mine a period worth of expected blocks so the generic block-version warning
|
||||
# is cleared, and restart the node. This should move the versionbit state
|
||||
# to ACTIVE.
|
||||
self.nodes[0].generate(VB_PERIOD)
|
||||
stop_node(self.nodes[0], 0)
|
||||
wait_bitcoinds()
|
||||
stop_nodes(self.nodes)
|
||||
# Empty out the alert file
|
||||
with open(self.alert_filename, 'w') as f:
|
||||
with open(self.alert_filename, 'w', encoding='utf8') as _:
|
||||
pass
|
||||
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
|
||||
|
||||
# Connecting one block should be enough to generate an error.
|
||||
self.nodes[0].generate(1)
|
||||
assert(len(self.nodes[0].getinfo()["errors"]) != 0)
|
||||
stop_node(self.nodes[0], 0)
|
||||
wait_bitcoinds()
|
||||
assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getinfo()["errors"])
|
||||
assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getmininginfo()["errors"])
|
||||
assert(WARN_UNKNOWN_RULES_ACTIVE in self.nodes[0].getnetworkinfo()["warnings"])
|
||||
stop_nodes(self.nodes)
|
||||
self.test_versionbits_in_alert_file()
|
||||
|
||||
# Test framework expects the node to still be running...
|
||||
self.nodes[0] = start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros=1", "-alertnotify=echo %s >> \"" + self.alert_filename + "\""])
|
||||
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
|
||||
|
||||
if __name__ == '__main__':
|
||||
VersionBitsWarningTest().main()
|
||||
|
||||
@@ -157,7 +157,10 @@ class PruneTest(BitcoinTestFramework):
|
||||
print("Usage possibly still high bc of stale blocks in block files:", calc_usage(self.prunedir))
|
||||
|
||||
print("Mine 220 more blocks so we have requisite history (some blocks will be big and cause pruning of previous chain)")
|
||||
self.nodes[0].generate(220) #node 0 has many large tx's in its mempool from the disconnects
|
||||
for i in range(22):
|
||||
# This can be slow, so do this in multiple RPC calls to avoid
|
||||
# RPC timeouts.
|
||||
self.nodes[0].generate(10) #node 0 has many large tx's in its mempool from the disconnects
|
||||
sync_blocks(self.nodes[0:3], timeout=300)
|
||||
|
||||
usage = calc_usage(self.prunedir)
|
||||
|
||||
@@ -7,7 +7,11 @@
|
||||
# Test -reindex and -reindex-chainstate with CheckBlockIndex
|
||||
#
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import *
|
||||
from test_framework.util import (
|
||||
start_nodes,
|
||||
stop_nodes,
|
||||
assert_equal,
|
||||
)
|
||||
import time
|
||||
|
||||
class ReindexTest(BitcoinTestFramework):
|
||||
@@ -18,16 +22,14 @@ class ReindexTest(BitcoinTestFramework):
|
||||
self.num_nodes = 1
|
||||
|
||||
def setup_network(self):
|
||||
self.nodes = []
|
||||
self.is_network_split = False
|
||||
self.nodes.append(start_node(0, self.options.tmpdir))
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir)
|
||||
|
||||
def reindex(self, justchainstate=False):
|
||||
self.nodes[0].generate(3)
|
||||
blockcount = self.nodes[0].getblockcount()
|
||||
stop_node(self.nodes[0], 0)
|
||||
wait_bitcoinds()
|
||||
self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"])
|
||||
stop_nodes(self.nodes)
|
||||
extra_args = [["-debug", "-reindex-chainstate" if justchainstate else "-reindex", "-checkblockindex=1"]]
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
|
||||
while self.nodes[0].getblockcount() < blockcount:
|
||||
time.sleep(0.1)
|
||||
assert_equal(self.nodes[0].getblockcount(), blockcount)
|
||||
|
||||
@@ -5,143 +5,106 @@
|
||||
|
||||
# Test for -rpcbind, as well as -rpcallowip and -rpcconnect
|
||||
|
||||
# TODO extend this test from the test framework (like all other tests)
|
||||
|
||||
import tempfile
|
||||
import traceback
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import *
|
||||
from test_framework.netutil import *
|
||||
|
||||
def run_bind_test(tmpdir, allow_ips, connect_to, addresses, expected):
|
||||
'''
|
||||
Start a node with requested rpcallowip and rpcbind parameters,
|
||||
then try to connect, and check if the set of bound addresses
|
||||
matches the expected set.
|
||||
'''
|
||||
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
|
||||
base_args = ['-disablewallet', '-nolisten']
|
||||
if allow_ips:
|
||||
base_args += ['-rpcallowip=' + x for x in allow_ips]
|
||||
binds = ['-rpcbind='+addr for addr in addresses]
|
||||
nodes = start_nodes(self.num_nodes, tmpdir, [base_args + binds], connect_to)
|
||||
try:
|
||||
pid = bitcoind_processes[0].pid
|
||||
assert_equal(set(get_bind_addrs(pid)), set(expected))
|
||||
finally:
|
||||
stop_nodes(nodes)
|
||||
wait_bitcoinds()
|
||||
class RPCBindTest(BitcoinTestFramework):
|
||||
|
||||
def run_allowip_test(tmpdir, allow_ips, rpchost, rpcport):
|
||||
'''
|
||||
Start a node with rpcallowip, and request getinfo
|
||||
at a non-localhost IP.
|
||||
'''
|
||||
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
|
||||
nodes = start_nodes(self.num_nodes, tmpdir, [base_args])
|
||||
try:
|
||||
# connect to node through non-loopback interface
|
||||
url = "http://rt:rt@%s:%d" % (rpchost, rpcport,)
|
||||
node = get_rpc_proxy(url, 1)
|
||||
node.getinfo()
|
||||
finally:
|
||||
node = None # make sure connection will be garbage collected and closed
|
||||
stop_nodes(nodes)
|
||||
wait_bitcoinds()
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.setup_clean_chain = True
|
||||
self.num_nodes = 1
|
||||
|
||||
|
||||
def run_test(tmpdir):
|
||||
assert(sys.platform.startswith('linux')) # due to OS-specific network stats queries, this test works only on Linux
|
||||
# find the first non-loopback interface for testing
|
||||
non_loopback_ip = None
|
||||
for name,ip in all_interfaces():
|
||||
if ip != '127.0.0.1':
|
||||
non_loopback_ip = ip
|
||||
break
|
||||
if non_loopback_ip is None:
|
||||
assert(not 'This test requires at least one non-loopback IPv4 interface')
|
||||
print("Using interface %s for testing" % non_loopback_ip)
|
||||
|
||||
defaultport = rpc_port(0)
|
||||
|
||||
# check default without rpcallowip (IPv4 and IPv6 localhost)
|
||||
run_bind_test(tmpdir, None, '127.0.0.1', [],
|
||||
[('127.0.0.1', defaultport), ('::1', defaultport)])
|
||||
# check default with rpcallowip (IPv6 any)
|
||||
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', [],
|
||||
[('::0', defaultport)])
|
||||
# check only IPv4 localhost (explicit)
|
||||
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
|
||||
[('127.0.0.1', defaultport)])
|
||||
# check only IPv4 localhost (explicit) with alternative port
|
||||
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
|
||||
[('127.0.0.1', 32171)])
|
||||
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
|
||||
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
|
||||
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
|
||||
# check only IPv6 localhost (explicit)
|
||||
run_bind_test(tmpdir, ['[::1]'], '[::1]', ['[::1]'],
|
||||
[('::1', defaultport)])
|
||||
# check both IPv4 and IPv6 localhost (explicit)
|
||||
run_bind_test(tmpdir, ['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
|
||||
[('127.0.0.1', defaultport), ('::1', defaultport)])
|
||||
# check only non-loopback interface
|
||||
run_bind_test(tmpdir, [non_loopback_ip], non_loopback_ip, [non_loopback_ip],
|
||||
[(non_loopback_ip, defaultport)])
|
||||
|
||||
# Check that with invalid rpcallowip, we are denied
|
||||
run_allowip_test(tmpdir, [non_loopback_ip], non_loopback_ip, defaultport)
|
||||
try:
|
||||
run_allowip_test(tmpdir, ['1.1.1.1'], non_loopback_ip, defaultport)
|
||||
assert(not 'Connection not denied by rpcallowip as expected')
|
||||
except ValueError:
|
||||
def setup_network(self):
|
||||
pass
|
||||
|
||||
def main():
|
||||
import optparse
|
||||
def setup_nodes(self):
|
||||
pass
|
||||
|
||||
parser = optparse.OptionParser(usage="%prog [options]")
|
||||
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
|
||||
help="Leave bitcoinds and test.* datadir on exit or error")
|
||||
parser.add_option("--srcdir", dest="srcdir", default="../../src",
|
||||
help="Source directory containing bitcoind/bitcoin-cli (default: %default%)")
|
||||
parser.add_option("--tmpdir", dest="tmpdir", default=tempfile.mkdtemp(prefix="test"),
|
||||
help="Root directory for datadirs")
|
||||
(options, args) = parser.parse_args()
|
||||
def run_bind_test(self, allow_ips, connect_to, addresses, expected):
|
||||
'''
|
||||
Start a node with requested rpcallowip and rpcbind parameters,
|
||||
then try to connect, and check if the set of bound addresses
|
||||
matches the expected set.
|
||||
'''
|
||||
expected = [(addr_to_hex(addr), port) for (addr, port) in expected]
|
||||
base_args = ['-disablewallet', '-nolisten']
|
||||
if allow_ips:
|
||||
base_args += ['-rpcallowip=' + x for x in allow_ips]
|
||||
binds = ['-rpcbind='+addr for addr in addresses]
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args + binds], connect_to)
|
||||
try:
|
||||
pid = bitcoind_processes[0].pid
|
||||
assert_equal(set(get_bind_addrs(pid)), set(expected))
|
||||
finally:
|
||||
stop_nodes(self.nodes)
|
||||
|
||||
os.environ['PATH'] = options.srcdir+":"+os.environ['PATH']
|
||||
def run_allowip_test(self, allow_ips, rpchost, rpcport):
|
||||
'''
|
||||
Start a node with rpcallowip, and request getinfo
|
||||
at a non-localhost IP.
|
||||
'''
|
||||
base_args = ['-disablewallet', '-nolisten'] + ['-rpcallowip='+x for x in allow_ips]
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, [base_args])
|
||||
try:
|
||||
# connect to node through non-loopback interface
|
||||
node = get_rpc_proxy(rpc_url(0, "%s:%d" % (rpchost, rpcport)), 0)
|
||||
node.getinfo()
|
||||
finally:
|
||||
node = None # make sure connection will be garbage collected and closed
|
||||
stop_nodes(self.nodes)
|
||||
|
||||
check_json_precision()
|
||||
def run_test(self):
|
||||
# due to OS-specific network stats queries, this test works only on Linux
|
||||
assert(sys.platform.startswith('linux'))
|
||||
# find the first non-loopback interface for testing
|
||||
non_loopback_ip = None
|
||||
for name,ip in all_interfaces():
|
||||
if ip != '127.0.0.1':
|
||||
non_loopback_ip = ip
|
||||
break
|
||||
if non_loopback_ip is None:
|
||||
assert(not 'This test requires at least one non-loopback IPv4 interface')
|
||||
print("Using interface %s for testing" % non_loopback_ip)
|
||||
|
||||
success = False
|
||||
nodes = []
|
||||
try:
|
||||
print("Initializing test directory "+options.tmpdir)
|
||||
if not os.path.isdir(options.tmpdir):
|
||||
os.makedirs(options.tmpdir)
|
||||
initialize_chain(options.tmpdir)
|
||||
defaultport = rpc_port(0)
|
||||
|
||||
run_test(options.tmpdir)
|
||||
# check default without rpcallowip (IPv4 and IPv6 localhost)
|
||||
self.run_bind_test(None, '127.0.0.1', [],
|
||||
[('127.0.0.1', defaultport), ('::1', defaultport)])
|
||||
# check default with rpcallowip (IPv6 any)
|
||||
self.run_bind_test(['127.0.0.1'], '127.0.0.1', [],
|
||||
[('::0', defaultport)])
|
||||
# check only IPv4 localhost (explicit)
|
||||
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1'],
|
||||
[('127.0.0.1', defaultport)])
|
||||
# check only IPv4 localhost (explicit) with alternative port
|
||||
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171'],
|
||||
[('127.0.0.1', 32171)])
|
||||
# check only IPv4 localhost (explicit) with multiple alternative ports on same host
|
||||
self.run_bind_test(['127.0.0.1'], '127.0.0.1:32171', ['127.0.0.1:32171', '127.0.0.1:32172'],
|
||||
[('127.0.0.1', 32171), ('127.0.0.1', 32172)])
|
||||
# check only IPv6 localhost (explicit)
|
||||
self.run_bind_test(['[::1]'], '[::1]', ['[::1]'],
|
||||
[('::1', defaultport)])
|
||||
# check both IPv4 and IPv6 localhost (explicit)
|
||||
self.run_bind_test(['127.0.0.1'], '127.0.0.1', ['127.0.0.1', '[::1]'],
|
||||
[('127.0.0.1', defaultport), ('::1', defaultport)])
|
||||
# check only non-loopback interface
|
||||
self.run_bind_test([non_loopback_ip], non_loopback_ip, [non_loopback_ip],
|
||||
[(non_loopback_ip, defaultport)])
|
||||
|
||||
success = True
|
||||
|
||||
except AssertionError as e:
|
||||
print("Assertion failed: "+e.message)
|
||||
except Exception as e:
|
||||
print("Unexpected exception caught during testing: "+str(e))
|
||||
traceback.print_tb(sys.exc_info()[2])
|
||||
|
||||
if not options.nocleanup:
|
||||
print("Cleaning up")
|
||||
wait_bitcoinds()
|
||||
shutil.rmtree(options.tmpdir)
|
||||
|
||||
if success:
|
||||
print("Tests successful")
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("Failed")
|
||||
sys.exit(1)
|
||||
# Check that with invalid rpcallowip, we are denied
|
||||
self.run_allowip_test([non_loopback_ip], non_loopback_ip, defaultport)
|
||||
try:
|
||||
self.run_allowip_test(['1.1.1.1'], non_loopback_ip, defaultport)
|
||||
assert(not 'Connection not denied by rpcallowip as expected')
|
||||
except JSONRPCException:
|
||||
pass
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
RPCBindTest().main()
|
||||
|
||||
@@ -9,9 +9,10 @@

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import sha256, ripemd160
import os
import shutil
from test_framework.mininode import sha256, ripemd160, CTransaction, CTxIn, COutPoint, CTxOut
from test_framework.address import script_to_p2sh, key_to_p2pkh
from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG
from io import BytesIO

NODE_0 = 0
NODE_1 = 1
@@ -69,6 +70,11 @@ def getutxo(txid):
utxo["txid"] = txid
return utxo

def find_unspent(node, min_value):
for utxo in node.listunspent():
if utxo['amount'] >= min_value:
return utxo

class SegWitTest(BitcoinTestFramework):

def setup_chain(self):
@@ -117,8 +123,21 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)

def run_test(self):
self.nodes[0].generate(160) #block 160
self.nodes[0].generate(161) #block 161

print("Verify sigops are counted in GBT with pre-BIP141 rules before the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({})
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
self.nodes[0].generate(1) #block 162
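The 20000/2 versus 80000/8 figures asserted before and after the fork follow from BIP141's cost accounting, which scales legacy sigop counts by the witness scale factor of 4; a quick sanity check of that arithmetic (illustration only, not part of the test):

WITNESS_SCALE_FACTOR = 4
assert 20000 * WITNESS_SCALE_FACTOR == 80000   # sigoplimit reported pre-fork vs. post-fork
assert 2 * WITNESS_SCALE_FACTOR == 8           # the same 2-sigop transaction reports a cost of 8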

balance_presetup = self.nodes[0].getbalance()
self.pubkey = []
p2sh_ids = [] # p2sh_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE embedded in p2sh
wit_ids = [] # wit_ids[NODE][VER] is an array of txids that spend to a witness version VER pkscript to an address for NODE via bare witness
@@ -137,18 +156,18 @@ class SegWitTest(BitcoinTestFramework):
for i in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], self.nodes[0].listunspent()[0], self.pubkey[n], True, Decimal("49.999")))
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
p2sh_ids[n][v].append(send_to_witness(v, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[n], True, Decimal("49.999")))

self.nodes[0].generate(1) #block 161
self.nodes[0].generate(1) #block 163
sync_blocks(self.nodes)

# Make sure all nodes recognize the transactions as theirs
assert_equal(self.nodes[0].getbalance(), 60*50 - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[0].getbalance(), balance_presetup - 60*50 + 20*Decimal("49.999") + 50)
assert_equal(self.nodes[1].getbalance(), 20*Decimal("49.999"))
assert_equal(self.nodes[2].getbalance(), 20*Decimal("49.999"))

self.nodes[0].generate(262) #block 423
self.nodes[0].generate(260) #block 423
sync_blocks(self.nodes)

print("Verify default node can't accept any witness format txs before fork")
@@ -205,5 +224,380 @@ class SegWitTest(BitcoinTestFramework):
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V0][0], True) #block 434
self.success_mine(self.nodes[0], p2sh_ids[NODE_0][WIT_V1][0], True) #block 435

print("Verify sigops are counted in GBT with BIP141 rules after the fork")
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1)
tmpl = self.nodes[0].getblocktemplate({'rules':['segwit']})
assert(tmpl['sigoplimit'] == 80000)
assert(tmpl['transactions'][0]['txid'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 8)

print("Verify non-segwit miners get a valid GBT response after the fork")
send_to_witness(1, self.nodes[0], find_unspent(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.998"))
try:
tmpl = self.nodes[0].getblocktemplate({})
assert(len(tmpl['transactions']) == 1) # Doesn't include witness tx
assert(tmpl['sigoplimit'] == 20000)
assert(tmpl['transactions'][0]['hash'] == txid)
assert(tmpl['transactions'][0]['sigops'] == 2)
assert(('!segwit' in tmpl['rules']) or ('segwit' not in tmpl['rules']))
except JSONRPCException:
# This is an acceptable outcome
pass

print("Verify behaviour of importaddress, addwitnessaddress and listunspent")

# Some public keys to be used later
pubkeys = [
"0363D44AABD0F1699138239DF2F042C3282C0671CC7A76826A55C8203D90E39242", # cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb
"02D3E626B3E616FC8662B489C123349FECBFC611E778E5BE739B257EAE4721E5BF", # cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97
"04A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538A62F5BD8EC85C2477F39650BD391EA6250207065B2A81DA8B009FC891E898F0E", # 91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV
"02A47F2CBCEFFA7B9BCDA184E7D5668D3DA6F9079AD41E422FA5FD7B2D458F2538", # cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd
"036722F784214129FEB9E8129D626324F3F6716555B603FFE8300BBCB882151228", # cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66
"0266A8396EE936BF6D99D17920DB21C6C7B1AB14C639D5CD72B300297E416FD2EC", # cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K
"0450A38BD7F0AC212FEBA77354A9B036A32E0F7C81FC4E0C5ADCA7C549C4505D2522458C2D9AE3CEFD684E039194B72C8A10F9CB9D4764AB26FCC2718D421D3B84", # 92h2XPssjBpsJN5CqSP7v9a7cf2kgDunBC6PDFwJHMACM1rrVBJ
]

# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("92e6XLo5jVAVwrQKPNTs93oQco8f8sDNBcpv73Dsrs397fQtFQn")
uncompressed_spendable_address = ["mvozP4UwyGD2mGZU4D2eMvMLPB9WkMmMQu"]
self.nodes[0].importprivkey("cNC8eQ5dg3mFAVePDX4ddmPYpPbw41r9bm2jd1nLJT77e6RrzTRR")
compressed_spendable_address = ["mmWQubrDomqpgSYekvsU7HWEVjLFHAakLe"]
assert ((self.nodes[0].validateaddress(uncompressed_spendable_address[0])['iscompressed'] == False))
assert ((self.nodes[0].validateaddress(compressed_spendable_address[0])['iscompressed'] == True))

self.nodes[0].importpubkey(pubkeys[0])
compressed_solvable_address = [key_to_p2pkh(pubkeys[0])]
self.nodes[0].importpubkey(pubkeys[1])
compressed_solvable_address.append(key_to_p2pkh(pubkeys[1]))
self.nodes[0].importpubkey(pubkeys[2])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[2])]

spendable_anytime = [] # These outputs should be seen anytime after importprivkey and addmultisigaddress
spendable_after_importaddress = [] # These outputs should be seen after importaddress
solvable_after_importaddress = [] # These outputs should be seen after importaddress but not spendable
unsolvable_after_importaddress = [] # These outputs should be unsolvable after importaddress
solvable_anytime = [] # These outputs should be solvable after importpubkey
unseen_anytime = [] # These outputs should never be seen

uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], compressed_solvable_address[1]]))
unknown_address = ["mtKKyoHabkk6e4ppT7NaM7THqPUt7AzPrT", "2NDP3jLWAFT8NDAiUa9qiE6oBt2awmMq7Dx"]

# Test multisig_without_privkey
# We have 2 public keys without private keys, use addmultisigaddress to add to wallet.
# Money sent to P2SH of multisig of this should only be seen after importaddress with the BASE58 P2SH address.

multisig_without_privkey_address = self.nodes[0].addmultisigaddress(2, [pubkeys[3], pubkeys[4]])
script = CScript([OP_2, hex_str_to_bytes(pubkeys[3]), hex_str_to_bytes(pubkeys[4]), OP_2, OP_CHECKMULTISIG])
solvable_after_importaddress.append(CScript([OP_HASH160, hash160(script), OP_EQUAL]))

for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with compressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are spendable after direct importaddress
spendable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])

for i in uncompressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# bare and p2sh multisig with uncompressed keys should always be spendable
spendable_anytime.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be spendable
spendable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK and P2SH_P2PKH are spendable after direct importaddress
spendable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])

for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# Multisig without private is not seen after addmultisigaddress, but seen after importaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_importaddress.extend([bare, p2sh, p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with compressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH, and witness with compressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2wpkh, p2sh_p2wpkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])

for i in uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# Base uncompressed multisig without private is not seen after addmultisigaddress, but seen after importaddress
solvable_after_importaddress.extend([bare, p2sh])
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# normal P2PKH and P2PK with uncompressed keys should always be seen
solvable_anytime.extend([p2pkh, p2pk])
# P2SH_P2PK, P2SH_P2PKH with uncompressed keys are seen after direct importaddress
solvable_after_importaddress.extend([p2sh_p2pk, p2sh_p2pkh])
# witness with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh])

op1 = CScript([OP_1])
op0 = CScript([OP_0])
# 2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe is the P2SH(P2PKH) version of mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V
unsolvable_address = ["mjoE3sSrb8ByYEvgnC3Aox86u1CHnfJA4V", "2N7MGY19ti4KDMSzRfPAssP6Pxyuxoi6jLe", script_to_p2sh(op1), script_to_p2sh(op0)]
unsolvable_address_key = hex_str_to_bytes("02341AEC7587A51CDE5279E0630A531AEA2615A9F80B17E8D9376327BAEAA59E3D")
unsolvablep2pkh = CScript([OP_DUP, OP_HASH160, hash160(unsolvable_address_key), OP_EQUALVERIFY, OP_CHECKSIG])
unsolvablep2wshp2pkh = CScript([OP_0, sha256(unsolvablep2pkh)])
p2shop0 = CScript([OP_HASH160, hash160(op0), OP_EQUAL])
p2wshop1 = CScript([OP_0, sha256(op1)])
unsolvable_after_importaddress.append(unsolvablep2pkh)
unsolvable_after_importaddress.append(unsolvablep2wshp2pkh)
unsolvable_after_importaddress.append(op1) # OP_1 will be imported as script
unsolvable_after_importaddress.append(p2wshop1)
unseen_anytime.append(op0) # OP_0 will be imported as P2SH address with no script provided
unsolvable_after_importaddress.append(p2shop0)

spendable_txid = []
solvable_txid = []
spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime, 1))
self.mine_and_test_listunspent(spendable_after_importaddress + solvable_after_importaddress + unseen_anytime + unsolvable_after_importaddress, 0)

importlist = []
for i in compressed_spendable_address + uncompressed_spendable_address + compressed_solvable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
bare = hex_str_to_bytes(v['hex'])
importlist.append(bytes_to_hex_str(bare))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(bare)])))
else:
pubkey = hex_str_to_bytes(v['pubkey'])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript([OP_DUP, OP_HASH160, hash160(pubkey), OP_EQUALVERIFY, OP_CHECKSIG])
importlist.append(bytes_to_hex_str(p2pk))
importlist.append(bytes_to_hex_str(p2pkh))
importlist.append(bytes_to_hex_str(CScript([OP_0, hash160(pubkey)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pk)])))
importlist.append(bytes_to_hex_str(CScript([OP_0, sha256(p2pkh)])))

importlist.append(bytes_to_hex_str(unsolvablep2pkh))
importlist.append(bytes_to_hex_str(unsolvablep2wshp2pkh))
importlist.append(bytes_to_hex_str(op1))
importlist.append(bytes_to_hex_str(p2wshop1))

for i in importlist:
try:
self.nodes[0].importaddress(i,"",False,True)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "The wallet already contains the private key for this address or script")

self.nodes[0].importaddress(script_to_p2sh(op0)) # import OP_0 as address only
self.nodes[0].importaddress(multisig_without_privkey_address) # Test multisig_without_privkey

spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)

# addwitnessaddress should refuse to return a witness address if an uncompressed key is used or the address is
# not in the wallet
# note that no witness address should be returned by unsolvable addresses
# the multisig_without_privkey_address will fail because its keys were not added with importpubkey
for i in uncompressed_spendable_address + uncompressed_solvable_address + unknown_address + unsolvable_address + [multisig_without_privkey_address]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)

for i in compressed_spendable_address + compressed_solvable_address:
witaddress = self.nodes[0].addwitnessaddress(i)
# addwitnessaddress should return the same address if it is a known P2SH-witness address
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))

spendable_txid.append(self.mine_and_test_listunspent(spendable_anytime + spendable_after_importaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_anytime + solvable_after_importaddress, 1))
self.mine_and_test_listunspent(unsolvable_after_importaddress, 1)
self.mine_and_test_listunspent(unseen_anytime, 0)

# Repeat some tests. This time we don't add witness scripts with importaddress
# Import a compressed key and an uncompressed key, generate some multisig addresses
self.nodes[0].importprivkey("927pw6RW8ZekycnXqBQ2JS5nPyo1yRfGNN8oq74HeddWSpafDJH")
uncompressed_spendable_address = ["mguN2vNSCEUh6rJaXoAVwY3YZwZvEmf5xi"]
self.nodes[0].importprivkey("cMcrXaaUC48ZKpcyydfFo8PxHAjpsYLhdsp6nmtB3E2ER9UUHWnw")
compressed_spendable_address = ["n1UNmpmbVUJ9ytXYXiurmGPQ3TRrXqPWKL"]

self.nodes[0].importpubkey(pubkeys[5])
compressed_solvable_address = [key_to_p2pkh(pubkeys[5])]
self.nodes[0].importpubkey(pubkeys[6])
uncompressed_solvable_address = [key_to_p2pkh(pubkeys[6])]

spendable_after_addwitnessaddress = [] # These outputs should be seen after importaddress
solvable_after_addwitnessaddress=[] # These outputs should be seen after importaddress but not spendable
unseen_anytime = [] # These outputs should never be seen

uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [uncompressed_spendable_address[0], uncompressed_spendable_address[0]]))
compressed_spendable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_spendable_address[0]]))
uncompressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_solvable_address[0], uncompressed_solvable_address[0]]))
compressed_solvable_address.append(self.nodes[0].addmultisigaddress(2, [compressed_spendable_address[0], compressed_solvable_address[0]]))

premature_witaddress = []

for i in compressed_spendable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with compressed keys are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH are spendable after addwitnessaddress
spendable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))

for i in uncompressed_spendable_address + uncompressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
# P2WSH and P2SH(P2WSH) multisig with uncompressed keys are never seen
unseen_anytime.extend([p2wsh, p2sh_p2wsh])
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2WPKH, P2SH_P2WPKH with uncompressed keys are never seen
unseen_anytime.extend([p2wpkh, p2sh_p2wpkh])

for i in compressed_solvable_address:
v = self.nodes[0].validateaddress(i)
if (v['isscript']):
# P2WSH multisig without private key are seen after addwitnessaddress
[bare, p2sh, p2wsh, p2sh_p2wsh] = self.p2sh_address_to_script(v)
solvable_after_addwitnessaddress.extend([p2wsh, p2sh_p2wsh])
premature_witaddress.append(script_to_p2sh(p2wsh))
else:
[p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh] = self.p2pkh_address_to_script(v)
# P2SH_P2PK, P2SH_P2PKH with compressed keys are seen after addwitnessaddress
solvable_after_addwitnessaddress.extend([p2wpkh, p2sh_p2wpkh])
premature_witaddress.append(script_to_p2sh(p2wpkh))

self.mine_and_test_listunspent(spendable_after_addwitnessaddress + solvable_after_addwitnessaddress + unseen_anytime, 0)

# addwitnessaddress should refuse to return a witness address if an uncompressed key is used
# note that a multisig address returned by addmultisigaddress is not solvable until it is added with importaddress
# premature_witaddress are not accepted until the script is added with addwitnessaddress first
for i in uncompressed_spendable_address + uncompressed_solvable_address + premature_witaddress + [compressed_solvable_address[1]]:
try:
self.nodes[0].addwitnessaddress(i)
except JSONRPCException as exp:
assert_equal(exp.error["message"], "Public key or redeemscript not known to wallet, or the key is uncompressed")
else:
assert(False)

# after importaddress it should pass addwitnessaddress
v = self.nodes[0].validateaddress(compressed_solvable_address[1])
self.nodes[0].importaddress(v['hex'],"",False,True)
for i in compressed_spendable_address + compressed_solvable_address + premature_witaddress:
witaddress = self.nodes[0].addwitnessaddress(i)
assert_equal(witaddress, self.nodes[0].addwitnessaddress(witaddress))

spendable_txid.append(self.mine_and_test_listunspent(spendable_after_addwitnessaddress, 2))
solvable_txid.append(self.mine_and_test_listunspent(solvable_after_addwitnessaddress, 1))
self.mine_and_test_listunspent(unseen_anytime, 0)

# Check that spendable outputs are really spendable
self.create_and_mine_tx_from_txids(spendable_txid)

# import all the private keys so solvable addresses become spendable
self.nodes[0].importprivkey("cPiM8Ub4heR9NBYmgVzJQiUH1if44GSBGiqaeJySuL2BKxubvgwb")
self.nodes[0].importprivkey("cPpAdHaD6VoYbW78kveN2bsvb45Q7G5PhaPApVUGwvF8VQ9brD97")
self.nodes[0].importprivkey("91zqCU5B9sdWxzMt1ca3VzbtVm2YM6Hi5Rxn4UDtxEaN9C9nzXV")
self.nodes[0].importprivkey("cPQFjcVRpAUBG8BA9hzr2yEzHwKoMgLkJZBBtK9vJnvGJgMjzTbd")
self.nodes[0].importprivkey("cQGtcm34xiLjB1v7bkRa4V3aAc9tS2UTuBZ1UnZGeSeNy627fN66")
self.nodes[0].importprivkey("cTW5mR5M45vHxXkeChZdtSPozrFwFgmEvTNnanCW6wrqwaCZ1X7K")
self.create_and_mine_tx_from_txids(solvable_txid)

def mine_and_test_listunspent(self, script_list, ismine):
utxo = find_unspent(self.nodes[0], 50)
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(int('0x'+utxo['txid'],0), utxo['vout'])))
for i in script_list:
tx.vout.append(CTxOut(10000000, i))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
txid = self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
watchcount = 0
spendcount = 0
for i in self.nodes[0].listunspent():
if (i['txid'] == txid):
watchcount += 1
if (i['spendable'] == True):
spendcount += 1
if (ismine == 2):
assert_equal(spendcount, len(script_list))
elif (ismine == 1):
assert_equal(watchcount, len(script_list))
assert_equal(spendcount, 0)
else:
assert_equal(watchcount, 0)
return txid
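A hedged usage note for the helper above, mirroring its closing assertions rather than adding behaviour: the second argument encodes the expectation.

# 2 -> every output created from script_list must appear in listunspent as spendable
# 1 -> outputs appear watch-only (listed, but 'spendable' is False)
# 0 -> outputs must not appear in listunspent at all
# e.g. txid = self.mine_and_test_listunspent([some_script], 2)   # some_script is a placeholder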

def p2sh_address_to_script(self,v):
bare = CScript(hex_str_to_bytes(v['hex']))
p2sh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2wsh = CScript([OP_0, sha256(bare)])
p2sh_p2wsh = CScript([OP_HASH160, hash160(p2wsh), OP_EQUAL])
return([bare, p2sh, p2wsh, p2sh_p2wsh])

def p2pkh_address_to_script(self,v):
pubkey = hex_str_to_bytes(v['pubkey'])
p2wpkh = CScript([OP_0, hash160(pubkey)])
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])
p2pk = CScript([pubkey, OP_CHECKSIG])
p2pkh = CScript(hex_str_to_bytes(v['scriptPubKey']))
p2sh_p2pk = CScript([OP_HASH160, hash160(p2pk), OP_EQUAL])
p2sh_p2pkh = CScript([OP_HASH160, hash160(p2pkh), OP_EQUAL])
p2wsh_p2pk = CScript([OP_0, sha256(p2pk)])
p2wsh_p2pkh = CScript([OP_0, sha256(p2pkh)])
p2sh_p2wsh_p2pk = CScript([OP_HASH160, hash160(p2wsh_p2pk), OP_EQUAL])
p2sh_p2wsh_p2pkh = CScript([OP_HASH160, hash160(p2wsh_p2pkh), OP_EQUAL])
return [p2wpkh, p2sh_p2wpkh, p2pk, p2pkh, p2sh_p2pk, p2sh_p2pkh, p2wsh_p2pk, p2wsh_p2pkh, p2sh_p2wsh_p2pk, p2sh_p2wsh_p2pkh]
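A standalone sketch of the same script constructions for a single hypothetical compressed pubkey, using only primitives this test already imports; the key bytes are placeholders, not values the test uses:

pubkey = bytes.fromhex('02' + '11' * 32)                              # placeholder 33-byte compressed key
p2wpkh = CScript([OP_0, hash160(pubkey)])                             # native v0 witness keyhash program
p2sh_p2wpkh = CScript([OP_HASH160, hash160(p2wpkh), OP_EQUAL])        # the same program nested in P2SH
p2wsh_p2pk = CScript([OP_0, sha256(CScript([pubkey, OP_CHECKSIG]))])  # v0 witness scripthash of a P2PK script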

def create_and_mine_tx_from_txids(self, txids, success = True):
tx = CTransaction()
for i in txids:
txtmp = CTransaction()
txraw = self.nodes[0].getrawtransaction(i)
f = BytesIO(hex_str_to_bytes(txraw))
txtmp.deserialize(f)
for j in range(len(txtmp.vout)):
tx.vin.append(CTxIn(COutPoint(int('0x'+i,0), j)))
tx.vout.append(CTxOut(0, CScript()))
tx.rehash()
signresults = self.nodes[0].signrawtransaction(bytes_to_hex_str(tx.serialize_without_witness()))['hex']
self.nodes[0].sendrawtransaction(signresults, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)

if __name__ == '__main__':
SegWitTest().main()
@@ -80,20 +80,19 @@ e. Announce one more that doesn't connect.
Expect: disconnect.
'''

class BaseNode(NodeConnCB):
direct_fetch_response_time = 0.05

class BaseNode(SingleNodeConnCB):
def __init__(self):
NodeConnCB.__init__(self)
self.connection = None
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.ping_counter = 1
self.last_pong = msg_pong(0)
self.last_getdata = None
self.sleep_time = 0.05
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None

def clear_last_announcement(self):
with mininode_lock:
@@ -101,9 +100,6 @@ class BaseNode(NodeConnCB):
self.last_inv = None
self.last_headers = None

def add_connection(self, conn):
self.connection = conn

# Request data for a list of block hashes
def get_data(self, block_hashes):
msg = msg_getdata()
@@ -122,17 +118,17 @@ class BaseNode(NodeConnCB):
msg.inv = [CInv(2, blockhash)]
self.connection.send_message(msg)

# Wrapper for the NodeConn's send_message function
def send_message(self, message):
self.connection.send_message(message)

def on_inv(self, conn, message):
self.last_inv = message
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash

def on_headers(self, conn, message):
self.last_headers = message
self.block_announced = True
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
self.last_blockhash_announced = message.headers[-1].sha256

def on_block(self, conn, message):
self.last_block = message.block
@@ -141,9 +137,6 @@ class BaseNode(NodeConnCB):
def on_getdata(self, conn, message):
self.last_getdata = message

def on_pong(self, conn, message):
self.last_pong = message

def on_getheaders(self, conn, message):
self.last_getheaders = message

@@ -157,7 +150,7 @@ class BaseNode(NodeConnCB):
expect_headers = headers if headers != None else []
expect_inv = inv if inv != None else []
test_function = lambda: self.block_announced
self.sync(test_function)
assert(wait_until(test_function, timeout=60))
with mininode_lock:
self.block_announced = False

@@ -180,30 +173,14 @@ class BaseNode(NodeConnCB):
return success

# Syncing helpers
def sync(self, test_function, timeout=60):
while timeout > 0:
with mininode_lock:
if test_function():
return
time.sleep(self.sleep_time)
timeout -= self.sleep_time
raise AssertionError("Sync failed to complete")

def sync_with_ping(self, timeout=60):
self.send_message(msg_ping(nonce=self.ping_counter))
test_function = lambda: self.last_pong.nonce == self.ping_counter
self.sync(test_function, timeout)
self.ping_counter += 1
return

def wait_for_block(self, blockhash, timeout=60):
test_function = lambda: self.last_block != None and self.last_block.sha256 == blockhash
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return

def wait_for_getheaders(self, timeout=60):
test_function = lambda: self.last_getheaders != None
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return

def wait_for_getdata(self, hash_list, timeout=60):
@@ -211,12 +188,17 @@ class BaseNode(NodeConnCB):
return

test_function = lambda: self.last_getdata != None and [x.hash for x in self.last_getdata.inv] == hash_list
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return

def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
self.sync(test_function, timeout)
assert(wait_until(test_function, timeout=timeout))
return

def wait_for_block_announcement(self, block_hash, timeout=60):
test_function = lambda: self.last_blockhash_announced == block_hash
assert(wait_until(test_function, timeout=timeout))
return

def send_header_for_blocks(self, new_blocks):
@@ -266,7 +248,9 @@ class SendHeadersTest(BitcoinTestFramework):
def mine_reorg(self, length):
self.nodes[0].generate(length) # make sure all invalidated blocks are node0's
sync_blocks(self.nodes, wait=0.1)
[x.clear_last_announcement() for x in self.p2p_connections]
for x in self.p2p_connections:
x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16))
x.clear_last_announcement()

tip_height = self.nodes[1].getblockcount()
hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1))
@@ -495,7 +479,7 @@ class SendHeadersTest(BitcoinTestFramework):

test_node.send_header_for_blocks(blocks)
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time)

[ test_node.send_message(msg_block(x)) for x in blocks ]

@@ -526,13 +510,13 @@ class SendHeadersTest(BitcoinTestFramework):
# both blocks (same work as tip)
test_node.send_header_for_blocks(blocks[1:2])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time)

# Announcing 16 more headers should trigger direct fetch for 14 more
# blocks
test_node.send_header_for_blocks(blocks[2:18])
test_node.sync_with_ping()
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=test_node.sleep_time)
test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time)

# Announcing 1 more header should not trigger any response
test_node.last_getdata = None
qa/rpc-tests/test_framework/address.py (new file, 74 lines)
@@ -0,0 +1,74 @@

#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# address.py
#
# This file encodes and decodes BASE58 P2PKH and P2SH addresses
#

from .script import hash256, hash160, sha256, CScript, OP_0
from .util import bytes_to_hex_str, hex_str_to_bytes

chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'

def byte_to_base58(b, version):
result = ''
str = bytes_to_hex_str(b)
str = bytes_to_hex_str(chr(version).encode('latin-1')) + str
checksum = bytes_to_hex_str(hash256(hex_str_to_bytes(str)))
str += checksum[:8]
value = int('0x'+str,0)
while value > 0:
result = chars[value % 58] + result
value //= 58
while (str[:2] == '00'):
result = chars[0] + result
str = str[2:]
return result
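A hedged usage sketch of the encoder above; the all-zero hash is a placeholder and the resulting address string is deliberately not asserted:

example_hash160 = bytes(20)                            # placeholder 20-byte HASH160
testnet_addr = byte_to_base58(example_hash160, 111)    # 111: testnet P2PKH version byte (see keyhash_to_p2pkh below)
mainnet_addr = byte_to_base58(example_hash160, 0)      # 0: mainnet P2PKH version byte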

# TODO: def base58_decode

def keyhash_to_p2pkh(hash, main = False):
assert (len(hash) == 20)
version = 0 if main else 111
return byte_to_base58(hash, version)

def scripthash_to_p2sh(hash, main = False):
assert (len(hash) == 20)
version = 5 if main else 196
return byte_to_base58(hash, version)

def key_to_p2pkh(key, main = False):
key = check_key(key)
return keyhash_to_p2pkh(hash160(key), main)

def script_to_p2sh(script, main = False):
script = check_script(script)
return scripthash_to_p2sh(hash160(script), main)

def key_to_p2sh_p2wpkh(key, main = False):
key = check_key(key)
p2shscript = CScript([OP_0, hash160(key)])
return script_to_p2sh(p2shscript, main)

def script_to_p2sh_p2wsh(script, main = False):
script = check_script(script)
p2shscript = CScript([OP_0, sha256(script)])
return script_to_p2sh(p2shscript, main)
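A short sketch tying the helpers above together for a hypothetical compressed key (placeholder bytes, shown only to illustrate the P2SH nesting):

compressed_key = '02' + '11' * 32                                 # hex string; check_key() accepts hex or bytes
nested_keyhash_addr = key_to_p2sh_p2wpkh(compressed_key)          # P2SH wrapping OP_0 <hash160(key)>
nested_scripthash_addr = script_to_p2sh_p2wsh(CScript([OP_0]))    # P2SH wrapping OP_0 <sha256(script)>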

def check_key(key):
if (type(key) is str):
key = hex_str_to_bytes(key) # Assuming this is hex string
if (type(key) is bytes and (len(key) == 33 or len(key) == 65)):
return key
assert(False)

def check_script(script):
if (type(script) is str):
script = hex_str_to_bytes(script) # Assuming this is hex string
if (type(script) is bytes or type(script) is CScript):
return script
assert(False)
@@ -126,8 +126,9 @@ class AuthServiceProxy(object):
return self._get_response()
else:
raise
except BrokenPipeError:
# Python 3.5+ raises this instead of BadStatusLine when the connection was reset
except (BrokenPipeError,ConnectionResetError):
# Python 3.5+ raises BrokenPipeError instead of BadStatusLine when the connection was reset
# ConnectionResetError happens on FreeBSD with Python 3.4
self.__conn.close()
self.__conn.request(method, path, postdata, headers)
return self._get_response()

@@ -1,4 +1,7 @@
#!/usr/bin/env python3
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
@@ -6,11 +9,11 @@

from .mininode import *
from io import BytesIO
import dbm.ndbm
import dbm.dumb as dbmd

class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbm.ndbm.open(datadir + "/blocks", 'c')
self.blockDB = dbmd.open(datadir + "/blocks", 'c')
self.currentBlock = 0
self.headers_map = dict()

@@ -120,7 +123,7 @@ class BlockStore(object):

class TxStore(object):
def __init__(self, datadir):
self.txDB = dbm.ndbm.open(datadir + "/transactions", 'c')
self.txDB = dbmd.open(datadir + "/transactions", 'c')

def close(self):
self.txDB.close()

@@ -50,7 +50,7 @@ class AuthServiceProxyWrapper(object):
rpc_method = self.auth_service_proxy_instance._service_name

if self.coverage_logfile:
with open(self.coverage_logfile, 'a+') as f:
with open(self.coverage_logfile, 'a+', encoding='utf8') as f:
f.write("%s\n" % rpc_method)

return return_val
@@ -100,7 +100,7 @@ def write_all_rpc_commands(dirname, node):
if line and not line.startswith('='):
commands.add("%s\n" % line.split()[0])

with open(filename, 'w') as f:
with open(filename, 'w', encoding='utf8') as f:
f.writelines(list(commands))

return True
@@ -75,6 +75,9 @@ ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p,
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h

SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2

# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
@@ -147,7 +150,7 @@ class CECKey(object):
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)

def sign(self, hash):
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
@@ -159,7 +162,25 @@ class CECKey(object):
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
return mb_sig.raw[:sig_size0.value]
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
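The branch above rewrites the DER-encoded S value into the lower half of the curve order so the signature stays standard; stripped of the DER bookkeeping, the transformation is just this (a sketch, not part of the diff):

def normalize_s(s_value):
    # mirror S across the group order when it lies in the upper half, as sign(low_s=True) does
    return s_value if s_value <= SECP256K1_ORDER_HALF else SECP256K1_ORDER - s_value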

def verify(self, hash, sig):
"""Verify a DER signature"""

@@ -36,9 +36,10 @@ from threading import RLock
from threading import Thread
import logging
import copy
from test_framework.siphash import siphash256

BIP0031_VERSION = 60000
MY_VERSION = 60001 # past bip-31 for ping/pong
MY_VERSION = 70014 # past bip-31 for ping/pong
MY_SUBVERSION = b"/python-mininode-tester:0.0.3/"

MAX_INV_SZ = 50000
@@ -52,7 +53,7 @@ NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)

# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()

@@ -74,8 +75,19 @@ def ripemd160(s):
def hash256(s):
return sha256(sha256(s))

def ser_compact_size(l):
r = b""
if l < 253:
r = struct.pack("B", l)
elif l < 0x10000:
r = struct.pack("<BH", 253, l)
elif l < 0x100000000:
r = struct.pack("<BI", 254, l)
else:
r = struct.pack("<BQ", 255, l)
return r

def deser_string(f):
def deser_compact_size(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
@@ -83,16 +95,14 @@ def deser_string(f):
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return nit

def deser_string(f):
nit = deser_compact_size(f)
return f.read(nit)
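The new helpers centralize Bitcoin's CompactSize encoding, which the vector (de)serializers below now reuse; a few spot checks of the byte layout (illustration only, not part of the diff):

from io import BytesIO
assert ser_compact_size(100) == b'\x64'                     # below 253: a single byte
assert ser_compact_size(300) == b'\xfd\x2c\x01'             # 0xfd prefix + uint16, little-endian
assert ser_compact_size(70000) == b'\xfe\x70\x11\x01\x00'   # 0xfe prefix + uint32, little-endian
assert deser_compact_size(BytesIO(b'\xfd\x2c\x01')) == 300  # round-trips through the reader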

def ser_string(s):
if len(s) < 253:
return struct.pack("B", len(s)) + s
elif len(s) < 0x10000:
return struct.pack("<BH", 253, len(s)) + s
elif len(s) < 0x100000000:
return struct.pack("<BI", 254, len(s)) + s
return struct.pack("<BQ", 255, len(s)) + s
return ser_compact_size(len(s)) + s

def deser_uint256(f):
r = 0
@@ -125,13 +135,7 @@ def uint256_from_compact(c):

def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = c()
@@ -144,15 +148,7 @@ def deser_vector(f, c):
# entries in the vector (we use this for serializing the vector of transactions
# for a witness block).
def ser_vector(l, ser_function_name=None):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
if ser_function_name:
r += getattr(i, ser_function_name)()
@@ -162,13 +158,7 @@ def ser_vector(l, ser_function_name=None):

def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_uint256(f)
@@ -177,28 +167,14 @@ def deser_uint256_vector(f):

def ser_uint256_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
r += ser_uint256(i)
return r

def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = deser_string(f)
@@ -207,28 +183,14 @@ def deser_string_vector(f):

def ser_string_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for sv in l:
r += ser_string(sv)
return r

def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
nit = deser_compact_size(f)
r = []
for i in range(nit):
t = struct.unpack("<i", f.read(4))[0]
@@ -237,15 +199,7 @@ def deser_int_vector(f):

def ser_int_vector(l):
r = b""
if len(l) < 253:
r = struct.pack("B", len(l))
elif len(l) < 0x10000:
r = struct.pack("<BH", 253, len(l))
elif len(l) < 0x100000000:
r = struct.pack("<BI", 254, len(l))
else:
r = struct.pack("<BQ", 255, len(l))
r = ser_compact_size(len(l))
for i in l:
r += struct.pack("<i", i)
return r
@@ -294,7 +248,8 @@ class CInv(object):
1: "TX",
2: "Block",
1|MSG_WITNESS_FLAG: "WitnessTx",
2|MSG_WITNESS_FLAG : "WitnessBlock"
2|MSG_WITNESS_FLAG : "WitnessBlock",
4: "CompactBlock"
}

def __init__(self, t=0, h=0):
@@ -497,7 +452,7 @@ class CTransaction(object):
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
self.wit.vtxinwit = [CTxInWitness()]*len(self.vin)
self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
self.wit.deserialize(f)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
self.sha256 = None
@@ -563,8 +518,8 @@ class CTransaction(object):
return True

def __repr__(self):
return "CTransaction(nVersion=%i vin=%s vout=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), self.nLockTime)
return "CTransaction(nVersion=%i vin=%s vout=%s wit=%s nLockTime=%i)" \
% (self.nVersion, repr(self.vin), repr(self.vout), repr(self.wit), self.nLockTime)

class CBlockHeader(object):
@@ -781,6 +736,208 @@ class CAlert(object):
% (len(self.vchMsg), len(self.vchSig))

class PrefilledTransaction(object):
def __init__(self, index=0, tx = None):
self.index = index
self.tx = tx

def deserialize(self, f):
self.index = deser_compact_size(f)
self.tx = CTransaction()
self.tx.deserialize(f)

def serialize(self, with_witness=False):
r = b""
r += ser_compact_size(self.index)
if with_witness:
r += self.tx.serialize_with_witness()
else:
r += self.tx.serialize_without_witness()
return r

def serialize_with_witness(self):
return self.serialize(with_witness=True)

def __repr__(self):
return "PrefilledTransaction(index=%d, tx=%s)" % (self.index, repr(self.tx))

# This is what we send on the wire, in a cmpctblock message.
class P2PHeaderAndShortIDs(object):
def __init__(self):
self.header = CBlockHeader()
self.nonce = 0
self.shortids_length = 0
self.shortids = []
self.prefilled_txn_length = 0
self.prefilled_txn = []

def deserialize(self, f):
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
for i in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
self.prefilled_txn = deser_vector(f, PrefilledTransaction)
self.prefilled_txn_length = len(self.prefilled_txn)

# When using version 2 compact blocks, we must serialize with_witness.
def serialize(self, with_witness=False):
r = b""
r += self.header.serialize()
r += struct.pack("<Q", self.nonce)
r += ser_compact_size(self.shortids_length)
for x in self.shortids:
# We only want the first 6 bytes
r += struct.pack("<Q", x)[0:6]
if with_witness:
r += ser_vector(self.prefilled_txn, "serialize_with_witness")
else:
r += ser_vector(self.prefilled_txn)
return r

def __repr__(self):
return "P2PHeaderAndShortIDs(header=%s, nonce=%d, shortids_length=%d, shortids=%s, prefilled_txn_length=%d, prefilledtxn=%s" % (repr(self.header), self.nonce, self.shortids_length, repr(self.shortids), self.prefilled_txn_length, repr(self.prefilled_txn))

# P2P version of the above that will use witness serialization (for compact
# block version 2)
class P2PHeaderAndShortWitnessIDs(P2PHeaderAndShortIDs):
def serialize(self):
return super(P2PHeaderAndShortWitnessIDs, self).serialize(with_witness=True)

# Calculate the BIP 152-compact blocks shortid for a given transaction hash
def calculate_shortid(k0, k1, tx_hash):
expected_shortid = siphash256(k0, k1, tx_hash)
expected_shortid &= 0x0000ffffffffffff
return expected_shortid
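A hedged usage sketch of the helper above; the keys are arbitrary placeholders, whereas real callers derive them from the header and nonce via get_siphash_keys below and pass the (w)txid as a 256-bit integer:

k0, k1 = 0x0706050403020100, 0x0F0E0D0C0B0A0908   # placeholder SipHash keys
shortid = calculate_shortid(k0, k1, 0x1234)       # SipHash-2-4 of the hash, truncated by the mask above
assert shortid < (1 << 48)                        # BIP 152 short IDs fit in 6 bytes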

# This version gets rid of the array lengths, and reinterprets the differential
# encoding into indices that can be used for lookup.
class HeaderAndShortIDs(object):
def __init__(self, p2pheaders_and_shortids = None):
self.header = CBlockHeader()
self.nonce = 0
self.shortids = []
self.prefilled_txn = []
self.use_witness = False

if p2pheaders_and_shortids != None:
self.header = p2pheaders_and_shortids.header
self.nonce = p2pheaders_and_shortids.nonce
self.shortids = p2pheaders_and_shortids.shortids
last_index = -1
for x in p2pheaders_and_shortids.prefilled_txn:
self.prefilled_txn.append(PrefilledTransaction(x.index + last_index + 1, x.tx))
last_index = self.prefilled_txn[-1].index

def to_p2p(self):
if self.use_witness:
ret = P2PHeaderAndShortWitnessIDs()
else:
ret = P2PHeaderAndShortIDs()
ret.header = self.header
ret.nonce = self.nonce
ret.shortids_length = len(self.shortids)
ret.shortids = self.shortids
ret.prefilled_txn_length = len(self.prefilled_txn)
ret.prefilled_txn = []
last_index = -1
for x in self.prefilled_txn:
ret.prefilled_txn.append(PrefilledTransaction(x.index - last_index - 1, x.tx))
last_index = x.index
return ret

def get_siphash_keys(self):
header_nonce = self.header.serialize()
header_nonce += struct.pack("<Q", self.nonce)
hash_header_nonce_as_str = sha256(header_nonce)
key0 = struct.unpack("<Q", hash_header_nonce_as_str[0:8])[0]
key1 = struct.unpack("<Q", hash_header_nonce_as_str[8:16])[0]
return [ key0, key1 ]

# Version 2 compact blocks use wtxid in shortids (rather than txid)
def initialize_from_block(self, block, nonce=0, prefill_list = [0], use_witness = False):
self.header = CBlockHeader(block)
self.nonce = nonce
self.prefilled_txn = [ PrefilledTransaction(i, block.vtx[i]) for i in prefill_list ]
self.shortids = []
self.use_witness = use_witness
[k0, k1] = self.get_siphash_keys()
for i in range(len(block.vtx)):
if i not in prefill_list:
tx_hash = block.vtx[i].sha256
if use_witness:
tx_hash = block.vtx[i].calc_sha256(with_witness=True)
self.shortids.append(calculate_shortid(k0, k1, tx_hash))

def __repr__(self):
return "HeaderAndShortIDs(header=%s, nonce=%d, shortids=%s, prefilledtxn=%s" % (repr(self.header), self.nonce, repr(self.shortids), repr(self.prefilled_txn))

class BlockTransactionsRequest(object):

def __init__(self, blockhash=0, indexes = None):
self.blockhash = blockhash
self.indexes = indexes if indexes != None else []

def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
for i in range(indexes_length):
self.indexes.append(deser_compact_size(f))

def serialize(self):
r = b""
r += ser_uint256(self.blockhash)
r += ser_compact_size(len(self.indexes))
for x in self.indexes:
r += ser_compact_size(x)
return r

# helper to set the differentially encoded indexes from absolute ones
def from_absolute(self, absolute_indexes):
self.indexes = []
last_index = -1
for x in absolute_indexes:
self.indexes.append(x-last_index-1)
last_index = x

def to_absolute(self):
absolute_indexes = []
last_index = -1
for x in self.indexes:
absolute_indexes.append(x+last_index+1)
last_index = absolute_indexes[-1]
return absolute_indexes
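A worked example of the differential index encoding implemented by from_absolute/to_absolute above (values chosen only for illustration):

req = BlockTransactionsRequest(blockhash=0)
req.from_absolute([1, 3, 4, 7])            # each entry is the gap to the previous index, minus one
assert req.indexes == [1, 1, 0, 2]
assert req.to_absolute() == [1, 3, 4, 7]   # decoding restores the absolute positions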
|
||||
|
||||
def __repr__(self):
|
||||
return "BlockTransactionsRequest(hash=%064x indexes=%s)" % (self.blockhash, repr(self.indexes))
|
||||
|
||||
|
||||
class BlockTransactions(object):
|
||||
|
||||
def __init__(self, blockhash=0, transactions = None):
|
||||
self.blockhash = blockhash
|
||||
self.transactions = transactions if transactions != None else []
|
||||
|
||||
def deserialize(self, f):
|
||||
self.blockhash = deser_uint256(f)
|
||||
self.transactions = deser_vector(f, CTransaction)
|
||||
|
||||
def serialize(self, with_witness=False):
|
||||
r = b""
|
||||
r += ser_uint256(self.blockhash)
|
||||
if with_witness:
|
||||
r += ser_vector(self.transactions, "serialize_with_witness")
|
||||
else:
|
||||
r += ser_vector(self.transactions)
|
||||
return r
|
||||
|
||||
def __repr__(self):
|
||||
return "BlockTransactions(hash=%064x transactions=%s)" % (self.blockhash, repr(self.transactions))
|
||||
|
||||
|
||||
# Objects that correspond to messages on the wire
|
||||
class msg_version(object):
|
||||
command = b"version"
|
||||
@@ -1184,7 +1341,7 @@ class msg_reject(object):
|
||||
% (self.message, self.code, self.reason, self.data)
|
||||
|
||||
# Helper function
|
||||
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
|
||||
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf')):
|
||||
attempt = 0
|
||||
elapsed = 0
|
||||
|
||||
@@ -1215,6 +1372,85 @@ class msg_feefilter(object):
|
||||
def __repr__(self):
|
||||
return "msg_feefilter(feerate=%08x)" % self.feerate
|
||||
|
||||
class msg_sendcmpct(object):
|
||||
command = b"sendcmpct"
|
||||
|
||||
def __init__(self):
|
||||
self.announce = False
|
||||
self.version = 1
|
||||
|
||||
def deserialize(self, f):
|
||||
self.announce = struct.unpack("<?", f.read(1))[0]
|
||||
self.version = struct.unpack("<Q", f.read(8))[0]
|
||||
|
||||
def serialize(self):
|
||||
r = b""
|
||||
r += struct.pack("<?", self.announce)
|
||||
r += struct.pack("<Q", self.version)
|
||||
return r
|
||||
|
||||
def __repr__(self):
|
||||
return "msg_sendcmpct(announce=%s, version=%lu)" % (self.announce, self.version)
|
||||
|
||||

class msg_cmpctblock(object):
    command = b"cmpctblock"

    def __init__(self, header_and_shortids = None):
        self.header_and_shortids = header_and_shortids

    def deserialize(self, f):
        self.header_and_shortids = P2PHeaderAndShortIDs()
        self.header_and_shortids.deserialize(f)

    def serialize(self):
        r = b""
        r += self.header_and_shortids.serialize()
        return r

    def __repr__(self):
        return "msg_cmpctblock(HeaderAndShortIDs=%s)" % repr(self.header_and_shortids)

class msg_getblocktxn(object):
    command = b"getblocktxn"

    def __init__(self):
        self.block_txn_request = None

    def deserialize(self, f):
        self.block_txn_request = BlockTransactionsRequest()
        self.block_txn_request.deserialize(f)

    def serialize(self):
        r = b""
        r += self.block_txn_request.serialize()
        return r

    def __repr__(self):
        return "msg_getblocktxn(block_txn_request=%s)" % (repr(self.block_txn_request))

class msg_blocktxn(object):
    command = b"blocktxn"

    def __init__(self):
        self.block_transactions = BlockTransactions()

    def deserialize(self, f):
        self.block_transactions.deserialize(f)

    def serialize(self):
        r = b""
        r += self.block_transactions.serialize()
        return r

    def __repr__(self):
        return "msg_blocktxn(block_transactions=%s)" % (repr(self.block_transactions))

class msg_witness_blocktxn(msg_blocktxn):
    def serialize(self):
        r = b""
        r += self.block_transactions.serialize(with_witness=True)
        return r

# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
@@ -1295,6 +1531,10 @@ class NodeConnCB(object):
    def on_pong(self, conn, message): pass
    def on_feefilter(self, conn, message): pass
    def on_sendheaders(self, conn, message): pass
+   def on_sendcmpct(self, conn, message): pass
+   def on_cmpctblock(self, conn, message): pass
+   def on_getblocktxn(self, conn, message): pass
+   def on_blocktxn(self, conn, message): pass

# More useful callbacks and functions for NodeConnCB's which have a single NodeConn
class SingleNodeConnCB(NodeConnCB):
@@ -1311,6 +1551,10 @@ class SingleNodeConnCB(NodeConnCB):
    def send_message(self, message):
        self.connection.send_message(message)

+   def send_and_ping(self, message):
+       self.send_message(message)
+       self.sync_with_ping()
+
    def on_pong(self, conn, message):
        self.last_pong = message

@@ -1319,7 +1563,7 @@ class SingleNodeConnCB(NodeConnCB):
        def received_pong():
            return (self.last_pong.nonce == self.ping_counter)
        self.send_message(msg_ping(nonce=self.ping_counter))
-       success = wait_until(received_pong, timeout)
+       success = wait_until(received_pong, timeout=timeout)
        self.ping_counter += 1
        return success

@@ -1344,7 +1588,11 @@ class NodeConn(asyncore.dispatcher):
        b"reject": msg_reject,
        b"mempool": msg_mempool,
        b"feefilter": msg_feefilter,
-       b"sendheaders": msg_sendheaders
+       b"sendheaders": msg_sendheaders,
+       b"sendcmpct": msg_sendcmpct,
+       b"cmpctblock": msg_cmpctblock,
+       b"getblocktxn": msg_getblocktxn,
+       b"blocktxn": msg_blocktxn
    }
|
||||
MAGIC_BYTES = {
|
||||
"mainnet": b"\xf9\xbe\xb4\xd9", # mainnet
|
||||
|
||||
@@ -58,7 +58,7 @@ def netstat(typ='tcp'):
|
||||
To get pid of all network process running on system, you must run this script
|
||||
as superuser
|
||||
'''
|
||||
with open('/proc/net/'+typ,'r') as f:
|
||||
with open('/proc/net/'+typ,'r',encoding='utf8') as f:
|
||||
content = f.readlines()
|
||||
content.pop(0)
|
||||
result = []
|
||||
|
||||
@@ -882,7 +882,7 @@ def SignatureHash(script, txTo, inIdx, hashtype):
|
||||
tmp = txtmp.vout[outIdx]
|
||||
txtmp.vout = []
|
||||
for i in range(outIdx):
|
||||
txtmp.vout.append(CTxOut())
|
||||
txtmp.vout.append(CTxOut(-1))
|
||||
txtmp.vout.append(tmp)
|
||||
|
||||
for i in range(len(txtmp.vin)):
|
||||
|
||||
qa/rpc-tests/test_framework/siphash.py (new file, 64 lines)
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

#
# siphash.py - Specialized SipHash-2-4 implementations
#
# This implements SipHash-2-4 for 256-bit integers.

def rotl64(n, b):
    return n >> (64 - b) | (n & ((1 << (64 - b)) - 1)) << b

def siphash_round(v0, v1, v2, v3):
    v0 = (v0 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 13)
    v1 ^= v0
    v0 = rotl64(v0, 32)
    v2 = (v2 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 16)
    v3 ^= v2
    v0 = (v0 + v3) & ((1 << 64) - 1)
    v3 = rotl64(v3, 21)
    v3 ^= v0
    v2 = (v2 + v1) & ((1 << 64) - 1)
    v1 = rotl64(v1, 17)
    v1 ^= v2
    v2 = rotl64(v2, 32)
    return (v0, v1, v2, v3)

def siphash256(k0, k1, h):
    n0 = h & ((1 << 64) - 1)
    n1 = (h >> 64) & ((1 << 64) - 1)
    n2 = (h >> 128) & ((1 << 64) - 1)
    n3 = (h >> 192) & ((1 << 64) - 1)
    v0 = 0x736f6d6570736575 ^ k0
    v1 = 0x646f72616e646f6d ^ k1
    v2 = 0x6c7967656e657261 ^ k0
    v3 = 0x7465646279746573 ^ k1 ^ n0
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n0
    v3 ^= n1
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n1
    v3 ^= n2
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n2
    v3 ^= n3
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= n3
    v3 ^= 0x2000000000000000
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0 ^= 0x2000000000000000
    v2 ^= 0xFF
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    v0, v1, v2, v3 = siphash_round(v0, v1, v2, v3)
    return v0 ^ v1 ^ v2 ^ v3
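In BIP152 this SipHash-2-4 variant is what turns a (w)txid into a 6-byte short transaction id. A rough sketch of that use; the real keys are derived from the compact block header and nonce elsewhere in the framework, so the values below are placeholders:

# Placeholder keys and txid, for illustration only.
k0, k1 = 0x0706050403020100, 0x0f0e0d0c0b0a0908
txid = 0x1234                                          # a full 256-bit hash in practice
short_id = siphash256(k0, k1, txid) & 0xffffffffffff   # keep the low 6 bytes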
@@ -21,7 +21,6 @@ from .util import (
|
||||
sync_mempools,
|
||||
stop_nodes,
|
||||
stop_node,
|
||||
wait_bitcoinds,
|
||||
enable_coverage,
|
||||
check_json_precision,
|
||||
initialize_chain_clean,
|
||||
@@ -81,7 +80,6 @@ class BitcoinTestFramework(object):
|
||||
"""
|
||||
assert not self.is_network_split
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
self.setup_network(True)
|
||||
|
||||
def sync_all(self):
|
||||
@@ -100,7 +98,6 @@ class BitcoinTestFramework(object):
|
||||
"""
|
||||
assert self.is_network_split
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
self.setup_network(False)
|
||||
|
||||
def main(self):
|
||||
@@ -123,7 +120,8 @@ class BitcoinTestFramework(object):
|
||||
self.add_options(parser)
|
||||
(self.options, self.args) = parser.parse_args()
|
||||
|
||||
self.options.tmpdir += '/' + str(self.options.port_seed)
|
||||
# backup dir variable for removal at cleanup
|
||||
self.options.root, self.options.tmpdir = self.options.tmpdir, self.options.tmpdir + '/' + str(self.options.port_seed)
|
||||
|
||||
if self.options.trace_rpc:
|
||||
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
|
||||
@@ -167,13 +165,14 @@ class BitcoinTestFramework(object):
|
||||
if not self.options.noshutdown:
|
||||
print("Stopping nodes")
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
else:
|
||||
print("Note: bitcoinds were not stopped and may still be running")
|
||||
|
||||
if not self.options.nocleanup and not self.options.noshutdown and success:
|
||||
print("Cleaning up")
|
||||
shutil.rmtree(self.options.tmpdir)
|
||||
if not os.listdir(self.options.root):
|
||||
os.rmdir(self.options.root)
|
||||
else:
|
||||
print("Not cleaning up dir %s" % self.options.tmpdir)
|
||||
|
||||
|
||||
@@ -157,7 +157,7 @@ def initialize_datadir(dirname, n):
|
||||
if not os.path.isdir(datadir):
|
||||
os.makedirs(datadir)
|
||||
rpc_u, rpc_p = rpc_auth_pair(n)
|
||||
with open(os.path.join(datadir, "bitcoin.conf"), 'w') as f:
|
||||
with open(os.path.join(datadir, "bitcoin.conf"), 'w', encoding='utf8') as f:
|
||||
f.write("regtest=1\n")
|
||||
f.write("rpcuser=" + rpc_u + "\n")
|
||||
f.write("rpcpassword=" + rpc_p + "\n")
|
||||
@@ -171,7 +171,15 @@ def rpc_auth_pair(n):
|
||||
|
||||
def rpc_url(i, rpchost=None):
|
||||
rpc_u, rpc_p = rpc_auth_pair(i)
|
||||
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, rpchost or '127.0.0.1', rpc_port(i))
|
||||
host = '127.0.0.1'
|
||||
port = rpc_port(i)
|
||||
if rpchost:
|
||||
parts = rpchost.split(':')
|
||||
if len(parts) == 2:
|
||||
host, port = parts
|
||||
else:
|
||||
host = rpchost
|
||||
return "http://%s:%s@%s:%d" % (rpc_u, rpc_p, host, int(port))
|
||||
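With this change -rpchost may carry an optional :port that overrides the default RPC port for that node. Roughly how the helper behaves (credentials and ports shown as placeholders):

# Illustration only; user/password come from rpc_auth_pair(i).
rpc_url(0)                       # http://<user>:<pass>@127.0.0.1:<rpc_port(0)>
rpc_url(0, "10.0.0.5")           # host overridden, default port kept
rpc_url(0, "10.0.0.5:19000")     # host and port both overridden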
|
||||
def wait_for_bitcoind_start(process, url, i):
|
||||
'''
|
||||
@@ -254,7 +262,6 @@ def initialize_chain(test_dir, num_nodes):
|
||||
|
||||
# Shut them down, and clean up cache directories:
|
||||
stop_nodes(rpcs)
|
||||
wait_bitcoinds()
|
||||
disable_mocktime()
|
||||
for i in range(MAX_NODES):
|
||||
os.remove(log_filename("cache", i, "debug.log"))
|
||||
@@ -353,6 +360,7 @@ def stop_nodes(nodes):
|
||||
except http.client.CannotSendRequest as e:
|
||||
print("WARN: Unable to stop node: " + repr(e))
|
||||
del nodes[:] # Emptying array closes connections as a side effect
|
||||
wait_bitcoinds()
|
||||
|
||||
def set_node_times(nodes, t):
|
||||
for node in nodes:
|
||||
|
||||
qa/rpc-tests/wallet-dump.py (new executable file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
#!/usr/bin/env python3
|
||||
# Copyright (c) 2016 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
from test_framework.test_framework import BitcoinTestFramework
|
||||
from test_framework.util import (start_nodes, start_node, assert_equal, bitcoind_processes)
|
||||
|
||||
|
||||
def read_dump(file_name, addrs, hd_master_addr_old):
|
||||
"""
|
||||
Read the given dump, count the addrs that match, count change and reserve.
|
||||
Also check that the old hd_master is inactive
|
||||
"""
|
||||
with open(file_name, encoding='utf8') as inputfile:
|
||||
found_addr = 0
|
||||
found_addr_chg = 0
|
||||
found_addr_rsv = 0
|
||||
hd_master_addr_ret = None
|
||||
for line in inputfile:
|
||||
# only read non comment lines
|
||||
if line[0] != "#" and len(line) > 10:
|
||||
# split out some data
|
||||
key_label, comment = line.split("#")
|
||||
# key = key_label.split(" ")[0]
|
||||
keytype = key_label.split(" ")[2]
|
||||
if len(comment) > 1:
|
||||
addr_keypath = comment.split(" addr=")[1]
|
||||
addr = addr_keypath.split(" ")[0]
|
||||
keypath = None
|
||||
if keytype == "inactivehdmaster=1":
|
||||
# ensure the old master is still available
|
||||
assert(hd_master_addr_old == addr)
|
||||
elif keytype == "hdmaster=1":
|
||||
# ensure we have generated a new hd master key
|
||||
assert(hd_master_addr_old != addr)
|
||||
hd_master_addr_ret = addr
|
||||
else:
|
||||
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]
|
||||
|
||||
# count key types
|
||||
for addrObj in addrs:
|
||||
if addrObj['address'] == addr and addrObj['hdkeypath'] == keypath and keytype == "label=":
|
||||
found_addr += 1
|
||||
break
|
||||
elif keytype == "change=1":
|
||||
found_addr_chg += 1
|
||||
break
|
||||
elif keytype == "reserve=1":
|
||||
found_addr_rsv += 1
|
||||
break
|
||||
return found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_ret
|
||||
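read_dump() splits each non-comment line at the '#': the left half ends in the key-type token and the right half carries 'addr=' plus, for HD keys, 'hdkeypath='. A made-up line run through the same splits (the dump line format here is only illustrative):

# Hypothetical dump line, for illustration of the parsing above only.
line = "cTexampleKey 1970-01-01T00:00:01Z label= # addr=mxExampleAddr hdkeypath=m/0'/0'/5'\n"
key_label, comment = line.split("#")
keytype = key_label.split(" ")[2]                       # "label="
addr_keypath = comment.split(" addr=")[1]
addr = addr_keypath.split(" ")[0]                       # "mxExampleAddr"
keypath = addr_keypath.rstrip().split("hdkeypath=")[1]  # "m/0'/0'/5'"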
|
||||
|
||||
class WalletDumpTest(BitcoinTestFramework):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.setup_clean_chain = False
|
||||
self.num_nodes = 1
|
||||
self.extra_args = [["-keypool=90"]]
|
||||
|
||||
def setup_network(self, split=False):
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
|
||||
|
||||
def run_test (self):
|
||||
tmpdir = self.options.tmpdir
|
||||
|
||||
# generate 20 addresses to compare against the dump
|
||||
test_addr_count = 20
|
||||
addrs = []
|
||||
for i in range(0,test_addr_count):
|
||||
addr = self.nodes[0].getnewaddress()
|
||||
vaddr= self.nodes[0].validateaddress(addr) #required to get hd keypath
|
||||
addrs.append(vaddr)
|
||||
# Should be a no-op:
|
||||
self.nodes[0].keypoolrefill()
|
||||
|
||||
# dump unencrypted wallet
|
||||
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.unencrypted.dump")
|
||||
|
||||
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_unenc = \
|
||||
read_dump(tmpdir + "/node0/wallet.unencrypted.dump", addrs, None)
|
||||
assert_equal(found_addr, test_addr_count) # all keys must be in the dump
|
||||
assert_equal(found_addr_chg, 50) # 50 blocks were mined
|
||||
assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
|
||||
|
||||
#encrypt wallet, restart, unlock and dump
|
||||
self.nodes[0].encryptwallet('test')
|
||||
bitcoind_processes[0].wait()
|
||||
self.nodes[0] = start_node(0, self.options.tmpdir, self.extra_args[0])
|
||||
self.nodes[0].walletpassphrase('test', 10)
|
||||
# Should be a no-op:
|
||||
self.nodes[0].keypoolrefill()
|
||||
self.nodes[0].dumpwallet(tmpdir + "/node0/wallet.encrypted.dump")
|
||||
|
||||
found_addr, found_addr_chg, found_addr_rsv, hd_master_addr_enc = \
|
||||
read_dump(tmpdir + "/node0/wallet.encrypted.dump", addrs, hd_master_addr_unenc)
|
||||
assert_equal(found_addr, test_addr_count)
|
||||
assert_equal(found_addr_chg, 90 + 1 + 50) # old reserve keys are marked as change now
|
||||
assert_equal(found_addr_rsv, 90 + 1) # keypool size (TODO: fix off-by-one)
|
||||
|
||||
if __name__ == '__main__':
|
||||
WalletDumpTest().main ()
|
||||
@@ -31,7 +31,7 @@ class WalletHDTest(BitcoinTestFramework):
|
||||
tmpdir = self.options.tmpdir
|
||||
|
||||
# Make sure we use hd, keep masterkeyid
|
||||
masterkeyid = self.nodes[1].getwalletinfo()['masterkeyid']
|
||||
masterkeyid = self.nodes[1].getwalletinfo()['hdmasterkeyid']
|
||||
assert_equal(len(masterkeyid), 40)
|
||||
|
||||
# Import a non-HD private key in the HD wallet
|
||||
@@ -39,8 +39,8 @@ class WalletHDTest(BitcoinTestFramework):
|
||||
self.nodes[1].importprivkey(self.nodes[0].dumpprivkey(non_hd_add))
|
||||
|
||||
# This should be enough to keep the master key and the non-HD key
|
||||
self.nodes[1].backupwallet(tmpdir + "hd.bak")
|
||||
#self.nodes[1].dumpwallet(tmpdir + "hd.dump")
|
||||
self.nodes[1].backupwallet(tmpdir + "/hd.bak")
|
||||
#self.nodes[1].dumpwallet(tmpdir + "/hd.dump")
|
||||
|
||||
# Derive some HD addresses and remember the last
|
||||
# Also send funds to each add
|
||||
@@ -63,7 +63,7 @@ class WalletHDTest(BitcoinTestFramework):
|
||||
print("Restore backup ...")
|
||||
self.stop_node(1)
|
||||
os.remove(self.options.tmpdir + "/node1/regtest/wallet.dat")
|
||||
shutil.copyfile(tmpdir + "hd.bak", tmpdir + "/node1/regtest/wallet.dat")
|
||||
shutil.copyfile(tmpdir + "/hd.bak", tmpdir + "/node1/regtest/wallet.dat")
|
||||
self.nodes[1] = start_node(1, self.options.tmpdir, self.node_args[1])
|
||||
#connect_nodes_bi(self.nodes, 0, 1)
|
||||
|
||||
|
||||
@@ -18,9 +18,10 @@ class WalletTest (BitcoinTestFramework):
|
||||
super().__init__()
|
||||
self.setup_clean_chain = True
|
||||
self.num_nodes = 4
|
||||
self.extra_args = [['-usehd={:d}'.format(i%2==0)] for i in range(4)]
|
||||
|
||||
def setup_network(self, split=False):
|
||||
self.nodes = start_nodes(3, self.options.tmpdir)
|
||||
self.nodes = start_nodes(3, self.options.tmpdir, self.extra_args[:3])
|
||||
connect_nodes_bi(self.nodes,0,1)
|
||||
connect_nodes_bi(self.nodes,1,2)
|
||||
connect_nodes_bi(self.nodes,0,2)
|
||||
@@ -154,7 +155,7 @@ class WalletTest (BitcoinTestFramework):
|
||||
txid2 = self.nodes[1].sendtoaddress(self.nodes[0].getnewaddress(), 1)
|
||||
sync_mempools(self.nodes)
|
||||
|
||||
self.nodes.append(start_node(3, self.options.tmpdir))
|
||||
self.nodes.append(start_node(3, self.options.tmpdir, self.extra_args[3]))
|
||||
connect_nodes_bi(self.nodes, 0, 3)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
@@ -198,7 +199,6 @@ class WalletTest (BitcoinTestFramework):
|
||||
|
||||
#do some -walletbroadcast tests
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
self.nodes = start_nodes(3, self.options.tmpdir, [["-walletbroadcast=0"],["-walletbroadcast=0"],["-walletbroadcast=0"]])
|
||||
connect_nodes_bi(self.nodes,0,1)
|
||||
connect_nodes_bi(self.nodes,1,2)
|
||||
@@ -224,7 +224,6 @@ class WalletTest (BitcoinTestFramework):
|
||||
|
||||
#restart the nodes with -walletbroadcast=1
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
self.nodes = start_nodes(3, self.options.tmpdir)
|
||||
connect_nodes_bi(self.nodes,0,1)
|
||||
connect_nodes_bi(self.nodes,1,2)
|
||||
@@ -334,7 +333,6 @@ class WalletTest (BitcoinTestFramework):
|
||||
for m in maintenance:
|
||||
print("check " + m)
|
||||
stop_nodes(self.nodes)
|
||||
wait_bitcoinds()
|
||||
self.nodes = start_nodes(3, self.options.tmpdir, [[m]] * 3)
|
||||
while m == '-reindex' and [block_count] * 3 != [self.nodes[i].getblockcount() for i in range(3)]:
|
||||
# reindex will leave rpc warm up "early"; Wait for it to finish
|
||||
|
||||
@@ -45,12 +45,12 @@ class WalletBackupTest(BitcoinTestFramework):
|
||||
super().__init__()
|
||||
self.setup_clean_chain = True
|
||||
self.num_nodes = 4
|
||||
# nodes 1, 2,3 are spenders, let's give them a keypool=100
|
||||
self.extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
|
||||
|
||||
# This mirrors how the network was setup in the bash test
|
||||
def setup_network(self, split=False):
|
||||
# nodes 1, 2,3 are spenders, let's give them a keypool=100
|
||||
extra_args = [["-keypool=100"], ["-keypool=100"], ["-keypool=100"], []]
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, extra_args)
|
||||
self.nodes = start_nodes(self.num_nodes, self.options.tmpdir, self.extra_args)
|
||||
connect_nodes(self.nodes[0], 3)
|
||||
connect_nodes(self.nodes[1], 3)
|
||||
connect_nodes(self.nodes[2], 3)
|
||||
@@ -79,6 +79,7 @@ class WalletBackupTest(BitcoinTestFramework):
|
||||
# Must sync mempools before mining.
|
||||
sync_mempools(self.nodes)
|
||||
self.nodes[3].generate(1)
|
||||
sync_blocks(self.nodes)
|
||||
|
||||
# As above, this mirrors the original bash test.
|
||||
def start_three(self):
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
<plist version="0.9">
|
||||
<dict>
|
||||
<key>LSMinimumSystemVersion</key>
|
||||
<string>10.7.0</string>
|
||||
<string>10.8.0</string>
|
||||
|
||||
<key>LSArchitecturePriority</key>
|
||||
<array>
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
#!/usr/bin/python
|
||||
# Copyright (c) 2012-2016 The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
'''
|
||||
Extract _("...") strings for translation and convert to Qt stringdefs so that
|
||||
they can be picked up by Qt linguist.
|
||||
|
||||
@@ -7,8 +7,8 @@ QT_TS = \
|
||||
qt/locale/bitcoin_af_ZA.ts \
|
||||
qt/locale/bitcoin_ar.ts \
|
||||
qt/locale/bitcoin_be_BY.ts \
|
||||
qt/locale/bitcoin_bg.ts \
|
||||
qt/locale/bitcoin_bg_BG.ts \
|
||||
qt/locale/bitcoin_bg.ts \
|
||||
qt/locale/bitcoin_ca_ES.ts \
|
||||
qt/locale/bitcoin_ca.ts \
|
||||
qt/locale/bitcoin_ca@valencia.ts \
|
||||
@@ -60,6 +60,7 @@ QT_TS = \
|
||||
qt/locale/bitcoin_mn.ts \
|
||||
qt/locale/bitcoin_ms_MY.ts \
|
||||
qt/locale/bitcoin_nb.ts \
|
||||
qt/locale/bitcoin_ne.ts \
|
||||
qt/locale/bitcoin_nl.ts \
|
||||
qt/locale/bitcoin_pam.ts \
|
||||
qt/locale/bitcoin_pl.ts \
|
||||
@@ -280,7 +281,7 @@ RES_ICONS = \
|
||||
qt/res/icons/verify.png \
|
||||
qt/res/icons/transaction_abandoned.png
|
||||
|
||||
BITCOIN_QT_CPP = \
|
||||
BITCOIN_QT_BASE_CPP = \
|
||||
qt/bantablemodel.cpp \
|
||||
qt/bitcoinaddressvalidator.cpp \
|
||||
qt/bitcoinamountfield.cpp \
|
||||
@@ -303,12 +304,9 @@ BITCOIN_QT_CPP = \
|
||||
qt/trafficgraphwidget.cpp \
|
||||
qt/utilitydialog.cpp
|
||||
|
||||
if TARGET_WINDOWS
|
||||
BITCOIN_QT_CPP += qt/winshutdownmonitor.cpp
|
||||
endif
|
||||
BITCOIN_QT_WINDOWS_CPP = qt/winshutdownmonitor.cpp
|
||||
|
||||
if ENABLE_WALLET
|
||||
BITCOIN_QT_CPP += \
|
||||
BITCOIN_QT_WALLET_CPP = \
|
||||
qt/addressbookpage.cpp \
|
||||
qt/addresstablemodel.cpp \
|
||||
qt/askpassphrasedialog.cpp \
|
||||
@@ -335,6 +333,13 @@ BITCOIN_QT_CPP += \
|
||||
qt/walletmodel.cpp \
|
||||
qt/walletmodeltransaction.cpp \
|
||||
qt/walletview.cpp
|
||||
|
||||
BITCOIN_QT_CPP = $(BITCOIN_QT_BASE_CPP)
|
||||
if TARGET_WINDOWS
|
||||
BITCOIN_QT_CPP += $(BITCOIN_QT_WINDOWS_CPP)
|
||||
endif
|
||||
if ENABLE_WALLET
|
||||
BITCOIN_QT_CPP += $(BITCOIN_QT_WALLET_CPP)
|
||||
endif
|
||||
|
||||
RES_IMAGES =
|
||||
@@ -403,7 +408,7 @@ $(srcdir)/qt/bitcoinstrings.cpp: $(libbitcoin_server_a_SOURCES) $(libbitcoin_wal
|
||||
@test -n $(XGETTEXT) || echo "xgettext is required for updating translations"
|
||||
$(AM_V_GEN) cd $(srcdir); XGETTEXT=$(XGETTEXT) PACKAGE_NAME="$(PACKAGE_NAME)" COPYRIGHT_HOLDERS="$(COPYRIGHT_HOLDERS)" COPYRIGHT_HOLDERS_SUBSTITUTION="$(COPYRIGHT_HOLDERS_SUBSTITUTION)" $(PYTHON) ../share/qt/extract_strings_qt.py $^
|
||||
|
||||
translate: $(srcdir)/qt/bitcoinstrings.cpp $(QT_FORMS_UI) $(QT_FORMS_UI) $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(BITCOIN_MM)
|
||||
translate: $(srcdir)/qt/bitcoinstrings.cpp $(QT_FORMS_UI) $(QT_FORMS_UI) $(BITCOIN_QT_BASE_CPP) $(BITCOIN_QT_WINDOWS_CPP) $(BITCOIN_QT_WALLET_CPP) $(BITCOIN_QT_H) $(BITCOIN_MM)
|
||||
@test -n $(LUPDATE) || echo "lupdate is required for updating translations"
|
||||
$(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(LUPDATE) $^ -locations relative -no-obsolete -ts $(srcdir)/qt/locale/bitcoin_en.ts
|
||||
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
|
||||
#define MIN_TRANSACTION_BASE_SIZE (::GetSerializeSize(CTransaction(), SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS))
|
||||
|
||||
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
|
||||
CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID) :
|
||||
nonce(GetRand(std::numeric_limits<uint64_t>::max())),
|
||||
shorttxids(block.vtx.size() - 1), prefilledtxn(1), header(block) {
|
||||
FillShortTxIDSelector();
|
||||
@@ -25,7 +25,7 @@ CBlockHeaderAndShortTxIDs::CBlockHeaderAndShortTxIDs(const CBlock& block) :
|
||||
prefilledtxn[0] = {0, block.vtx[0]};
|
||||
for (size_t i = 1; i < block.vtx.size(); i++) {
|
||||
const CTransaction& tx = block.vtx[i];
|
||||
shorttxids[i - 1] = GetShortID(tx.GetHash());
|
||||
shorttxids[i - 1] = GetShortID(fUseWTXID ? tx.GetWitnessHash() : tx.GetHash());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -75,7 +75,7 @@ ReadStatus PartiallyDownloadedBlock::InitData(const CBlockHeaderAndShortTxIDs& c
|
||||
}
|
||||
prefilled_count = cmpctblock.prefilledtxn.size();
|
||||
|
||||
// Calculate map of txids -> positions and check mempool to see what we have (or dont)
|
||||
// Calculate map of txids -> positions and check mempool to see what we have (or don't)
|
||||
// Because well-formed cmpctblock messages will have a (relatively) uniform distribution
|
||||
// of short IDs, any highly-uneven distribution of elements can be safely treated as a
|
||||
// READ_STATUS_FAILED.
|
||||
|
||||
@@ -146,7 +146,7 @@ public:
|
||||
// Dummy for deserialization
|
||||
CBlockHeaderAndShortTxIDs() {}
|
||||
|
||||
CBlockHeaderAndShortTxIDs(const CBlock& block);
|
||||
CBlockHeaderAndShortTxIDs(const CBlock& block, bool fUseWTXID);
|
||||
|
||||
uint64_t GetShortID(const uint256& txhash) const;
|
||||
|
||||
|
||||
src/chain.h (10 changed lines)
@@ -137,15 +137,15 @@ enum BlockStatus: uint32_t {
|
||||
BLOCK_VALID_MASK = BLOCK_VALID_HEADER | BLOCK_VALID_TREE | BLOCK_VALID_TRANSACTIONS |
|
||||
BLOCK_VALID_CHAIN | BLOCK_VALID_SCRIPTS,
|
||||
|
||||
BLOCK_HAVE_DATA = 8, //! full block available in blk*.dat
|
||||
BLOCK_HAVE_UNDO = 16, //! undo data available in rev*.dat
|
||||
BLOCK_HAVE_DATA = 8, //!< full block available in blk*.dat
|
||||
BLOCK_HAVE_UNDO = 16, //!< undo data available in rev*.dat
|
||||
BLOCK_HAVE_MASK = BLOCK_HAVE_DATA | BLOCK_HAVE_UNDO,
|
||||
|
||||
BLOCK_FAILED_VALID = 32, //! stage after last reached validness failed
|
||||
BLOCK_FAILED_CHILD = 64, //! descends from failed block
|
||||
BLOCK_FAILED_VALID = 32, //!< stage after last reached validness failed
|
||||
BLOCK_FAILED_CHILD = 64, //!< descends from failed block
|
||||
BLOCK_FAILED_MASK = BLOCK_FAILED_VALID | BLOCK_FAILED_CHILD,
|
||||
|
||||
BLOCK_OPT_WITNESS = 128, //! block data in blk*.data was received with a witness-enforcing client
|
||||
BLOCK_OPT_WITNESS = 128, //!< block data in blk*.data was received with a witness-enforcing client
|
||||
};
|
||||
|
||||
/** The block chain is a tree shaped structure starting with the
|
||||
|
||||
@@ -92,10 +92,10 @@ public:
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1462060800; // May 1st, 2016
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017
|
||||
|
||||
// Deployment of SegWit (BIP141 and BIP143)
|
||||
// Deployment of SegWit (BIP141, BIP143, and BIP147)
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].bit = 1;
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 0;
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 0; // Never / undefined
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1479168000; // November 15th, 2016.
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1510704000; // November 15th, 2017.
|
||||
|
||||
/**
|
||||
* The message start string is designed to be unlikely to occur in normal data.
|
||||
@@ -114,12 +114,13 @@ public:
|
||||
assert(consensus.hashGenesisBlock == uint256S("0x000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f"));
|
||||
assert(genesis.hashMerkleRoot == uint256S("0x4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b"));
|
||||
|
||||
vSeeds.push_back(CDNSSeedData("bitcoin.sipa.be", "seed.bitcoin.sipa.be", true)); // Pieter Wuille
|
||||
vSeeds.push_back(CDNSSeedData("bluematt.me", "dnsseed.bluematt.me")); // Matt Corallo
|
||||
// Note that of those with the service bits flag, most only support a subset of possible options
|
||||
vSeeds.push_back(CDNSSeedData("bitcoin.sipa.be", "seed.bitcoin.sipa.be", true)); // Pieter Wuille, only supports x1, x5, x9, and xd
|
||||
vSeeds.push_back(CDNSSeedData("bluematt.me", "dnsseed.bluematt.me", true)); // Matt Corallo, only supports x9
|
||||
vSeeds.push_back(CDNSSeedData("dashjr.org", "dnsseed.bitcoin.dashjr.org")); // Luke Dashjr
|
||||
vSeeds.push_back(CDNSSeedData("bitcoinstats.com", "seed.bitcoinstats.com")); // Christian Decker
|
||||
vSeeds.push_back(CDNSSeedData("bitcoinstats.com", "seed.bitcoinstats.com", true)); // Christian Decker, supports x1 - xf
|
||||
vSeeds.push_back(CDNSSeedData("xf2.org", "bitseed.xf2.org")); // Jeff Garzik
|
||||
vSeeds.push_back(CDNSSeedData("bitcoin.jonasschnelli.ch", "seed.bitcoin.jonasschnelli.ch")); // Jonas Schnelli
|
||||
vSeeds.push_back(CDNSSeedData("bitcoin.jonasschnelli.ch", "seed.bitcoin.jonasschnelli.ch", true)); // Jonas Schnelli, only supports x1, x5, x9, and xd
|
||||
|
||||
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
|
||||
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
|
||||
@@ -188,7 +189,7 @@ public:
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nStartTime = 1456790400; // March 1st, 2016
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_CSV].nTimeout = 1493596800; // May 1st, 2017
|
||||
|
||||
// Deployment of SegWit (BIP141 and BIP143)
|
||||
// Deployment of SegWit (BIP141, BIP143, and BIP147)
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].bit = 1;
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nStartTime = 1462060800; // May 1st 2016
|
||||
consensus.vDeployments[Consensus::DEPLOYMENT_SEGWIT].nTimeout = 1493596800; // May 1st 2017
|
||||
@@ -303,6 +304,12 @@ public:
|
||||
base58Prefixes[EXT_PUBLIC_KEY] = boost::assign::list_of(0x04)(0x35)(0x87)(0xCF).convert_to_container<std::vector<unsigned char> >();
|
||||
base58Prefixes[EXT_SECRET_KEY] = boost::assign::list_of(0x04)(0x35)(0x83)(0x94).convert_to_container<std::vector<unsigned char> >();
|
||||
}
|
||||
|
||||
void UpdateBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
|
||||
{
|
||||
consensus.vDeployments[d].nStartTime = nStartTime;
|
||||
consensus.vDeployments[d].nTimeout = nTimeout;
|
||||
}
|
||||
};
|
||||
static CRegTestParams regTestParams;
|
||||
|
||||
@@ -330,4 +337,9 @@ void SelectParams(const std::string& network)
|
||||
SelectBaseParams(network);
|
||||
pCurrentParams = &Params(network);
|
||||
}
|
||||
|
||||
void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout)
|
||||
{
|
||||
regTestParams.UpdateBIP9Parameters(d, nStartTime, nTimeout);
|
||||
}
|
||||
|
||||
|
||||
@@ -112,4 +112,9 @@ CChainParams& Params(const std::string& chain);
|
||||
*/
|
||||
void SelectParams(const std::string& chain);
|
||||
|
||||
/**
|
||||
* Allows modifying the BIP9 regtest parameters.
|
||||
*/
|
||||
void UpdateRegtestBIP9Parameters(Consensus::DeploymentPos d, int64_t nStartTime, int64_t nTimeout);
|
||||
|
||||
#endif // BITCOIN_CHAINPARAMS_H
|
||||
|
||||
@@ -15,12 +15,12 @@
|
||||
|
||||
//! These need to be macros, as clientversion.cpp's and bitcoin*-res.rc's voodoo requires it
|
||||
#define CLIENT_VERSION_MAJOR 0
|
||||
#define CLIENT_VERSION_MINOR 12
|
||||
#define CLIENT_VERSION_REVISION 99
|
||||
#define CLIENT_VERSION_MINOR 13
|
||||
#define CLIENT_VERSION_REVISION 1
|
||||
#define CLIENT_VERSION_BUILD 0
|
||||
|
||||
//! Set to true for release, false for prerelease or test build
|
||||
#define CLIENT_VERSION_IS_RELEASE false
|
||||
#define CLIENT_VERSION_IS_RELEASE true
|
||||
|
||||
/**
|
||||
* Copyright year (2009-this)
|
||||
|
||||
@@ -10,8 +10,8 @@
|
||||
|
||||
/** The maximum allowed size for a serialized block, in bytes (only for buffer size limits) */
|
||||
static const unsigned int MAX_BLOCK_SERIALIZED_SIZE = 4000000;
|
||||
/** The maximum allowed cost for a block, see BIP 141 (network rule) */
|
||||
static const unsigned int MAX_BLOCK_COST = 4000000;
|
||||
/** The maximum allowed weight for a block, see BIP 141 (network rule) */
|
||||
static const unsigned int MAX_BLOCK_WEIGHT = 4000000;
|
||||
/** The maximum allowed size for a block excluding witness data, in bytes (network rule) */
|
||||
static const unsigned int MAX_BLOCK_BASE_SIZE = 1000000;
|
||||
/** The maximum allowed number of signature check operations in a block (network rule) */
|
||||
|
||||
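The rename from block "cost" to block "weight" follows the final BIP141 terminology; weight counts non-witness bytes four times and witness bytes once, so the 4,000,000 limit keeps a witness-free block within the old 1,000,000-byte base size. A minimal sketch of that relationship (not code from the tree):

# BIP141 weight, sketched; sizes are in bytes.
def block_weight(base_size, total_size):
    # base_size: serialization without witness data; total_size: with witness data
    return 3 * base_size + total_size

assert block_weight(1000000, 1000000) == 4000000   # witness-free block at the limit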
@@ -16,7 +16,7 @@ enum DeploymentPos
|
||||
{
|
||||
DEPLOYMENT_TESTDUMMY,
|
||||
DEPLOYMENT_CSV, // Deployment of BIP68, BIP112, and BIP113.
|
||||
DEPLOYMENT_SEGWIT, // Deployment of BIP141 and BIP143
|
||||
DEPLOYMENT_SEGWIT, // Deployment of BIP141, BIP143, and BIP147.
|
||||
// NOTE: Also add new deployments to VersionBitsDeploymentInfo in versionbits.cpp
|
||||
MAX_VERSION_BITS_DEPLOYMENTS
|
||||
};
|
||||
|
||||
@@ -22,9 +22,9 @@ static const unsigned char REJECT_CHECKPOINT = 0x43;
|
||||
class CValidationState {
|
||||
private:
|
||||
enum mode_state {
|
||||
MODE_VALID, //! everything ok
|
||||
MODE_INVALID, //! network rule violation (DoS value may be set)
|
||||
MODE_ERROR, //! run-time error
|
||||
MODE_VALID, //!< everything ok
|
||||
MODE_INVALID, //!< network rule violation (DoS value may be set)
|
||||
MODE_ERROR, //!< run-time error
|
||||
} mode;
|
||||
int nDoS;
|
||||
std::string strRejectReason;
|
||||
|
||||
@@ -151,11 +151,13 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey,
|
||||
void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry)
|
||||
{
|
||||
entry.pushKV("txid", tx.GetHash().GetHex());
|
||||
entry.pushKV("hash", tx.GetWitnessHash().GetHex());
|
||||
entry.pushKV("version", tx.nVersion);
|
||||
entry.pushKV("locktime", (int64_t)tx.nLockTime);
|
||||
|
||||
UniValue vin(UniValue::VARR);
|
||||
BOOST_FOREACH(const CTxIn& txin, tx.vin) {
|
||||
for (unsigned int i = 0; i < tx.vin.size(); i++) {
|
||||
const CTxIn& txin = tx.vin[i];
|
||||
UniValue in(UniValue::VOBJ);
|
||||
if (tx.IsCoinBase())
|
||||
in.pushKV("coinbase", HexStr(txin.scriptSig.begin(), txin.scriptSig.end()));
|
||||
@@ -166,6 +168,13 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry)
|
||||
o.pushKV("asm", ScriptToAsmStr(txin.scriptSig, true));
|
||||
o.pushKV("hex", HexStr(txin.scriptSig.begin(), txin.scriptSig.end()));
|
||||
in.pushKV("scriptSig", o);
|
||||
if (!tx.wit.IsNull() && i < tx.wit.vtxinwit.size() && !tx.wit.vtxinwit[i].IsNull()) {
|
||||
UniValue txinwitness(UniValue::VARR);
|
||||
for (const auto& item : tx.wit.vtxinwit[i].scriptWitness.stack) {
|
||||
txinwitness.push_back(HexStr(item.begin(), item.end()));
|
||||
}
|
||||
in.pushKV("txinwitness", txinwitness);
|
||||
}
|
||||
}
|
||||
in.pushKV("sequence", (int64_t)txin.nSequence);
|
||||
vin.push_back(in);
|
||||
|
||||
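The new branch adds a "txinwitness" array of hex-encoded witness stack items to each decoded input that actually carries a witness. Roughly what such an input could look like once decoded (values invented for illustration; only the field names follow the code above):

# Invented example of a decoded segwit input.
decoded_input = {
    "scriptSig": {"asm": "", "hex": ""},
    "txinwitness": [
        "304402203a...01",   # signature (hex), truncated placeholder
        "03ad1d8e...",       # pubkey (hex), truncated placeholder
    ],
    "sequence": 4294967295,
}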
@@ -35,7 +35,7 @@ void InterruptHTTPServer();
|
||||
void StopHTTPServer();
|
||||
|
||||
/** Handler for requests to a certain HTTP path */
|
||||
typedef boost::function<void(HTTPRequest* req, const std::string &)> HTTPRequestHandler;
|
||||
typedef boost::function<bool(HTTPRequest* req, const std::string &)> HTTPRequestHandler;
|
||||
/** Register handler for prefix.
|
||||
* If multiple handlers match a prefix, the first-registered one will
|
||||
* be invoked.
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
// Copyright (c) 2016 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
#ifndef BITCOIN_INDIRECTMAP_H
|
||||
#define BITCOIN_INDIRECTMAP_H
|
||||
|
||||
|
||||
src/init.cpp (90 changed lines)
@@ -379,7 +379,7 @@ std::string HelpMessage(HelpMessageMode mode)
|
||||
strUsage += HelpMessageOpt("-whitelist=<netmask>", _("Whitelist peers connecting from the given netmask or IP address. Can be specified multiple times.") +
|
||||
" " + _("Whitelisted peers cannot be DoS banned and their transactions are always relayed, even if they are already in the mempool, useful e.g. for a gateway"));
|
||||
strUsage += HelpMessageOpt("-whitelistrelay", strprintf(_("Accept relayed transactions received from whitelisted peers even when not relaying transactions (default: %d)"), DEFAULT_WHITELISTRELAY));
|
||||
strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
|
||||
strUsage += HelpMessageOpt("-whitelistforcerelay", strprintf(_("Force relay of transactions from whitelisted peers even if they violate local relay policy (default: %d)"), DEFAULT_WHITELISTFORCERELAY));
|
||||
strUsage += HelpMessageOpt("-maxuploadtarget=<n>", strprintf(_("Tries to keep outbound traffic under the given target (in MiB per 24h), 0 = no limit (default: %d)"), DEFAULT_MAX_UPLOAD_TARGET));
|
||||
|
||||
#ifdef ENABLE_WALLET
|
||||
@@ -410,6 +410,7 @@ std::string HelpMessage(HelpMessageMode mode)
|
||||
strUsage += HelpMessageOpt("-limitancestorsize=<n>", strprintf("Do not accept transactions whose size with all in-mempool ancestors exceeds <n> kilobytes (default: %u)", DEFAULT_ANCESTOR_SIZE_LIMIT));
|
||||
strUsage += HelpMessageOpt("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT));
|
||||
strUsage += HelpMessageOpt("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT));
|
||||
strUsage += HelpMessageOpt("-bip9params=deployment:start:end", "Use given start/end times for specified bip9 deployment (regtest-only)");
|
||||
}
|
||||
string debugCategories = "addrman, alert, bench, coindb, db, http, libevent, lock, mempool, mempoolrej, net, proxy, prune, rand, reindex, rpc, selectcoins, tor, zmq"; // Don't translate these and qt below
|
||||
if (mode == HMM_BITCOIN_QT)
|
||||
@@ -446,13 +447,13 @@ std::string HelpMessage(HelpMessageMode mode)
|
||||
strUsage += HelpMessageGroup(_("Node relay options:"));
|
||||
if (showDebug)
|
||||
strUsage += HelpMessageOpt("-acceptnonstdtxn", strprintf("Relay and mine \"non-standard\" transactions (%sdefault: %u)", "testnet/regtest only; ", !Params(CBaseChainParams::TESTNET).RequireStandard()));
|
||||
strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Minimum bytes per sigop in transactions we relay and mine (default: %u)"), DEFAULT_BYTES_PER_SIGOP));
|
||||
strUsage += HelpMessageOpt("-bytespersigop", strprintf(_("Equivalent bytes per sigop in transactions for relay and mining (default: %u)"), DEFAULT_BYTES_PER_SIGOP));
|
||||
strUsage += HelpMessageOpt("-datacarrier", strprintf(_("Relay and mine data carrier transactions (default: %u)"), DEFAULT_ACCEPT_DATACARRIER));
|
||||
strUsage += HelpMessageOpt("-datacarriersize", strprintf(_("Maximum size of data in data carrier transactions we relay and mine (default: %u)"), MAX_OP_RETURN_RELAY));
|
||||
strUsage += HelpMessageOpt("-mempoolreplacement", strprintf(_("Enable transaction replacement in the memory pool (default: %u)"), DEFAULT_ENABLE_REPLACEMENT));
|
||||
|
||||
strUsage += HelpMessageGroup(_("Block creation options:"));
|
||||
strUsage += HelpMessageOpt("-blockmaxcost=<n>", strprintf(_("Set maximum BIP141 block cost (default: %d)"), DEFAULT_BLOCK_MAX_COST));
|
||||
strUsage += HelpMessageOpt("-blockmaxweight=<n>", strprintf(_("Set maximum BIP141 block weight (default: %d)"), DEFAULT_BLOCK_MAX_WEIGHT));
|
||||
strUsage += HelpMessageOpt("-blockmaxsize=<n>", strprintf(_("Set maximum block size in bytes (default: %d)"), DEFAULT_BLOCK_MAX_SIZE));
|
||||
strUsage += HelpMessageOpt("-blockprioritysize=<n>", strprintf(_("Set maximum size of high-priority/low-fee transactions in bytes (default: %d)"), DEFAULT_BLOCK_PRIORITY_SIZE));
|
||||
if (showDebug)
|
||||
@@ -510,6 +511,21 @@ static void BlockNotifyCallback(bool initialSync, const CBlockIndex *pBlockIndex
|
||||
boost::thread t(runCommand, strCmd); // thread runs free
|
||||
}
|
||||
|
||||
static bool fHaveGenesis = false;
|
||||
static boost::mutex cs_GenesisWait;
|
||||
static CConditionVariable condvar_GenesisWait;
|
||||
|
||||
static void BlockNotifyGenesisWait(bool, const CBlockIndex *pBlockIndex)
|
||||
{
|
||||
if (pBlockIndex != NULL) {
|
||||
{
|
||||
boost::unique_lock<boost::mutex> lock_GenesisWait(cs_GenesisWait);
|
||||
fHaveGenesis = true;
|
||||
}
|
||||
condvar_GenesisWait.notify_all();
|
||||
}
|
||||
}
|
||||
|
||||
struct CImportingNow
|
||||
{
|
||||
CImportingNow() {
|
||||
@@ -975,6 +991,41 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
|
||||
fEnableReplacement = (std::find(vstrReplacementModes.begin(), vstrReplacementModes.end(), "fee") != vstrReplacementModes.end());
|
||||
}
|
||||
|
||||
if (!mapMultiArgs["-bip9params"].empty()) {
|
||||
// Allow overriding bip9 parameters for testing
|
||||
if (!Params().MineBlocksOnDemand()) {
|
||||
return InitError("BIP9 parameters may only be overridden on regtest.");
|
||||
}
|
||||
const vector<string>& deployments = mapMultiArgs["-bip9params"];
|
||||
for (auto i : deployments) {
|
||||
std::vector<std::string> vDeploymentParams;
|
||||
boost::split(vDeploymentParams, i, boost::is_any_of(":"));
|
||||
if (vDeploymentParams.size() != 3) {
|
||||
return InitError("BIP9 parameters malformed, expecting deployment:start:end");
|
||||
}
|
||||
int64_t nStartTime, nTimeout;
|
||||
if (!ParseInt64(vDeploymentParams[1], &nStartTime)) {
|
||||
return InitError(strprintf("Invalid nStartTime (%s)", vDeploymentParams[1]));
|
||||
}
|
||||
if (!ParseInt64(vDeploymentParams[2], &nTimeout)) {
|
||||
return InitError(strprintf("Invalid nTimeout (%s)", vDeploymentParams[2]));
|
||||
}
|
||||
bool found = false;
|
||||
for (int i=0; i<(int)Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++i)
|
||||
{
|
||||
if (vDeploymentParams[0].compare(VersionBitsDeploymentInfo[i].name) == 0) {
|
||||
UpdateRegtestBIP9Parameters(Consensus::DeploymentPos(i), nStartTime, nTimeout);
|
||||
found = true;
|
||||
LogPrintf("Setting BIP9 activation parameters for %s to start=%ld, timeout=%ld\n", vDeploymentParams[0], nStartTime, nTimeout);
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (!found) {
|
||||
return InitError(strprintf("Invalid deployment (%s)", vDeploymentParams[0]));
|
||||
}
|
||||
}
|
||||
}
|
||||
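The new -bip9params option (regtest only) takes deployment:start:end and patches the chosen deployment's start and timeout via UpdateRegtestBIP9Parameters. A small Python rendering of the same parsing, purely for illustration:

# Illustrative Python equivalent of the C++ parsing above.
def parse_bip9params(value, known_deployments):
    parts = value.split(":")
    if len(parts) != 3:
        raise ValueError("BIP9 parameters malformed, expecting deployment:start:end")
    name, start, timeout = parts[0], int(parts[1]), int(parts[2])
    if name not in known_deployments:
        raise ValueError("Invalid deployment (%s)" % name)
    return name, start, timeout

# e.g. bitcoind -regtest -bip9params=segwit:0:999999999999
parse_bip9params("segwit:0:999999999999", {"testdummy", "csv", "segwit"})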
|
||||
// ********************************************************* Step 4: application initialization: dir lock, daemonize, pidfile, debug log
|
||||
|
||||
// Initialize elliptic curve code
|
||||
@@ -1216,7 +1267,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
|
||||
// cache size calculations
|
||||
int64_t nTotalCache = (GetArg("-dbcache", nDefaultDbCache) << 20);
|
||||
nTotalCache = std::max(nTotalCache, nMinDbCache << 20); // total cache cannot be less than nMinDbCache
|
||||
nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greated than nMaxDbcache
|
||||
nTotalCache = std::min(nTotalCache, nMaxDbCache << 20); // total cache cannot be greater than nMaxDbcache
|
||||
int64_t nBlockTreeDBCache = nTotalCache / 8;
|
||||
nBlockTreeDBCache = std::min(nBlockTreeDBCache, (GetBoolArg("-txindex", DEFAULT_TXINDEX) ? nMaxBlockDBAndTxIndexCache : nMaxBlockDBCache) << 20);
|
||||
nTotalCache -= nBlockTreeDBCache;
|
||||
@@ -1286,7 +1337,7 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
|
||||
break;
|
||||
}
|
||||
|
||||
if (!fReindex) {
|
||||
if (!fReindex && chainActive.Tip() != NULL) {
|
||||
uiInterface.InitMessage(_("Rewinding blocks..."));
|
||||
if (!RewindBlockIndex(chainparams)) {
|
||||
strLoadError = _("Unable to rewind the database to a pre-fork state. You will need to redownload the blockchain");
|
||||
@@ -1403,6 +1454,17 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
|
||||
|
||||
// ********************************************************* Step 10: import blocks
|
||||
|
||||
if (!CheckDiskSpace())
|
||||
return false;
|
||||
|
||||
// Either install a handler to notify us when genesis activates, or set fHaveGenesis directly.
|
||||
// No locking, as this happens before any background thread is started.
|
||||
if (chainActive.Tip() == NULL) {
|
||||
uiInterface.NotifyBlockTip.connect(BlockNotifyGenesisWait);
|
||||
} else {
|
||||
fHaveGenesis = true;
|
||||
}
|
||||
|
||||
if (mapArgs.count("-blocknotify"))
|
||||
uiInterface.NotifyBlockTip.connect(BlockNotifyCallback);
|
||||
|
||||
@@ -1412,26 +1474,20 @@ bool AppInit2(boost::thread_group& threadGroup, CScheduler& scheduler)
|
||||
BOOST_FOREACH(const std::string& strFile, mapMultiArgs["-loadblock"])
|
||||
vImportFiles.push_back(strFile);
|
||||
}
|
||||
|
||||
threadGroup.create_thread(boost::bind(&ThreadImport, vImportFiles));
|
||||
|
||||
// Wait for genesis block to be processed
|
||||
bool fHaveGenesis = false;
|
||||
while (!fHaveGenesis && !fRequestShutdown) {
|
||||
{
|
||||
LOCK(cs_main);
|
||||
fHaveGenesis = (chainActive.Tip() != NULL);
|
||||
}
|
||||
|
||||
if (!fHaveGenesis) {
|
||||
MilliSleep(10);
|
||||
{
|
||||
boost::unique_lock<boost::mutex> lock(cs_GenesisWait);
|
||||
while (!fHaveGenesis) {
|
||||
condvar_GenesisWait.wait(lock);
|
||||
}
|
||||
uiInterface.NotifyBlockTip.disconnect(BlockNotifyGenesisWait);
|
||||
}
|
||||
|
||||
// ********************************************************* Step 11: start node
|
||||
|
||||
if (!CheckDiskSpace())
|
||||
return false;
|
||||
|
||||
if (!strErrors.str().empty())
|
||||
return InitError(strErrors.str());
|
||||
|
||||
|
||||
src/main.cpp (247 changed lines)
@@ -1,5 +1,5 @@
|
||||
// Copyright (c) 2009-2010 Satoshi Nakamoto
|
||||
// Copyright (c) 2009-2015 The Bitcoin Core developers
|
||||
// Copyright (c) 2009-2016 The Bitcoin Core developers
|
||||
// Distributed under the MIT software license, see the accompanying
|
||||
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
@@ -74,7 +74,6 @@ bool fHavePruned = false;
|
||||
bool fPruneMode = false;
|
||||
bool fIsBareMultisigStd = DEFAULT_PERMIT_BAREMULTISIG;
|
||||
bool fRequireStandard = true;
|
||||
unsigned int nBytesPerSigOp = DEFAULT_BYTES_PER_SIGOP;
|
||||
bool fCheckBlockIndex = false;
|
||||
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
|
||||
size_t nCoinCacheUsage = 5000 * 300;
|
||||
@@ -293,10 +292,21 @@ struct CNodeState {
|
||||
bool fPreferHeaders;
|
||||
//! Whether this peer wants invs or cmpctblocks (when possible) for block announcements.
|
||||
bool fPreferHeaderAndIDs;
|
||||
//! Whether this peer will send us cmpctblocks if we request them
|
||||
/**
|
||||
* Whether this peer will send us cmpctblocks if we request them.
|
||||
* This is not used to gate request logic, as we really only care about fSupportsDesiredCmpctVersion,
|
||||
* but is used as a flag to "lock in" the version of compact blocks (fWantsCmpctWitness) we send.
|
||||
*/
|
||||
bool fProvidesHeaderAndIDs;
|
||||
//! Whether this peer can give us witnesses
|
||||
bool fHaveWitness;
|
||||
//! Whether this peer wants witnesses in cmpctblocks/blocktxns
|
||||
bool fWantsCmpctWitness;
|
||||
/**
|
||||
* If we've announced NODE_WITNESS to this peer: whether the peer sends witnesses in cmpctblocks/blocktxns,
|
||||
* otherwise: whether this peer sends non-witnesses in cmpctblocks/blocktxns.
|
||||
*/
|
||||
bool fSupportsDesiredCmpctVersion;
|
||||
|
||||
CNodeState() {
|
||||
fCurrentlyConnected = false;
|
||||
@@ -317,6 +327,8 @@ struct CNodeState {
|
||||
fPreferHeaderAndIDs = false;
|
||||
fProvidesHeaderAndIDs = false;
|
||||
fHaveWitness = false;
|
||||
fWantsCmpctWitness = false;
|
||||
fSupportsDesiredCmpctVersion = false;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -476,8 +488,8 @@ void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) {
|
||||
}
|
||||
|
||||
void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pfrom) {
|
||||
if (nLocalServices & NODE_WITNESS) {
|
||||
// Don't ever request compact blocks when segwit is enabled.
|
||||
if (!nodestate->fSupportsDesiredCmpctVersion) {
|
||||
// Never ask from peers who can't provide witnesses.
|
||||
return;
|
||||
}
|
||||
if (nodestate->fProvidesHeaderAndIDs) {
|
||||
@@ -485,7 +497,7 @@ void MaybeSetPeerAsAnnouncingHeaderAndIDs(const CNodeState* nodestate, CNode* pf
|
||||
if (nodeid == pfrom->GetId())
|
||||
return;
|
||||
bool fAnnounceUsingCMPCTBLOCK = false;
|
||||
uint64_t nCMPCTBLOCKVersion = 1;
|
||||
uint64_t nCMPCTBLOCKVersion = (nLocalServices & NODE_WITNESS) ? 2 : 1;
|
||||
if (lNodesAnnouncingHeaderAndIDs.size() >= 3) {
|
||||
// As per BIP152, we only get 3 of our peers to announce
|
||||
// blocks using compact encodings.
|
||||
@@ -538,7 +550,7 @@ CBlockIndex* LastCommonAncestor(CBlockIndex* pa, CBlockIndex* pb) {
|
||||
|
||||
/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has
|
||||
* at most count entries. */
|
||||
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller) {
|
||||
void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) {
|
||||
if (count == 0)
|
||||
return;
|
||||
|
||||
@@ -595,6 +607,10 @@ void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<CBl
|
||||
// We consider the chain that this peer is on invalid.
|
||||
return;
|
||||
}
|
||||
if (!State(nodeid)->fHaveWitness && IsWitnessEnabled(pindex->pprev, consensusParams)) {
|
||||
// We wouldn't download this block or its descendants from this peer.
|
||||
return;
|
||||
}
|
||||
if (pindex->nStatus & BLOCK_HAVE_DATA || chainActive.Contains(pindex)) {
|
||||
if (pindex->nChainTx)
|
||||
state->pindexLastCommonBlock = pindex;
|
||||
@@ -694,8 +710,8 @@ bool AddOrphanTx(const CTransaction& tx, NodeId peer) EXCLUSIVE_LOCKS_REQUIRED(c
|
||||
// have been mined or received.
|
||||
// 100 orphans, each of which is at most 99,999 bytes big is
|
||||
// at most 10 megabytes of orphans and somewhat more byprev index (in the worst case):
|
||||
unsigned int sz = GetTransactionCost(tx);
|
||||
if (sz >= MAX_STANDARD_TX_COST)
|
||||
unsigned int sz = GetTransactionWeight(tx);
|
||||
if (sz >= MAX_STANDARD_TX_WEIGHT)
|
||||
{
|
||||
LogPrint("mempool", "ignoring large orphan tx (size: %u, hash: %s)\n", sz, hash.ToString());
|
||||
return false;
|
||||
@@ -1144,13 +1160,14 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
}
|
||||
|
||||
// Reject transactions with witness before segregated witness activates (override with -prematurewitness)
|
||||
if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus())) {
|
||||
bool witnessEnabled = IsWitnessEnabled(chainActive.Tip(), Params().GetConsensus());
|
||||
if (!GetBoolArg("-prematurewitness",false) && !tx.wit.IsNull() && !witnessEnabled) {
|
||||
return state.DoS(0, false, REJECT_NONSTANDARD, "no-witness-yet", true);
|
||||
}
|
||||
|
||||
// Rather not work on nonstandard transactions (unless -testnet/-regtest)
|
||||
string reason;
|
||||
if (fRequireStandard && !IsStandardTx(tx, reason))
|
||||
if (fRequireStandard && !IsStandardTx(tx, reason, witnessEnabled))
|
||||
return state.DoS(0, false, REJECT_NONSTANDARD, reason);
|
||||
|
||||
// Only accept nLockTime-using transactions that can be mined in the next
|
||||
@@ -1179,7 +1196,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
// nSequence >= maxint-1 on all inputs.
|
||||
//
|
||||
// maxint-1 is picked to still allow use of nLockTime by
|
||||
// non-replacable transactions. All inputs rather than just one
|
||||
// non-replaceable transactions. All inputs rather than just one
|
||||
// is for the sake of multi-party protocols, where we don't
|
||||
// want a single party to be able to disable replacement.
|
||||
//
|
||||
@@ -1265,6 +1282,10 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
if (fRequireStandard && !AreInputsStandard(tx, view))
|
||||
return state.Invalid(false, REJECT_NONSTANDARD, "bad-txns-nonstandard-inputs");
|
||||
|
||||
// Check for non-standard witness in P2WSH
|
||||
if (!tx.wit.IsNull() && fRequireStandard && !IsWitnessStandard(tx, view))
|
||||
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-witness-nonstandard", true);
|
||||
|
||||
int64_t nSigOpsCost = GetTransactionSigOpCost(tx, view, STANDARD_SCRIPT_VERIFY_FLAGS);
|
||||
|
||||
CAmount nValueOut = tx.GetValueOut();
|
||||
@@ -1296,7 +1317,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
// itself can contain sigops MAX_STANDARD_TX_SIGOPS is less than
|
||||
// MAX_BLOCK_SIGOPS; we still consider this an invalid rather than
|
||||
// merely non-standard transaction.
|
||||
if ((nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST) || (nBytesPerSigOp && nSigOpsCost > nSize * WITNESS_SCALE_FACTOR / nBytesPerSigOp))
|
||||
if (nSigOpsCost > MAX_STANDARD_TX_SIGOPS_COST)
|
||||
return state.DoS(0, false, REJECT_NONSTANDARD, "bad-txns-too-many-sigops", false,
|
||||
strprintf("%d", nSigOpsCost));
|
||||
|
||||
@@ -1497,13 +1518,14 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
|
||||
// Check against previous transactions
|
||||
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
|
||||
if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true)) {
|
||||
PrecomputedTransactionData txdata(tx);
|
||||
if (!CheckInputs(tx, state, view, true, scriptVerifyFlags, true, txdata)) {
|
||||
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
|
||||
// need to turn both off, and compare against just turning off CLEANSTACK
|
||||
// to see if the failure is specifically due to witness validation.
|
||||
if (CheckInputs(tx, state, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true) &&
|
||||
!CheckInputs(tx, state, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true)) {
|
||||
// Only the witness is wrong, so the transaction itself may be fine.
|
||||
if (tx.wit.IsNull() && CheckInputs(tx, state, view, true, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, txdata) &&
|
||||
!CheckInputs(tx, state, view, true, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, txdata)) {
|
||||
// Only the witness is missing, so the transaction itself may be fine.
|
||||
state.SetCorruptionPossible();
|
||||
}
|
||||
return false;
|
||||
@@ -1518,7 +1540,7 @@ bool AcceptToMemoryPoolWorker(CTxMemPool& pool, CValidationState& state, const C
|
||||
// There is a similar check in CreateNewBlock() to prevent creating
|
||||
// invalid blocks, however allowing such transactions into the mempool
|
||||
// can be exploited as a DoS attack.
|
||||
if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true))
|
||||
if (!CheckInputs(tx, state, view, true, MANDATORY_SCRIPT_VERIFY_FLAGS, true, txdata))
|
||||
{
|
||||
return error("%s: BUG! PLEASE REPORT THIS! ConnectInputs failed against MANDATORY but not STANDARD flags %s, %s",
|
||||
__func__, hash.ToString(), FormatStateMessage(state));
|
||||
@@ -1915,7 +1937,7 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight)
|
||||
bool CScriptCheck::operator()() {
|
||||
const CScript &scriptSig = ptxTo->vin[nIn].scriptSig;
|
||||
const CScriptWitness *witness = (nIn < ptxTo->wit.vtxinwit.size()) ? &ptxTo->wit.vtxinwit[nIn].scriptWitness : NULL;
|
||||
if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore), &error)) {
|
||||
if (!VerifyScript(scriptSig, scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, amount, cacheStore, *txdata), &error)) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
@@ -1974,7 +1996,7 @@ bool CheckTxInputs(const CTransaction& tx, CValidationState& state, const CCoins
}
}// namespace Consensus

bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks)
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &inputs, bool fScriptChecks, unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks)
{
if (!tx.IsCoinBase())
{
@@ -1993,7 +2015,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// is safe because block merkle hashes are still computed and checked,
// and any change will be caught at the next checkpoint. Of course, if
// the checkpoint is for a chain that's invalid due to false scriptSigs
// this optimisation would allow an invalid chain to be accepted.
// this optimization would allow an invalid chain to be accepted.
if (fScriptChecks) {
for (unsigned int i = 0; i < tx.vin.size(); i++) {
const COutPoint &prevout = tx.vin[i].prevout;
@@ -2001,7 +2023,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
assert(coins);

// Verify signature
CScriptCheck check(*coins, tx, i, flags, cacheStore);
CScriptCheck check(*coins, tx, i, flags, cacheStore, &txdata);
if (pvChecks) {
pvChecks->push_back(CScriptCheck());
check.swap(pvChecks->back());
@@ -2014,7 +2036,7 @@ bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsVi
// avoid splitting the network between upgraded and
// non-upgraded nodes.
CScriptCheck check2(*coins, tx, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore);
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheStore, &txdata);
if (check2())
return state.Invalid(false, REJECT_NONSTANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(check.GetScriptError())));
}
@@ -2068,7 +2090,7 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CDiskBlockPos& pos, const uin
// Open history file to read
CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
if (filein.IsNull())
return error("%s: OpenBlockFile failed", __func__);
return error("%s: OpenUndoFile failed", __func__);

// Read block
uint256 hashChecksum;
@@ -2394,6 +2416,7 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
// Start enforcing WITNESS rules using versionbits logic.
if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus())) {
flags |= SCRIPT_VERIFY_WITNESS;
flags |= SCRIPT_VERIFY_NULLDUMMY;
}

int64_t nTime2 = GetTimeMicros(); nTimeForks += nTime2 - nTime1;
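The ConnectBlock hunk above switches on SCRIPT_VERIFY_WITNESS and SCRIPT_VERIFY_NULLDUMMY together once the witness deployment is active. A small sketch of composing such a verification-flag bitmask; the flag names and values here are purely illustrative, not the consensus constants:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative flag values only; the real constants live in script/interpreter.h.
enum ScriptFlags : uint32_t {
    VERIFY_NONE      = 0,
    VERIFY_P2SH      = 1u << 0,
    VERIFY_DERSIG    = 1u << 2,
    VERIFY_NULLDUMMY = 1u << 4,
    VERIFY_WITNESS   = 1u << 11,
};

uint32_t BlockScriptFlags(bool p2shActive, bool dersigActive, bool witnessActive) {
    uint32_t flags = VERIFY_NONE;
    if (p2shActive)   flags |= VERIFY_P2SH;
    if (dersigActive) flags |= VERIFY_DERSIG;
    if (witnessActive) {
        // Witness and NULLDUMMY enforcement are switched on together,
        // mirroring the hunk above.
        flags |= VERIFY_WITNESS | VERIFY_NULLDUMMY;
    }
    return flags;
}

int main() {
    std::cout << std::hex << BlockScriptFlags(true, true, true) << "\n";
}
```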
@@ -2412,6 +2435,8 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
std::vector<std::pair<uint256, CDiskTxPos> > vPos;
vPos.reserve(block.vtx.size());
blockundo.vtxundo.reserve(block.vtx.size() - 1);
std::vector<PrecomputedTransactionData> txdata;
txdata.reserve(block.vtx.size()); // Required so that pointers to individual PrecomputedTransactionData don't get invalidated
for (unsigned int i = 0; i < block.vtx.size(); i++)
{
const CTransaction &tx = block.vtx[i];
@@ -2458,13 +2483,14 @@ bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockIndex* pin
return state.DoS(100, error("ConnectBlock(): too many sigops"),
REJECT_INVALID, "bad-blk-sigops");

txdata.emplace_back(tx);
if (!tx.IsCoinBase())
{
nFees += view.GetValueIn(tx)-tx.GetValueOut();

std::vector<CScriptCheck> vChecks;
bool fCacheResults = fJustCheck; /* Don't cache results if we're actually connecting blocks (still consult the cache, though) */
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, nScriptCheckThreads ? &vChecks : NULL))
if (!CheckInputs(tx, state, view, fScriptChecks, flags, fCacheResults, txdata[i], nScriptCheckThreads ? &vChecks : NULL))
return error("ConnectBlock(): CheckInputs on %s failed with %s",
tx.GetHash().ToString(), FormatStateMessage(state));
control.Add(vChecks);
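The reserve() comment in the hunk above is load-bearing: queued script checks keep pointers to individual PrecomputedTransactionData entries, so the vector must never reallocate while they are alive. A tiny self-contained illustration of that pointer-stability rule (generic types, not the Bitcoin Core ones):

```cpp
#include <iostream>
#include <vector>

struct PerTxData { int id; };

int main() {
    const size_t nTx = 1000;

    std::vector<PerTxData> data;
    data.reserve(nTx);                        // capacity fixed up front: no reallocation below
    std::vector<const PerTxData*> handed_out;

    for (size_t i = 0; i < nTx; ++i) {
        data.emplace_back(PerTxData{static_cast<int>(i)});
        handed_out.push_back(&data.back());   // safe only because of the reserve() above
    }

    // Without the reserve(), any emplace_back() that grows the vector could move
    // every element and leave the stored pointers dangling.
    std::cout << handed_out.front()->id << " " << handed_out.back()->id << "\n";
}
```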
@@ -3596,13 +3622,13 @@ bool ContextualCheckBlock(const CBlock& block, CValidationState& state, CBlockIn
}

// After the coinbase witness nonce and commitment are verified,
// we can check if the block cost passes (before we've checked the
// coinbase witness, it would be possible for the cost to be too
// we can check if the block weight passes (before we've checked the
// coinbase witness, it would be possible for the weight to be too
// large by filling up the coinbase witness, which doesn't change
// the block hash, so we couldn't mark the block as permanently
// failed).
if (GetBlockCost(block) > MAX_BLOCK_COST) {
return state.DoS(100, error("ContextualCheckBlock(): cost limit failed"), REJECT_INVALID, "bad-blk-cost");
if (GetBlockWeight(block) > MAX_BLOCK_WEIGHT) {
return state.DoS(100, error("ContextualCheckBlock(): weight limit failed"), REJECT_INVALID, "bad-blk-weight");
}

return true;
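This hunk renames "block cost" to the BIP141 term "block weight", where weight is three times the base (non-witness) serialized size plus the total serialized size, capped at 4,000,000 weight units. A toy calculation under those assumptions; the serialization sizes below are made up:

```cpp
#include <cstdint>
#include <iostream>

static const int64_t WITNESS_SCALE_FACTOR = 4;
static const int64_t MAX_BLOCK_WEIGHT_SKETCH = 4000000;   // illustrative constant

// BIP141: weight = base_size * (scale_factor - 1) + total_size == 3*base + total.
int64_t BlockWeight(int64_t base_size, int64_t total_size) {
    return base_size * (WITNESS_SCALE_FACTOR - 1) + total_size;
}

int main() {
    int64_t base  = 800000;    // bytes without witness data (made-up figure)
    int64_t total = 1500000;   // bytes including witness data (made-up figure)
    int64_t weight = BlockWeight(base, total);
    std::cout << "weight=" << weight
              << " within_limit=" << (weight <= MAX_BLOCK_WEIGHT_SKETCH) << "\n";
}
```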
@@ -3954,7 +3980,7 @@ CBlockIndex * InsertBlockIndex(uint256 hash)
// Create new
CBlockIndex* pindexNew = new CBlockIndex();
if (!pindexNew)
throw runtime_error("LoadBlockIndex(): new CBlockIndex failed");
throw runtime_error(std::string(__func__) + ": new CBlockIndex failed");
mi = mapBlockIndex.insert(make_pair(hash, pindexNew)).first;
pindexNew->phashBlock = &((*mi).first);

@@ -4331,8 +4357,6 @@ bool InitBlockIndex(const CChainParams& chainparams)
CBlockIndex *pindex = AddToBlockIndex(block);
if (!ReceivedBlockTransactions(block, state, pindex, blockPos))
return error("LoadBlockIndex(): genesis block not accepted");
if (!ActivateBestChain(state, chainparams, &block))
return error("LoadBlockIndex(): genesis block cannot be activated");
// Force a chainstate write so that when we VerifyDB in a moment, it doesn't check stale data
return FlushStateToDisk(state, FLUSH_STATE_ALWAYS);
} catch (const std::runtime_error& e) {
@@ -4647,6 +4671,7 @@ std::string GetWarnings(const std::string& strFor)
string strStatusBar;
string strRPC;
string strGUI;
const string uiAlertSeperator = "<hr />";

if (!CLIENT_VERSION_IS_RELEASE) {
strStatusBar = "This is a pre-release test build - use at your own risk - do not use for mining or merchant applications";
@@ -4659,18 +4684,19 @@ std::string GetWarnings(const std::string& strFor)
// Misc warnings like out of disk space and clock is wrong
if (strMiscWarning != "")
{
strStatusBar = strGUI = strMiscWarning;
strStatusBar = strMiscWarning;
strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + strMiscWarning;
}

if (fLargeWorkForkFound)
{
strStatusBar = strRPC = "Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.";
strGUI = _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.");
}
else if (fLargeWorkInvalidChainFound)
{
strStatusBar = strRPC = "Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.";
strGUI = _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
strGUI += (strGUI.empty() ? "" : uiAlertSeperator) + _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.");
}

if (strFor == "gui")
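The GetWarnings change stops overwriting the GUI warning string and instead appends each warning behind a separator, so earlier warnings are not lost. The append-with-separator pattern in isolation, as a hypothetical helper:

```cpp
#include <iostream>
#include <string>
#include <vector>

// Append each warning, inserting the separator only between entries,
// so the first warning is not prefixed and none are overwritten.
std::string JoinWarnings(const std::vector<std::string>& warnings,
                         const std::string& separator = "<hr />") {
    std::string out;
    for (const auto& w : warnings)
        out += (out.empty() ? "" : separator) + w;
    return out;
}

int main() {
    std::cout << JoinWarnings({"clock is wrong", "low disk space"}) << "\n";
}
```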
@@ -4793,10 +4819,16 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
pfrom->PushMessage(NetMsgType::BLOCK, block);
else if (inv.type == MSG_FILTERED_BLOCK)
{
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
bool send = false;
CMerkleBlock merkleBlock;
{
CMerkleBlock merkleBlock(block, *pfrom->pfilter);
LOCK(pfrom->cs_filter);
if (pfrom->pfilter) {
send = true;
merkleBlock = CMerkleBlock(block, *pfrom->pfilter);
}
}
if (send) {
pfrom->PushMessage(NetMsgType::MERKLEBLOCK, merkleBlock);
// CMerkleBlock just contains hashes, so also push any transactions in the block the client did not see
// This avoids hurting performance by pointlessly requiring a round-trip
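The MSG_FILTERED_BLOCK hunk narrows the cs_filter critical section: the merkle block is built while the lock is held, but the network send happens after it is released. The same shape with std::mutex, using invented names for the sketch:

```cpp
#include <iostream>
#include <mutex>
#include <string>

std::mutex filter_mutex;      // stands in for pfrom->cs_filter
bool have_filter = true;      // stands in for pfrom->pfilter being set

void Send(const std::string& msg) { std::cout << "send: " << msg << "\n"; }

void ServeFilteredBlock(const std::string& block) {
    bool send = false;
    std::string reply;
    {
        // Hold the lock only while reading the peer's filter state.
        std::lock_guard<std::mutex> lock(filter_mutex);
        if (have_filter) {
            send = true;
            reply = "filtered(" + block + ")";
        }
    }
    // The potentially slow network send happens outside the critical section.
    if (send) Send(reply);
}

int main() { ServeFilteredBlock("block123"); }
```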
@@ -4815,13 +4847,14 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam
{
// If a peer is asking for old blocks, we're almost guaranteed
// they wont have a useful mempool to match against a compact block,
// and we dont feel like constructing the object for them, so
// and we don't feel like constructing the object for them, so
// instead we respond with the full, non-compact block.
bool fPeerWantsWitness = State(pfrom->GetId())->fWantsCmpctWitness;
if (mi->second->nHeight >= chainActive.Height() - 10) {
CBlockHeaderAndShortTxIDs cmpctblock(block);
pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
CBlockHeaderAndShortTxIDs cmpctblock(block, fPeerWantsWitness);
pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
} else
pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
pfrom->PushMessageWithFlag(fPeerWantsWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCK, block);
}

// Trigger the peer node to send a getblocks request for the next batch of inventory
@@ -4883,7 +4916,7 @@ void static ProcessGetData(CNode* pfrom, const Consensus::Params& consensusParam

uint32_t GetFetchFlags(CNode* pfrom, CBlockIndex* pprev, const Consensus::Params& chainparams) {
uint32_t nFetchFlags = 0;
if (IsWitnessEnabled(pprev, chainparams) && State(pfrom->GetId())->fHaveWitness) {
if ((nLocalServices & NODE_WITNESS) && State(pfrom->GetId())->fHaveWitness) {
nFetchFlags |= MSG_WITNESS_FLAG;
}
return nFetchFlags;
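GetFetchFlags now keys off the local NODE_WITNESS service bit plus the peer's advertised witness support, and the result is OR-ed into the inventory type elsewhere in the diff. A compact sketch of that bitmask logic, with illustrative constants only:

```cpp
#include <cstdint>
#include <iostream>

// Illustrative values; the real MSG_* and service-bit constants live in protocol.h.
static const uint32_t MSG_BLOCK_SKETCH        = 2;
static const uint32_t MSG_WITNESS_FLAG_SKETCH = 1u << 30;
static const uint64_t NODE_WITNESS_SKETCH     = 1u << 3;

uint32_t FetchFlags(uint64_t localServices, bool peerHasWitness) {
    uint32_t flags = 0;
    // Ask for witness-serialized data only if both sides can handle it.
    if ((localServices & NODE_WITNESS_SKETCH) && peerHasWitness)
        flags |= MSG_WITNESS_FLAG_SKETCH;
    return flags;
}

int main() {
    uint32_t invType = MSG_BLOCK_SKETCH | FetchFlags(NODE_WITNESS_SKETCH, true);
    std::cout << std::hex << invType << "\n";   // block request with the witness bit set
}
```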
@@ -4917,6 +4950,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,

if (strCommand == NetMsgType::VERSION)
{
// Feeler connections exist only to verify if address is online.
if (pfrom->fFeeler) {
assert(pfrom->fInbound == false);
pfrom->fDisconnect = true;
}

// Each connection can only send one version message
if (pfrom->nVersion != 0)
{
@@ -5035,12 +5074,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->fGetAddr = true;
}
addrman.Good(pfrom->addr);
} else {
if (((CNetAddr)pfrom->addr) == (CNetAddr)addrFrom)
{
addrman.Add(addrFrom, addrFrom);
addrman.Good(addrFrom);
}
}

pfrom->fSuccessfullyConnected = true;
@@ -5087,13 +5120,16 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pfrom->PushMessage(NetMsgType::SENDHEADERS);
}
if (pfrom->nVersion >= SHORT_IDS_BLOCKS_VERSION) {
// Tell our peer we are willing to provide version-1 cmpctblocks
// Tell our peer we are willing to provide version 1 or 2 cmpctblocks
// However, we do not request new block announcements using
// cmpctblock messages.
// We send this to non-NODE NETWORK peers as well, because
// they may wish to request compact blocks from us
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 1;
uint64_t nCMPCTBLOCKVersion = 2;
if (nLocalServices & NODE_WITNESS)
pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
nCMPCTBLOCKVersion = 1;
pfrom->PushMessage(NetMsgType::SENDCMPCT, fAnnounceUsingCMPCTBLOCK, nCMPCTBLOCKVersion);
}
}
@@ -5173,12 +5209,23 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
else if (strCommand == NetMsgType::SENDCMPCT)
{
bool fAnnounceUsingCMPCTBLOCK = false;
uint64_t nCMPCTBLOCKVersion = 1;
uint64_t nCMPCTBLOCKVersion = 0;
vRecv >> fAnnounceUsingCMPCTBLOCK >> nCMPCTBLOCKVersion;
if (nCMPCTBLOCKVersion == 1) {
if (nCMPCTBLOCKVersion == 1 || ((nLocalServices & NODE_WITNESS) && nCMPCTBLOCKVersion == 2)) {
LOCK(cs_main);
State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
// fProvidesHeaderAndIDs is used to "lock in" version of compact blocks we send (fWantsCmpctWitness)
if (!State(pfrom->GetId())->fProvidesHeaderAndIDs) {
State(pfrom->GetId())->fProvidesHeaderAndIDs = true;
State(pfrom->GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
}
if (State(pfrom->GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
State(pfrom->GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
if (!State(pfrom->GetId())->fSupportsDesiredCmpctVersion) {
if (nLocalServices & NODE_WITNESS)
State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
else
State(pfrom->GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 1);
}
}
}

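The SENDCMPCT handling above locks in whichever compact-block version a peer announces first, ignores later announcements of the other version when updating the announcement preference, and records in fSupportsDesiredCmpctVersion whether the locked-in version is the one we actually want. A standalone state-machine sketch of that negotiation; field names mirror the diff but the types are simplified:

```cpp
#include <cstdint>
#include <iostream>

struct PeerCmpctState {
    bool fProvidesHeaderAndIDs = false;        // peer sent at least one sendcmpct
    bool fPreferHeaderAndIDs = false;          // peer wants announcements via cmpctblock
    bool fWantsCmpctWitness = false;           // locked-in version is 2 (witness)
    bool fSupportsDesiredCmpctVersion = false; // locked-in version matches our preference
};

void OnSendCmpct(PeerCmpctState& st, bool announce, uint64_t version, bool localWitness) {
    if (version != 1 && !(localWitness && version == 2)) return;  // unknown version: ignore
    if (!st.fProvidesHeaderAndIDs) {
        // First announcement wins: lock in the compact block version for this peer.
        st.fProvidesHeaderAndIDs = true;
        st.fWantsCmpctWitness = (version == 2);
    }
    if (st.fWantsCmpctWitness == (version == 2))  // ignore later announces of the other version
        st.fPreferHeaderAndIDs = announce;
    if (!st.fSupportsDesiredCmpctVersion)
        st.fSupportsDesiredCmpctVersion = localWitness ? (version == 2) : (version == 1);
}

int main() {
    PeerCmpctState st;
    OnSendCmpct(st, /*announce=*/true,  /*version=*/2, /*localWitness=*/true);
    OnSendCmpct(st, /*announce=*/false, /*version=*/1, /*localWitness=*/true); // ignored for preference
    std::cout << st.fWantsCmpctWitness << " " << st.fPreferHeaderAndIDs << " "
              << st.fSupportsDesiredCmpctVersion << "\n";   // prints: 1 1 1
}
```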
@@ -5236,7 +5283,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER &&
(!IsWitnessEnabled(chainActive.Tip(), chainparams.GetConsensus()) || State(pfrom->GetId())->fHaveWitness)) {
inv.type |= nFetchFlags;
if (nodestate->fProvidesHeaderAndIDs && !(nLocalServices & NODE_WITNESS))
if (nodestate->fSupportsDesiredCmpctVersion)
vToFetch.push_back(CInv(MSG_CMPCT_BLOCK, inv.hash));
else
vToFetch.push_back(inv);
@@ -5343,7 +5390,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,

BlockMap::iterator it = mapBlockIndex.find(req.blockhash);
if (it == mapBlockIndex.end() || !(it->second->nStatus & BLOCK_HAVE_DATA)) {
Misbehaving(pfrom->GetId(), 100);
LogPrintf("Peer %d sent us a getblocktxn for a block we don't have", pfrom->id);
return true;
}
@@ -5365,7 +5411,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
resp.txn[i] = block.vtx[req.indexes[i]];
}
pfrom->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
pfrom->PushMessageWithFlag(State(pfrom->GetId())->fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::BLOCKTXN, resp);
}


@@ -5492,7 +5538,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
else if (!fMissingInputs2)
{
int nDos = 0;
if (stateDummy.IsInvalid(nDos) && nDos > 0 && (!state.CorruptionPossible() || State(fromPeer)->fHaveWitness))
if (stateDummy.IsInvalid(nDos) && nDos > 0)
{
// Punish peer that gave us an invalid orphan tx
Misbehaving(fromPeer, nDos);
@@ -5503,7 +5549,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// Probably non-standard or insufficient fee/priority
LogPrint("mempool", " removed orphan tx %s\n", orphanHash.ToString());
vEraseQueue.push_back(orphanHash);
if (!stateDummy.CorruptionPossible()) {
if (orphanTx.wit.IsNull() && !stateDummy.CorruptionPossible()) {
// Do not use rejection cache for witness transactions or
// witness-stripped transactions, as they can have been malleated.
// See https://github.com/bitcoin/bitcoin/issues/8279 for details.
assert(recentRejects);
recentRejects->insert(orphanHash);
}
@@ -5541,7 +5590,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
LogPrint("mempool", "not keeping orphan with rejected parents %s\n",tx.GetHash().ToString());
}
} else {
if (!state.CorruptionPossible()) {
if (tx.wit.IsNull() && !state.CorruptionPossible()) {
// Do not use rejection cache for witness transactions or
// witness-stripped transactions, as they can have been malleated.
// See https://github.com/bitcoin/bitcoin/issues/8279 for details.
assert(recentRejects);
recentRejects->insert(tx.GetHash());
}
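The two hunks above stop inserting witness-carrying (or possibly witness-stripped) transactions into recentRejects: a third party can malleate the witness without changing the txid, so caching the rejection by txid could filter out an honest later copy. A sketch of that decision, with a plain std::set standing in for the rolling bloom filter:

```cpp
#include <iostream>
#include <set>
#include <string>

struct TxSketch {
    std::string txid;   // does not commit to the witness
    bool hasWitness;    // true if the witness is non-null
};

// Only cache a rejection keyed by txid when the (malleable) witness cannot have
// been the reason for failure, mirroring the tx.wit.IsNull() checks above.
void MaybeCacheRejection(std::set<std::string>& recentRejects,
                         const TxSketch& tx, bool corruptionPossible) {
    if (!tx.hasWitness && !corruptionPossible)
        recentRejects.insert(tx.txid);
}

int main() {
    std::set<std::string> recentRejects;
    MaybeCacheRejection(recentRejects, {"txid-a", /*hasWitness=*/true},  false);
    MaybeCacheRejection(recentRejects, {"txid-b", /*hasWitness=*/false}, false);
    std::cout << recentRejects.count("txid-a") << recentRejects.count("txid-b") << "\n"; // 01
}
```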
@@ -5573,9 +5625,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
if (state.GetRejectCode() < REJECT_INTERNAL) // Never send AcceptToMemoryPool's internal codes over P2P
pfrom->PushMessage(NetMsgType::REJECT, strCommand, (unsigned char)state.GetRejectCode(),
state.GetRejectReason().substr(0, MAX_REJECT_MESSAGE_LENGTH), inv.hash);
if (nDoS > 0 && (!state.CorruptionPossible() || State(pfrom->id)->fHaveWitness)) {
// When a non-witness-supporting peer gives us a transaction that would
// be accepted if witness validation was off, we can't blame them for it.
if (nDoS > 0) {
Misbehaving(pfrom->GetId(), nDoS);
}
}
@@ -5625,10 +5675,10 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// We requested this block for some reason, but our mempool will probably be useless
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
pfrom->PushMessage(NetMsgType::GETDATA, vInv);
return true;
}
return true;
}

// If we're not close to tip yet, give up and let parallel block fetch work its magic
@@ -5637,6 +5687,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,

CNodeState *nodestate = State(pfrom->GetId());

if (IsWitnessEnabled(pindex->pprev, chainparams.GetConsensus()) && !nodestate->fSupportsDesiredCmpctVersion) {
// Don't bother trying to process compact blocks from v1 peers
// after segwit activates.
return true;
}

// We want to be a bit conservative just to be extra careful about DoS
// possibilities in compact block processing...
if (pindex->nHeight <= chainActive.Height() + 2) {
@@ -5663,7 +5719,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
} else if (status == READ_STATUS_FAILED) {
// Duplicate txindexes, the block is now in-flight, so just request it
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
pfrom->PushMessage(NetMsgType::GETDATA, vInv);
return true;
}
@@ -5690,7 +5746,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
// We requested this block, but its far into the future, so our
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
vInv[0] = CInv(MSG_BLOCK, cmpctblock.header.GetHash());
vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(pfrom, pindex->pprev, chainparams.GetConsensus()), cmpctblock.header.GetHash());
pfrom->PushMessage(NetMsgType::GETDATA, vInv);
return true;
} else {
@@ -5732,7 +5788,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
} else if (status == READ_STATUS_FAILED) {
// Might have collided, fall back to getdata now :(
std::vector<CInv> invs;
invs.push_back(CInv(MSG_BLOCK, resp.blockhash));
invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(pfrom, chainActive.Tip(), chainparams.GetConsensus()), resp.blockhash));
pfrom->PushMessage(NetMsgType::GETDATA, invs);
} else {
CValidationState state;
@@ -5881,7 +5937,7 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
pindexLast->GetBlockHash().ToString(), pindexLast->nHeight);
}
if (vGetData.size() > 0) {
if (nodestate->fProvidesHeaderAndIDs && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN) && !(nLocalServices & NODE_WITNESS)) {
if (nodestate->fSupportsDesiredCmpctVersion && vGetData.size() == 1 && mapBlocksInFlight.size() == 1 && pindexLast->pprev->IsValid(BLOCK_VALID_CHAIN)) {
// We seem to be rather well-synced, so it appears pfrom was the first to provide us
// with this block! Let's get them to announce using compact blocks in the future.
MaybeSetPeerAsAnnouncingHeaderAndIDs(nodestate, pfrom);
@@ -6059,8 +6115,6 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
CBloomFilter filter;
vRecv >> filter;

LOCK(pfrom->cs_filter);

if (!filter.IsWithinSizeConstraints())
{
// There is no excuse for sending a too-large filter
@@ -6069,11 +6123,12 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
else
{
LOCK(pfrom->cs_filter);
delete pfrom->pfilter;
pfrom->pfilter = new CBloomFilter(filter);
pfrom->pfilter->UpdateEmptyFull();
pfrom->fRelayTxes = true;
}
pfrom->fRelayTxes = true;
}


@@ -6084,20 +6139,21 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,

// Nodes must NEVER send a data item > 520 bytes (the max size for a script data object,
// and thus, the maximum size any matched object can have) in a filteradd message
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE)
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
bool bad = false;
if (vData.size() > MAX_SCRIPT_ELEMENT_SIZE) {
bad = true;
} else {
LOCK(pfrom->cs_filter);
if (pfrom->pfilter)
if (pfrom->pfilter) {
pfrom->pfilter->insert(vData);
else
{
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
} else {
bad = true;
}
}
if (bad) {
LOCK(cs_main);
Misbehaving(pfrom->GetId(), 100);
}
}

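The filteradd rewrite above first decides under cs_filter whether the message was bad, and only afterwards takes cs_main to penalise the peer, so the two locks are never held at the same time. The same check-then-penalise shape with two std::mutexes; all names here are invented for the sketch:

```cpp
#include <iostream>
#include <mutex>
#include <vector>

static const size_t MAX_ELEMENT_SIZE = 520;

std::mutex cs_filter_sketch;   // protects the peer's filter
std::mutex cs_main_sketch;     // protects the misbehaviour score
bool peer_has_filter = true;
int misbehaviour = 0;

void HandleFilterAdd(const std::vector<unsigned char>& data) {
    bool bad = false;
    if (data.size() > MAX_ELEMENT_SIZE) {
        bad = true;
    } else {
        std::lock_guard<std::mutex> lock(cs_filter_sketch);
        if (peer_has_filter) {
            // a real handler would insert the element into the filter here
        } else {
            bad = true;        // filteradd without a prior filterload
        }
    }
    if (bad) {
        // Penalise only after cs_filter has been released.
        std::lock_guard<std::mutex> lock(cs_main_sketch);
        misbehaviour += 100;
    }
}

int main() {
    HandleFilterAdd(std::vector<unsigned char>(600));   // too large: penalised
    std::cout << misbehaviour << "\n";                   // prints 100
}
```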
@@ -6146,6 +6202,11 @@ bool static ProcessMessage(CNode* pfrom, string strCommand, CDataStream& vRecv,
}
}

else if (strCommand == NetMsgType::NOTFOUND) {
// We do not care about the NOTFOUND message, but logging an Unknown Command
// message would be undesirable as we transmit it ourselves.
}

else {
// Ignore unknown commands for extensibility
LogPrint("net", "Unknown command \"%s\" from peer=%d\n", SanitizeString(strCommand), pfrom->id);
@@ -6319,7 +6380,7 @@ bool SendMessages(CNode* pto)
// Ping automatically sent as a latency probe & keepalive.
pingSend = true;
}
if (pingSend) {
if (pingSend && !pto->fDisconnect) {
uint64_t nonce = 0;
while (nonce == 0) {
GetRandBytes((unsigned char*)&nonce, sizeof(nonce));
@@ -6400,7 +6461,7 @@ bool SendMessages(CNode* pto)
if (pindexBestHeader == NULL)
pindexBestHeader = chainActive.Tip();
bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
if (!state.fSyncStarted && !pto->fClient && !pto->fDisconnect && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
state.fSyncStarted = true;
@@ -6504,8 +6565,8 @@ bool SendMessages(CNode* pto)
//TODO: Shouldn't need to reload block from disk, but requires refactor
CBlock block;
assert(ReadBlockFromDisk(block, pBestIndex, consensusParams));
CBlockHeaderAndShortTxIDs cmpctblock(block);
pto->PushMessageWithFlag(SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
CBlockHeaderAndShortTxIDs cmpctblock(block, state.fWantsCmpctWitness);
pto->PushMessageWithFlag(state.fWantsCmpctWitness ? 0 : SERIALIZE_TRANSACTION_NO_WITNESS, NetMsgType::CMPCTBLOCK, cmpctblock);
state.pindexBestHeaderSent = pBestIndex;
} else if (state.fPreferHeaders) {
if (vHeaders.size() > 1) {
@@ -6715,15 +6776,13 @@ bool SendMessages(CNode* pto)
if (!pto->fDisconnect && !pto->fClient && (fFetch || !IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
vector<CBlockIndex*> vToDownload;
NodeId staller = -1;
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams);
BOOST_FOREACH(CBlockIndex *pindex, vToDownload) {
if (State(pto->GetId())->fHaveWitness || !IsWitnessEnabled(pindex->pprev, consensusParams)) {
uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
uint32_t nFetchFlags = GetFetchFlags(pto, pindex->pprev, consensusParams);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), consensusParams, pindex);
LogPrint("net", "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->id);
}
if (state.nBlocksInFlight == 0 && staller != -1) {
if (State(staller)->nStallingSince == 0) {
17
src/main.h
@@ -39,6 +39,7 @@ class CTxMemPool;
class CValidationInterface;
class CValidationState;

struct PrecomputedTransactionData;
struct CNodeStateStats;
struct LockPoints;

@@ -124,7 +125,6 @@ static const int64_t DEFAULT_MAX_TIP_AGE = 24 * 60 * 60;

/** Default for -permitbaremultisig */
static const bool DEFAULT_PERMIT_BAREMULTISIG = true;
static const unsigned int DEFAULT_BYTES_PER_SIGOP = 20;
static const bool DEFAULT_CHECKPOINTS_ENABLED = true;
static const bool DEFAULT_TXINDEX = false;
static const unsigned int DEFAULT_BANSCORE_THRESHOLD = 100;
@@ -155,7 +155,7 @@ typedef boost::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern BlockMap mapBlockIndex;
extern uint64_t nLastBlockTx;
extern uint64_t nLastBlockSize;
extern uint64_t nLastBlockCost;
extern uint64_t nLastBlockWeight;
extern const std::string strMessageMagic;
extern CWaitableCriticalSection csBestBlock;
extern CConditionVariable cvBlockChange;
@@ -165,7 +165,6 @@ extern int nScriptCheckThreads;
extern bool fTxIndex;
extern bool fIsBareMultisigStd;
extern bool fRequireStandard;
extern unsigned int nBytesPerSigOp;
extern bool fCheckBlockIndex;
extern bool fCheckpointsEnabled;
extern size_t nCoinCacheUsage;
@@ -193,7 +192,7 @@ extern uint64_t nPruneTarget;
/** Block files containing a block-height within MIN_BLOCKS_TO_KEEP of chainActive.Tip() will not be pruned. */
static const unsigned int MIN_BLOCKS_TO_KEEP = 288;

static const signed int DEFAULT_CHECKBLOCKS = MIN_BLOCKS_TO_KEEP;
static const signed int DEFAULT_CHECKBLOCKS = 6;
static const unsigned int DEFAULT_CHECKLEVEL = 3;

// Require that user allocate at least 550MB for block & undo files (blk???.dat and rev???.dat)
@@ -216,7 +215,7 @@ void UnregisterNodeSignals(CNodeSignals& nodeSignals);
* block is made active. Note that it does not, however, guarantee that the
* specific block passed to it has been checked for validity!
*
* @param[out] state This may be set to an Error state if any error occurred processing it, including during validation/connection/etc of otherwise unrelated blocks during reorganisation; or it may be set to an Invalid state if pblock is itself invalid (but this is not guaranteed even when the block is checked). If you want to *possibly* get feedback on whether pblock is valid, you must also install a CValidationInterface (see validationinterface.h) - this will have its BlockChecked method called whenever *any* block completes validation.
* @param[out] state This may be set to an Error state if any error occurred processing it, including during validation/connection/etc of otherwise unrelated blocks during reorganization; or it may be set to an Invalid state if pblock is itself invalid (but this is not guaranteed even when the block is checked). If you want to *possibly* get feedback on whether pblock is valid, you must also install a CValidationInterface (see validationinterface.h) - this will have its BlockChecked method called whenever *any* block completes validation.
* @param[in] pfrom The node which we are receiving the block from; it is added to mapBlockSource and may be penalised if the block is invalid.
* @param[in] pblock The block we want to process.
* @param[in] fForceProcessing Process this block even if unrequested; used for non-network block sources and whitelisted peers.
@@ -349,7 +348,7 @@ int64_t GetTransactionSigOpCost(const CTransaction& tx, const CCoinsViewCache& i
* instead of being performed inline.
*/
bool CheckInputs(const CTransaction& tx, CValidationState &state, const CCoinsViewCache &view, bool fScriptChecks,
unsigned int flags, bool cacheStore, std::vector<CScriptCheck> *pvChecks = NULL);
unsigned int flags, bool cacheStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = NULL);

/** Apply the effects of this transaction on the UTXO set represented by view */
void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight);
@@ -410,12 +409,13 @@ private:
unsigned int nFlags;
bool cacheStore;
ScriptError error;
PrecomputedTransactionData *txdata;

public:
CScriptCheck(): amount(0), ptxTo(0), nIn(0), nFlags(0), cacheStore(false), error(SCRIPT_ERR_UNKNOWN_ERROR) {}
CScriptCheck(const CCoins& txFromIn, const CTransaction& txToIn, unsigned int nInIn, unsigned int nFlagsIn, bool cacheIn) :
CScriptCheck(const CCoins& txFromIn, const CTransaction& txToIn, unsigned int nInIn, unsigned int nFlagsIn, bool cacheIn, PrecomputedTransactionData* txdataIn) :
scriptPubKey(txFromIn.vout[txToIn.vin[nInIn].prevout.n].scriptPubKey), amount(txFromIn.vout[txToIn.vin[nInIn].prevout.n].nValue),
ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn), cacheStore(cacheIn), error(SCRIPT_ERR_UNKNOWN_ERROR) { }
ptxTo(&txToIn), nIn(nInIn), nFlags(nFlagsIn), cacheStore(cacheIn), error(SCRIPT_ERR_UNKNOWN_ERROR), txdata(txdataIn) { }

bool operator()();

@@ -427,6 +427,7 @@ public:
std::swap(nFlags, check.nFlags);
std::swap(cacheStore, check.cacheStore);
std::swap(error, check.error);
std::swap(txdata, check.txdata);
}

ScriptError GetScriptError() const { return error; }
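The main.h hunks add the txdata pointer to CScriptCheck's members and to the set that swap() hands over when a check is queued for the background script-verification threads instead of running inline. A reduced sketch of that queue-or-run-now pattern; the functor and queue here are stand-ins, not CScriptCheck or CCheckQueue:

```cpp
#include <iostream>
#include <utility>
#include <vector>

// A deferred check captures everything it needs, including a pointer to
// shared precomputed data, so it can run later on a worker thread.
struct DeferredCheck {
    int input = -1;
    const int* txdata = nullptr;           // shared, externally owned precomputed data

    bool operator()() const { return txdata != nullptr && input >= 0; }

    void swap(DeferredCheck& other) {
        std::swap(input, other.input);
        std::swap(txdata, other.txdata);   // mirrors std::swap(txdata, check.txdata)
    }
};

int main() {
    int shared_txdata = 42;
    std::vector<DeferredCheck> queue;      // stands in for the parallel check queue

    DeferredCheck check{0, &shared_txdata};
    bool runInParallel = true;
    if (runInParallel) {
        queue.emplace_back();
        check.swap(queue.back());          // hand the work (and the pointer) to the queue
    } else {
        std::cout << "inline result: " << check() << "\n";
    }

    for (auto& c : queue) std::cout << "queued result: " << c() << "\n";
}
```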
Some files were not shown because too many files have changed in this diff.