Mirror of https://github.com/bitcoin/bitcoin.git (synced 2026-01-23 00:24:48 +01:00)
Compare commits
113 Commits
| Author | SHA1 | Date |
|---|---|---|
| | fa91ad3420 | |
| | b834447fb2 | |
| | e9c978391f | |
| | e973b61dbb | |
| | f4b78c42e5 | |
| | c6e7765c0a | |
| | bab1ac827b | |
| | 71633a9b5c | |
| | daef5852f0 | |
| | 7a71850a6d | |
| | 2e4688618b | |
| | 6e7ea3cf2a | |
| | 3af199531b | |
| | 76cdeb7b06 | |
| | 9405e915e7 | |
| | 5e8ad98163 | |
| | a7e2d106db | |
| | 9ea84c08d7 | |
| | 833848e9b8 | |
| | a074d36254 | |
| | d91f56e1e3 | |
| | cc324aa2be | |
| | 01c04d32aa | |
| | abaf1e37a7 | |
| | 7a33cb9062 | |
| | 2cf352fd8e | |
| | 8a16165ab7 | |
| | 6f136cd391 | |
| | be0857745a | |
| | 65bcbbc538 | |
| | f24291bd96 | |
| | 73d3ab8fc9 | |
| | ddfb9150b8 | |
| | 354d46bc10 | |
| | 5a0506eea0 | |
| | 020ed613be | |
| | 56626300b8 | |
| | 97088fa75a | |
| | 4917d0c0de | |
| | 554ff3f7f3 | |
| | 16e10f928c | |
| | c7979f429a | |
| | 2d6426c296 | |
| | 46d9b9091b | |
| | b2026fa290 | |
| | 3226616493 | |
| | 8bcb90d7e3 | |
| | abf4a6eeae | |
| | 398c176ea8 | |
| | d82fc69829 | |
| | 513cef75ee | |
| | eea16f7de7 | |
| | 6b3c1dbc5c | |
| | d1b5d4e9ca | |
| | 2d7ebd2d91 | |
| | a8bb76b61f | |
| | 666aec7d49 | |
| | 6f23ead4a2 | |
| | 9d9baafc6f | |
| | 22ab141243 | |
| | 118abf4c30 | |
| | f6d49d0a09 | |
| | 5750355139 | |
| | 78d93effd0 | |
| | 4a034cbeb4 | |
| | 6ded1fe117 | |
| | 4e8b64b181 | |
| | 773e4cda94 | |
| | 4339787379 | |
| | c7f290b826 | |
| | a08c3cc51c | |
| | 06424fb004 | |
| | 0f0378fe3c | |
| | 643385b22d | |
| | 3b2dcc8b9a | |
| | b4286cf354 | |
| | 5057b9a6ff | |
| | 85ec6c6882 | |
| | 544f902b2a | |
| | e826c3daa5 | |
| | 835b5b8bb1 | |
| | a91567a980 | |
| | 819ee09af3 | |
| | 894a3cbe42 | |
| | 82c60a3151 | |
| | 849993377d | |
| | f9f3e8b686 | |
| | af086431e8 | |
| | 0a649d07c9 | |
| | f3089fb2cf | |
| | 1faf918a16 | |
| | 954c1a55e4 | |
| | 301aa5d814 | |
| | f63b8e960d | |
| | 7e1eca4882 | |
| | f2bd79f80c | |
| | 461dd13faf | |
| | 9bc4afb62c | |
| | 61cdc04a83 | |
| | 1288d44804 | |
| | 569ceb0df4 | |
| | 4c940d4789 | |
| | 9b95ab5e9d | |
| | e97588fc3d | |
| | 324caa8497 | |
| | 2717331981 | |
| | a0e438bd49 | |
| | 7c6be9acae | |
| | ea40fa95d9 | |
| | 5513516241 | |
| | f9939cdbe0 | |
| | 0fba5ae021 | |
| | 10cbf2255d | |
.cirrus.yml (214 lines deleted)
@@ -1,214 +0,0 @@
env: # Global defaults
  CIRRUS_CLONE_DEPTH: 1
  CIRRUS_LOG_TIMESTAMP: true
  MAKEJOBS: "-j10"
  TEST_RUNNER_PORT_MIN: "14000" # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
  CI_FAILFAST_TEST_LEAVE_DANGLING: "1" # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error

# Self-hosted machines can be used via Cirrus CI. They can be configured with
# multiple users to run tasks in parallel. No sudo permission is required.
#
# https://cirrus-ci.org/guide/persistent-workers/
#
# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+.
#
# The following specific types should exist, with the following requirements:
# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory.
# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory.
# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory.
#
# CI jobs for the latter configuration can be run on x86_64 hardware
# by installing qemu-user-static, which works out of the box with
# podman or docker. Background: https://stackoverflow.com/a/72890225/313633
#
# The above machine types are matched to each task by their label. Refer to the
# Cirrus CI docs for more details.
#
# When a contributor maintains a fork of the repo, any pull request they make
# to their own fork, or to the main repository, will trigger two CI runs:
# one for the branch push and one for the pull request.
# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable
# in Cirrus repository settings, accessible from
# https://cirrus-ci.com/github/my-organization/my-repository
#
# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1
# ensures that previous containers and artifacts are cleared before each run.
# This requires installing Podman instead of Docker.
#
# Furthermore:
# - podman-docker-4.1+ is required due to the bugfix in 4.1
#   (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200)
# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example
#   for a single user setup with sudo permission:
#
#   ```
#   apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
#   ```
#
# - There are no strict requirements on the hardware. Having fewer CPU threads
#   than recommended merely causes the CI script to run slower.
#   To avoid rare and intermittent OOM due to short memory usage spikes,
#   it is recommended to add (and persist) swap:
#
#   ```
#   fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab )
#   ```
#
# - To register the persistent worker, open a `screen` session and run:
#
#   ```
#   RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token
#   ```

# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
filter_template: &FILTER_TEMPLATE
  # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed,
  # but still run CI when a PR is created.
  # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
  skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == ""
  stateful: false # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks

base_template: &BASE_TEMPLATE
  << : *FILTER_TEMPLATE
  merge_base_script:
    # Require git (used in fingerprint_script).
    - git --version || ( apt-get update && apt-get install -y git )
    - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
    - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge"
    - git checkout FETCH_HEAD # Use merged changes to detect silent merge conflicts
      # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD"

main_template: &MAIN_TEMPLATE
  timeout_in: 120m # https://cirrus-ci.org/faq/#instance-timed-out
  ci_script:
    - ./ci/test_run_all.sh

global_task_template: &GLOBAL_TASK_TEMPLATE
  << : *BASE_TEMPLATE
  << : *MAIN_TEMPLATE

compute_credits_template: &CREDITS_TEMPLATE
  # https://cirrus-ci.org/pricing/#compute-credits
  # Only use credits for pull requests to the main repo
  use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != ""

task:
  name: 'lint'
  << : *BASE_TEMPLATE
  container:
    image: debian:bookworm
    cpu: 1
    memory: 1G
  # For faster CI feedback, immediately schedule the linters
  << : *CREDITS_TEMPLATE
  test_runner_cache:
    folder: "/lint_test_runner"
    fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner)
  python_cache:
    folder: "/python_build"
    fingerprint_script: cat .python-version /etc/os-release
  unshallow_script:
    - git fetch --unshallow --no-tags
  lint_script:
    - ./ci/lint_run_all.sh

task:
  name: 'tidy'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: medium
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh"

task:
  name: 'ARM, unit tests, no functional tests'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: arm64 # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453
  env:
    FILE_ENV: "./ci/test/00_setup_env_arm.sh"

task:
  name: 'Win64-cross'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  env:
    FILE_ENV: "./ci/test/00_setup_env_win64.sh"

task:
  name: 'CentOS, depends, gui'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_centos.sh"

task:
  name: 'previous releases, depends DEBUG'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh"

task:
  name: 'TSan, depends, gui'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: medium
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh"

task:
  name: 'MSan, depends'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  timeout_in: 300m # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done.
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_msan.sh"

task:
  name: 'fuzzer,address,undefined,integer, no depends'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: medium
  timeout_in: 240m # larger timeout, due to the high CPU demand
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh"

task:
  name: 'multiprocess, i686, DEBUG'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: medium
  env:
    FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh"

task:
  name: 'no wallet, libbitcoinkernel'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  env:
    FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh"

task:
  name: 'macOS-cross, gui, no tests'
  << : *GLOBAL_TASK_TEMPLATE
  persistent_worker:
    labels:
      type: small
  env:
    FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh"
.github/actions/configure-docker/action.yml (vendored, new file, 56 lines)
@@ -0,0 +1,56 @@
name: 'Configure Docker'
description: 'Set up Docker build driver and configure build cache args'
inputs:
  use-cirrus:
    description: 'Use cirrus cache'
    required: true
runs:
  using: 'composite'
  steps:
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3
      with:
        # Use host network to allow access to cirrus gha cache running on the host
        driver-opts: |
          network=host

    # This is required to allow buildkit to access the actions cache
    - name: Expose actions cache variables
      uses: actions/github-script@v6
      with:
        script: |
          Object.keys(process.env).forEach(function (key) {
            if (key.startsWith('ACTIONS_')) {
              core.info(`Exporting ${key}`);
              core.exportVariable(key, process.env[key]);
            }
          });

    - name: Construct docker build cache args
      shell: bash
      run: |
        # Configure docker build cache backend
        #
        # On forks the gha cache will work but will use Github's cache backend.
        # Docker will check for variables $ACTIONS_CACHE_URL, $ACTIONS_RESULTS_URL and $ACTIONS_RUNTIME_TOKEN
        # which are set automatically when running on GitHub infra: https://docs.docker.com/build/cache/backends/gha/#synopsis

        # Use cirrus cache host
        if [[ ${{ inputs.use-cirrus }} == 'true' ]]; then
          url_args="url=${CIRRUS_CACHE_HOST},url_v2=${CIRRUS_CACHE_HOST}"
        else
          url_args=""
        fi

        # Always optimistically --cache-from in case a cache blob exists
        args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}")

        # If this is a push to the default branch, also add --cache-to to save the cache
        if [[ ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then
          args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}")
        fi

        # Always `--load` into docker images (needed when using the `docker-container` build driver).
        args+=(--load)

        echo "DOCKER_BUILD_CACHE_ARG=${args[*]}" >> $GITHUB_ENV
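The composite action only exports `DOCKER_BUILD_CACHE_ARG`; a later workflow step has to splice it into its own `docker buildx build` invocation. A minimal sketch of that consumption follows, borrowing `CONTAINER_NAME` and the imagefile path from the lint job further below (those two values are assumptions here, not part of this action):

```
#!/usr/bin/env bash
# Sketch only: consuming the exported variable in a follow-up step.
# DOCKER_BUILD_CACHE_ARG was appended to $GITHUB_ENV above, so it arrives
# here as a plain environment variable holding something like
# "--cache-from type=gha,scope=... [--cache-to ...] --load".
set -o errexit -o nounset -o xtrace

# Intentionally unquoted: the variable contains multiple arguments.
# shellcheck disable=SC2086
docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG \
    --file "./ci/lint_imagefile" .
```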
.github/actions/configure-environment/action.yml (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
name: 'Configure environment'
description: 'Configure CI, cache and container name environment variables'
runs:
  using: 'composite'
  steps:
    - name: Set CI and cache directories
      shell: bash
      run: |
        echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV"
        echo "BASE_BUILD_DIR=${{ runner.temp }}/build" >> "$GITHUB_ENV"
        echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> $GITHUB_ENV
        echo "DEPENDS_DIR=${{ runner.temp }}/depends" >> "$GITHUB_ENV"
        echo "BASE_CACHE=${{ runner.temp }}/depends/built" >> $GITHUB_ENV
        echo "SOURCES_PATH=${{ runner.temp }}/depends/sources" >> $GITHUB_ENV
        echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> $GITHUB_ENV

    - name: Set cache hashes
      shell: bash
      run: |
        echo "DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
        echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV

    - name: Get container name
      shell: bash
      run: |
        source $FILE_ENV
        echo "CONTAINER_NAME=$CONTAINER_NAME" >> "$GITHUB_ENV"
.github/actions/restore-caches/action.yml (vendored, new file, 47 lines)
@@ -0,0 +1,47 @@
name: 'Restore Caches'
description: 'Restore ccache, depends sources, and built depends caches'
runs:
  using: 'composite'
  steps:
    - name: Restore Ccache cache
      id: ccache-cache
      uses: cirruslabs/cache/restore@v4
      with:
        path: ${{ env.CCACHE_DIR }}
        key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }}
        restore-keys: |
          ccache-${{ env.CONTAINER_NAME }}-

    - name: Restore depends sources cache
      id: depends-sources
      uses: cirruslabs/cache/restore@v4
      with:
        path: ${{ env.SOURCES_PATH }}
        key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
        restore-keys: |
          depends-sources-${{ env.CONTAINER_NAME }}-

    - name: Restore built depends cache
      id: depends-built
      uses: cirruslabs/cache/restore@v4
      with:
        path: ${{ env.BASE_CACHE }}
        key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
        restore-keys: |
          depends-built-${{ env.CONTAINER_NAME }}-

    - name: Restore previous releases cache
      id: previous-releases
      uses: cirruslabs/cache/restore@v4
      with:
        path: ${{ env.PREVIOUS_RELEASES_DIR }}
        key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }}
        restore-keys: |
          previous-releases-${{ env.CONTAINER_NAME }}-

    - name: export cache hits
      shell: bash
      run: |
        echo "depends-sources-cache-hit=${{ steps.depends-sources.outputs.cache-hit }}" >> $GITHUB_ENV
        echo "depends-built-cache-hit=${{ steps.depends-built.outputs.cache-hit }}" >> $GITHUB_ENV
        echo "previous-releases-cache-hit=${{ steps.previous-releases.outputs.cache-hit }}" >> $GITHUB_ENV
.github/actions/save-caches/action.yml (vendored, new file, 39 lines)
@@ -0,0 +1,39 @@
name: 'Save Caches'
description: 'Save ccache, depends sources, and built depends caches'
runs:
  using: 'composite'
  steps:
    - name: debug cache hit inputs
      shell: bash
      run: |
        echo "depends sources direct cache hit to primary key: ${{ env.depends-sources-cache-hit }}"
        echo "depends built direct cache hit to primary key: ${{ env.depends-built-cache-hit }}"
        echo "previous releases direct cache hit to primary key: ${{ env.previous-releases-cache-hit }}"

    - name: Save Ccache cache
      uses: cirruslabs/cache/save@v4
      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) }}
      with:
        path: ${{ env.CCACHE_DIR }}
        key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }}

    - name: Save depends sources cache
      uses: cirruslabs/cache/save@v4
      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-sources-cache-hit != 'true') }}
      with:
        path: ${{ env.SOURCES_PATH }}
        key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}

    - name: Save built depends cache
      uses: cirruslabs/cache/save@v4
      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-built-cache-hit != 'true') }}
      with:
        path: ${{ env.BASE_CACHE }}
        key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}

    - name: Save previous releases cache
      uses: cirruslabs/cache/save@v4
      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.previous-releases-cache-hit != 'true') }}
      with:
        path: ${{ env.PREVIOUS_RELEASES_DIR }}
        key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }}
.github/workflows/ci.yml (vendored, 185 lines changed)
@@ -19,9 +19,26 @@ concurrency:

env:
  CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error
  MAKEJOBS: '-j10'
  CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type.
  REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners

jobs:
  runners:
    name: 'determine runners'
    runs-on: ubuntu-latest
    outputs:
      use-cirrus-runners: ${{ steps.runners.outputs.use-cirrus-runners }}
    steps:
      - id: runners
        run: |
          if [[ "${REPO_USE_CIRRUS_RUNNERS}" == "${{ github.repository }}" ]]; then
            echo "use-cirrus-runners=true" >> "$GITHUB_OUTPUT"
            echo "::notice title=Runner Selection::Using Cirrus Runners"
          else
            echo "use-cirrus-runners=false" >> "$GITHUB_OUTPUT"
            echo "::notice title=Runner Selection::Using GitHub-hosted runners"
          fi

  test-each-commit:
    name: 'test each commit'
    runs-on: ubuntu-24.04
@@ -106,8 +123,12 @@ jobs:
      BASE_ROOT_DIR: ${{ github.workspace }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - &CHECKOUT
        name: Checkout
        uses: actions/checkout@v5
        with:
          # Ensure the latest merged pull request state is used, even on re-runs.
          ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }}

      - name: Clang version
        run: |
@@ -175,8 +196,7 @@ jobs:
      job-name: 'Win64 native fuzz, VS 2022'

    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - *CHECKOUT

      - name: Configure Developer Command Prompt for Microsoft Visual C++
        # Using microsoft/setup-msbuild is not enough.
@@ -265,44 +285,151 @@ jobs:
      run: |
        py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_corpora

  asan-lsan-ubsan-integer-no-depends-usdt:
    name: 'ASan + LSan + UBSan + integer, no depends, USDT'
    runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools
  ci-matrix:
    name: ${{ matrix.name }}
    needs: runners
    runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && matrix.cirrus-runner || matrix.fallback-runner }}
    if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }}
    timeout-minutes: 120
    timeout-minutes: ${{ matrix.timeout-minutes }}

    env:
      FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
      DANGER_CI_ON_HOST_FOLDERS: 1
      FILE_ENV: ${{ matrix.file-env }}

    strategy:
      fail-fast: false
      matrix:
        include:
          - name: '32 bit ARM, unit tests, no functional tests'
            cirrus-runner: 'ubuntu-24.04-arm' # Cirrus' Arm runners are Apple (with virtual Linux aarch64), which doesn't support 32-bit mode
            fallback-runner: 'ubuntu-24.04-arm'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_arm.sh'

          - name: 'win64 Cross'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_win64.sh'

          - name: 'ASan + LSan + UBSan + integer, no depends, USDT'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_asan.sh'

          - name: 'macOS-cross, gui, no tests'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_mac_cross.sh'

          - name: 'No wallet, libbitcoinkernel'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh'

          - name: 'i686, multiprocess, DEBUG'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_i686_multiprocess.sh'

          - name: 'fuzzer,address,undefined,integer, no depends'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 240
            file-env: './ci/test/00_setup_env_native_fuzz.sh'

          - name: 'previous releases, depends DEBUG'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_previous_releases.sh'

          - name: 'CentOS, depends, gui'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_centos.sh'

          - name: 'tidy'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_tidy.sh'

          - name: 'TSan, depends, no gui'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_tsan.sh'

          - name: 'MSan, depends'
            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
            fallback-runner: 'ubuntu-24.04'
            timeout-minutes: 120
            file-env: './ci/test/00_setup_env_native_msan.sh'

    steps:
      - name: Checkout
        uses: actions/checkout@v4
      - *CHECKOUT

      - name: Set CI directories
        run: |
          echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV"
          echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV"
          echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV"
      - name: Configure environment
        uses: ./.github/actions/configure-environment

      - name: Restore Ccache cache
        id: ccache-cache
        uses: actions/cache/restore@v4
        with:
          path: ${{ env.CCACHE_DIR }}
          key: ${{ github.job }}-ccache-${{ github.run_id }}
          restore-keys: ${{ github.job }}-ccache-
      - name: Restore caches
        id: restore-cache
        uses: ./.github/actions/restore-caches

      - name: Configure Docker
        uses: ./.github/actions/configure-docker
        with:
          use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }}

      - name: Enable bpfcc script
        if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }}
        # In the image build step, no external environment variables are available,
        # so any settings will need to be written to the settings env file:
        run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh

      - name: Set mmap_rnd_bits
        if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' || env.CONTAINER_NAME == 'ci_native_msan' }}
        # Prevents crashes due to high ASLR entropy
        run: sudo sysctl -w vm.mmap_rnd_bits=28

      - name: CI script
        run: ./ci/test_run_all.sh

      - name: Save Ccache cache
        uses: actions/cache/save@v4
        if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true'
        with:
          path: ${{ env.CCACHE_DIR }}
          # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
          key: ${{ github.job }}-ccache-${{ github.run_id }}
      - name: Save caches
        uses: ./.github/actions/save-caches

  lint:
    name: 'lint'
    needs: runners
    runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-xs' || 'ubuntu-24.04' }}
    if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }}
    timeout-minutes: 20
    env:
      CONTAINER_NAME: "bitcoin-linter"
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          ref: *CHECKOUT_REF_TMPL
          fetch-depth: 0

      - name: Configure Docker
        uses: ./.github/actions/configure-docker
        with:
          use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }}

      - name: CI script
        run: |
          set -o xtrace
          docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG --file "./ci/lint_imagefile" .
          CIRRUS_PR_FLAG=""
          if [ "${{ github.event_name }}" = "pull_request" ]; then
            CIRRUS_PR_FLAG="-e CIRRUS_PR=1"
          fi
          docker run --rm $CIRRUS_PR_FLAG -v "$(pwd)":/bitcoin "$CONTAINER_NAME"
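The same lint container can be exercised outside of Actions. A hedged sketch mirroring the CI script above (run from the repository root; the cache arguments are simply omitted, and `CIRRUS_PR=1` mimics a pull-request run):

```
# Sketch: reproduce the lint job locally (assumes Docker with buildx).
docker buildx build -t bitcoin-linter --file "./ci/lint_imagefile" .
# -e CIRRUS_PR=1 mimics a pull-request run, as in the workflow above.
docker run --rm -e CIRRUS_PR=1 -v "$(pwd)":/bitcoin bitcoin-linter
```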
CMakeLists.txt
@@ -28,9 +28,9 @@ get_directory_property(precious_variables CACHE_VARIABLES)
#=============================
set(CLIENT_NAME "Bitcoin Core")
set(CLIENT_VERSION_MAJOR 29)
set(CLIENT_VERSION_MINOR 1)
set(CLIENT_VERSION_MINOR 3)
set(CLIENT_VERSION_BUILD 0)
set(CLIENT_VERSION_RC 0)
set(CLIENT_VERSION_RC 1)
set(CLIENT_VERSION_IS_RELEASE "true")
set(COPYRIGHT_YEAR "2025")
ci/README.md (32 lines changed)
@@ -1,8 +1,8 @@
## CI Scripts
# CI Scripts

This directory contains scripts for each build step in each build stage.

### Running a Stage Locally
## Running a Stage Locally

Be aware that the tests will be built and run in-place, so please run at your own risk.
If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first.
@@ -27,7 +27,7 @@ with a specific configuration,
env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh'
```

### Configurations
## Configurations

The test files (`FILE_ENV`) are constructed to test a wide range of
configurations, rather than a single pass/fail. This helps to catch build
@@ -49,8 +49,32 @@ env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV="
The files starting with `0n` (`n` greater than 0) are the scripts that are run
in order.

### Cache
## Cache

In order to avoid rebuilding all dependencies for each build, the binaries are
cached and reused when possible. Changes in the dependency-generator will
trigger cache-invalidation and rebuilds as necessary.
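For intuition, the invalidation works because the cache key embeds a hash of the dependency inputs. A sketch of the derivation used by `.github/actions/configure-environment` above (the task file name below is only an example):

```
# Sketch: how a depends cache key is derived. Any change under depends/ or in
# the task's FILE_ENV file changes the git tree hash and therefore the key.
FILE_ENV="./ci/test/00_setup_env_native_asan.sh"   # example task file
DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)
echo "depends-built-ci_native_asan-${DEPENDS_HASH}"   # shape of the resulting key
```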
## Configuring a repository for CI

### Primary repository

To configure the primary repository, follow these steps:

1. Register with [Cirrus Runners](https://cirrus-runners.app/) and purchase runners.
2. Install the Cirrus Runners GitHub app against the GitHub organization.
3. Enable organisation-level runners to be used in public repositories:
   1. `Org settings -> Actions -> Runner Groups -> Default -> Allow public repos`
4. Permit the following actions to run:
   1. cirruslabs/cache/restore@\*
   1. cirruslabs/cache/save@\*
   1. docker/setup-buildx-action@\*
   1. actions/github-script@\*

### Forked repositories

When used in a fork, the CI will run on GitHub's free hosted runners by default.
In this case, due to GitHub's 10GB-per-repo cache size limit, caches will be frequently evicted and missed, but the workflows will still run (slowly).

It is also possible to use your own Cirrus Runners in your own fork by patching the `REPO_USE_CIRRUS_RUNNERS` variable in ../.github/workflows/ci.yml, as sketched below.
Note that Cirrus Runners only work at the organisation level, so to use your own runners *the fork must live within your own organisation*.
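As a sketch, the patch can be a single line; `my-org/bitcoin` is a hypothetical fork name, not a real repository:

```
# Sketch: point the workflow at a fork's own Cirrus Runners by editing the
# env block of .github/workflows/ci.yml ("my-org/bitcoin" is a placeholder).
sed -i "s|REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin'|REPO_USE_CIRRUS_RUNNERS: 'my-org/bitcoin'|" \
    .github/workflows/ci.yml
```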
ci/lint_run_all.sh (deleted)
@@ -1,17 +0,0 @@
#!/usr/bin/env bash
#
# Copyright (c) 2019-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

export LC_ALL=C.UTF-8

# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally.

cp "./ci/retry/retry" "/ci_retry"
cp "./.python-version" "/.python-version"
mkdir --parents "/test/lint"
cp --recursive "./test/lint/test_runner" "/test/lint/"
set -o errexit; source ./ci/lint/04_install.sh
set -o errexit
./ci/lint/06_script.sh
ci/test/00_setup_env.sh
@@ -35,7 +35,7 @@ fi

echo "Fallback to default values in env (if not yet set)"
# The number of parallel jobs to pass down to make and test_runner.py
export MAKEJOBS=${MAKEJOBS:--j4}
export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)}
# Whether to prefer BusyBox over GNU utilities
export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
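The new default resolves to the machine's logical CPU count. A sketch of the same probe in isolation, purely illustrative:

```
# Sketch: the fallback used above, runnable on its own. nproc exists on
# GNU/Linux; macOS lacks it, so sysctl -n hw.logicalcpu is used instead.
if command -v nproc > /dev/null 2>&1; then
  jobs=$(nproc)
else
  jobs=$(sysctl -n hw.logicalcpu)
fi
echo "MAKEJOBS would default to -j${jobs}"
```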
ci/test/00_setup_env_mac_native.sh
@@ -8,6 +8,7 @@ export LC_ALL=C.UTF-8

# Homebrew's python@3.12 is marked as externally managed (PEP 668).
# Therefore, `--break-system-packages` is needed.
export CONTAINER_NAME="ci_mac_native" # macos does not use a container, but the env var is needed for logging
export PIP_PACKAGES="--break-system-packages zmq"
export GOAL="install"
export CMAKE_GENERATOR="Ninja"
ci/test/00_setup_env_mac_native_fuzz.sh
@@ -6,6 +6,7 @@

export LC_ALL=C.UTF-8

export CONTAINER_NAME="ci_mac_native_fuzz" # macos does not use a container, but the env var is needed for logging
export CMAKE_GENERATOR="Ninja"
export BITCOIN_CONFIG="-DBUILD_FOR_FUZZING=ON"
export CI_OS_NAME="macos"
ci/test/00_setup_env_native_asan.sh
@@ -19,15 +19,15 @@ else
fi

export CONTAINER_NAME=ci_native_asan
export APT_LLVM_V="20"
export APT_LLVM_V="21"
export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
export NO_DEPENDS=1
export GOAL="install"
export BITCOIN_CONFIG="\
 -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \
 -DSANITIZERS=address,float-divide-by-zero,integer,undefined \
 -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \
 -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \
 -DCMAKE_C_COMPILER=clang \
 -DCMAKE_CXX_COMPILER=clang++ \
 -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \
 -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \
 -DAPPEND_CXXFLAGS='-std=c++23' \
ci/test/00_setup_env_native_fuzz.sh
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8

export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
export CONTAINER_NAME=ci_native_fuzz
export APT_LLVM_V="20"
export APT_LLVM_V="21"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev"
export NO_DEPENDS=1
export RUN_UNIT_TESTS=false
@@ -19,9 +19,8 @@ export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the
export BITCOIN_CONFIG="\
 -DBUILD_FOR_FUZZING=ON \
 -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \
 -DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \
 -DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \
 -DCMAKE_C_COMPILER=clang \
 -DCMAKE_CXX_COMPILER=clang++ \
 -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \
 -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \
"
export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-${APT_LLVM_V}"
ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -7,14 +7,16 @@
export LC_ALL=C.UTF-8

export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
LIBCXX_DIR="/msan/cxx_build/"
export APT_LLVM_V="21"
LIBCXX_DIR="/cxx_build/"
export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
# -lstdc++ to resolve link issues due to upstream packaging
LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument -lstdc++"
export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"

export CONTAINER_NAME="ci_native_fuzz_msan"
export PACKAGES="ninja-build"
# BDB generates false-positives and will be removed in future
export PACKAGES="ninja-build clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev"
export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="all"
# Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered.
@@ -27,7 +29,7 @@ export BITCOIN_CONFIG="\
 -DSANITIZERS=fuzzer,memory \
 -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \
"
export USE_MEMORY_SANITIZER="true"
export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins"
export RUN_UNIT_TESTS="false"
export RUN_FUNCTIONAL_TESTS="false"
export RUN_FUZZ_TESTS=true
ci/test/00_setup_env_native_msan.sh
@@ -7,13 +7,14 @@
export LC_ALL=C.UTF-8

export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
LIBCXX_DIR="/msan/cxx_build/"
export APT_LLVM_V="21"
LIBCXX_DIR="/cxx_build/"
export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"

export CONTAINER_NAME="ci_native_msan"
export PACKAGES="ninja-build"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev ninja-build"
# BDB generates false-positives and will be removed in future
export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
@@ -26,4 +27,4 @@ export BITCOIN_CONFIG="\
 -DSANITIZERS=memory \
 -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \
"
export USE_MEMORY_SANITIZER="true"
export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins"
ci/test/00_setup_env_native_tsan.sh
@@ -8,9 +8,12 @@ export LC_ALL=C.UTF-8

export CONTAINER_NAME=ci_native_tsan
export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
export APT_LLVM_V="20"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq"
export DEP_OPTS="CC=clang-${APT_LLVM_V} CXX='clang++-${APT_LLVM_V} -stdlib=libc++'"
export APT_LLVM_V="21"
LIBCXX_DIR="/cxx_build/"
LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build"
export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1"
export GOAL="install"
export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \
 -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'"
 -DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'"
export USE_INSTRUMENTED_LIBCPP="Thread"
ci/test/01_base_install.sh
@@ -43,32 +43,24 @@ elif [ "$CI_OS_NAME" != "macos" ]; then
  ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES"
fi

if [ -n "${APT_LLVM_V}" ]; then
  update-alternatives --install /usr/bin/clang++ clang++ "/usr/bin/clang++-${APT_LLVM_V}" 100
  update-alternatives --install /usr/bin/clang clang "/usr/bin/clang-${APT_LLVM_V}" 100
  update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer "/usr/bin/llvm-symbolizer-${APT_LLVM_V}" 100
fi

if [ -n "$PIP_PACKAGES" ]; then
  # shellcheck disable=SC2086
  ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
fi

if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
  ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.0" /msan/llvm-project
if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then
  ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project

  cmake -G Ninja -B /msan/clang_build/ \
    -DLLVM_ENABLE_PROJECTS="clang" \
    -DCMAKE_BUILD_TYPE=Release \
    -DLLVM_TARGETS_TO_BUILD=Native \
    -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \
    -S /msan/llvm-project/llvm

  ninja -C /msan/clang_build/ "$MAKEJOBS"
  ninja -C /msan/clang_build/ install-runtimes

  update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100
  update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100
  update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100

  cmake -G Ninja -B /msan/cxx_build/ \
  cmake -G Ninja -B /cxx_build/ \
    -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
    -DCMAKE_BUILD_TYPE=Release \
    -DLLVM_USE_SANITIZER=MemoryWithOrigins \
    -DLLVM_USE_SANITIZER="${USE_INSTRUMENTED_LIBCPP}" \
    -DCMAKE_C_COMPILER=clang \
    -DCMAKE_CXX_COMPILER=clang++ \
    -DLLVM_TARGETS_TO_BUILD=Native \
@@ -76,13 +68,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
    -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \
    -DLIBCXX_ABI_DEFINES="_LIBCPP_ABI_BOUNDED_ITERATORS;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR;_LIBCPP_ABI_BOUNDED_UNIQUE_PTR" \
    -DLIBCXX_HARDENING_MODE=debug \
    -S /msan/llvm-project/runtimes
    -S /llvm-project/runtimes

  ninja -C /msan/cxx_build/ "$MAKEJOBS"
  ninja -C /cxx_build/ "$MAKEJOBS"

  # Clear no longer needed source folder
  du -sh /msan/llvm-project
  rm -rf /msan/llvm-project
  du -sh /llvm-project
  rm -rf /llvm-project
fi

if [[ "${RUN_TIDY}" == "true" ]]; then
ci/test/02_run_container.sh
@@ -23,34 +23,14 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
  fi
  echo "Creating $CI_IMAGE_NAME_TAG container to run in"

  DOCKER_BUILD_CACHE_ARG=""
  DOCKER_BUILD_CACHE_TEMPDIR=""
  DOCKER_BUILD_CACHE_OLD_DIR=""
  DOCKER_BUILD_CACHE_NEW_DIR=""
  # If set, use a `docker build` cache directory on the CI host
  # to cache docker image layers for the CI container image.
  # This cache can be multiple GB in size. Prefixed with DANGER
  # as setting it removes (old cache) files from the host.
  if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
    # Directory where the current cache for this run could be. If not existing
    # or empty, "docker build" will warn, but treat it as cache-miss and continue.
    DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}"
    # Temporary directory for a newly created cache. We can't write the new
    # cache into OLD_DIR directly, as old cache layers would not be removed.
    # The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared.
    # This happens after `docker build`. If a task fails or is aborted, the
    # DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't
    # ephemeral, it has to take care of cleaning old TEMPDIR's up.
    DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)"
    DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}"
    DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max"
  fi

  # Use buildx unconditionally
  # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly
  # shellcheck disable=SC2086
  DOCKER_BUILDKIT=1 docker build \
  docker buildx build \
    --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \
    --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \
    --build-arg "FILE_ENV=${FILE_ENV}" \
    --build-arg "BASE_ROOT_DIR=${BASE_ROOT_DIR}" \
    $MAYBE_CPUSET \
    --platform="${CI_IMAGE_PLATFORM}" \
    --label="${CI_IMAGE_LABEL}" \
@@ -58,15 +38,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
    $DOCKER_BUILD_CACHE_ARG \
    "${BASE_READ_ONLY_DIR}"

  if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
    if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then
      echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}"
      rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}"
      echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}"
      mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}"
    fi
  fi

  docker volume create "${CONTAINER_NAME}_ccache" || true
  docker volume create "${CONTAINER_NAME}_depends" || true
  docker volume create "${CONTAINER_NAME}_depends_sources" || true
ci/test/06_script.sh
@@ -24,6 +24,14 @@ fi
echo "Free disk space:"
df -h

# We force an install of linux-headers again here via $PACKAGES to fix any
# kernel mismatch between a cached docker image and the underlying host.
# This can happen occasionally on hosted runners if the runner image is updated.
if [[ "$CONTAINER_NAME" == "ci_native_asan" ]]; then
  $CI_RETRY_EXE apt-get update
  ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES"
fi

# What host to compile for. See also ./depends/README.md
# Tests that need cross-compilation export the appropriate HOST.
# Tests that run natively guess the host
@@ -129,6 +137,12 @@ bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $
bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false )

bash -c "${PRINT_CCACHE_STATISTICS}"
if [ "$CI" = "true" ]; then
  hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/')
  if [ "${hit_rate%.*}" -lt 75 ]; then
    echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%"
  fi
fi
du -sh "${DEPENDS_DIR}"/*/
du -sh "${PREVIOUS_RELEASES_DIR}"
ci/test_imagefile
@@ -4,12 +4,16 @@

# See ci/README.md for usage.

ARG CI_IMAGE_NAME_TAG
# We never want scratch, but default arg silences a Warning
ARG CI_IMAGE_NAME_TAG=scratch
FROM ${CI_IMAGE_NAME_TAG}

ARG FILE_ENV
ENV FILE_ENV=${FILE_ENV}

ARG BASE_ROOT_DIR
ENV BASE_ROOT_DIR=${BASE_ROOT_DIR}

COPY ./ci/retry/retry /usr/bin/retry
COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/
@@ -36,6 +36,10 @@ if(USDT_INCLUDE_DIR)
  include(CheckCXXSourceCompiles)
  set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR})
  check_cxx_source_compiles("
    #if defined(__arm__)
    # define STAP_SDT_ARG_CONSTRAINT g
    #endif

    // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use
    // the optional variadic macros to define tracepoints.
    #define SDT_USE_VARIADIC 1
contrib/guix/manifest.scm
@@ -2,6 +2,7 @@
              ((gnu packages bash) #:select (bash-minimal))
              (gnu packages bison)
              ((gnu packages certs) #:select (nss-certs))
              ((gnu packages check) #:select (libfaketime))
              ((gnu packages cmake) #:select (cmake-minimal))
              (gnu packages commencement)
              (gnu packages compression)
@@ -209,7 +210,17 @@ and abstract ELF, PE and MachO formats.")
       (base32
        "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz"))))
    (build-system cmake-build-system)
    (inputs (list openssl))
    (arguments
     (list
      #:phases
      #~(modify-phases %standard-phases
          (replace 'check
            (lambda* (#:key tests? #:allow-other-keys)
              (if tests?
                  (invoke "faketime" "-f" "@2025-01-01 00:00:00" ;; Tests fail after 2025.
                          "ctest" "--output-on-failure" "--no-tests=error")
                  (format #t "test suite not run~%")))))))
    (inputs (list libfaketime openssl))
    (home-page "https://github.com/mtrojnar/osslsigncode")
    (synopsis "Authenticode signing and timestamping tool")
    (description "osslsigncode is a small tool that implements part of the
contrib/macdeploy/macdeployqtplus
@@ -465,18 +465,18 @@ if config.translations_dir:
        sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n")
        sys.exit(1)

    print("+ Adding Qt translations +")

    translations = Path(config.translations_dir[0])

    regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)')

    lang_files = [x for x in translations.iterdir() if regex.match(x.name)]

    for file in lang_files:
        if verbose:
            print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name))
        shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name))

# ------------------------------------------------
contrib/seeds/README.md
@@ -10,14 +10,13 @@ to addrman with).

Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed.

The seeds compiled into the release are created from sipa's, achow101's and luke-jr's
The seeds compiled into the release are created from sipa's and achow101's
DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands
from the `/contrib/seeds` directory:

```
curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt
curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt
curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt
curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt
curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt
depends/packages/native_libmultiprocess.mk
@@ -1,8 +1,8 @@
package=native_libmultiprocess
$(package)_version=1954f7f65661d49e700c344eae0fc8092decf975
$(package)_version=v5.0
$(package)_download_path=https://github.com/bitcoin-core/libmultiprocess/archive
$(package)_file_name=$($(package)_version).tar.gz
$(package)_sha256_hash=fc014bd74727c1d5d30b396813685012c965d079244dd07b53bc1c75c610a2cb
$(package)_sha256_hash=401984715b271a3446e1910f21adf048ba390d31cc93cc3073742e70d56fa3ea
$(package)_dependencies=native_capnp

define $(package)_config_cmds
depends/packages/qt.mk
@@ -1,6 +1,6 @@
package=qt
$(package)_version=5.15.16
$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules
$(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules
$(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz
$(package)_file_name=qtbase-$($(package)_suffix)
$(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592
doc/dependencies.md
@@ -30,9 +30,13 @@ Bitcoin Core requires one of the following compilers.
| [Fontconfig](../depends/packages/fontconfig.mk) (gui) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes |
| [FreeType](../depends/packages/freetype.mk) (gui) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes |
| [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No |
| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
| [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No |
| [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No |
| [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
| Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No |
| [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945) | N/A | No |
| [capnproto](../depends/packages/capnp.mk) ([multiprocess](multiprocess.md)) | [link](https://capnproto.org/) | [1.2.0](https://github.com/bitcoin/bitcoin/pull/32760) | [0.7.0](https://github.com/bitcoin-core/libmultiprocess/pull/88) | No |
| [libmultiprocess](../depends/packages/libmultiprocess.mk) ([multiprocess](multiprocess.md)) | [link](https://github.com/bitcoin-core/libmultiprocess) | [5.0](https://github.com/bitcoin/bitcoin/pull/31945) | [v5.0-pre1](https://github.com/bitcoin/bitcoin/pull/31740)* | No |

\* Libmultiprocess 5.x versions should be compatible, but 6.0 and later are not due to bitcoin-core/libmultiprocess#160.
doc/man/bitcoin-cli.1
@@ -1,7 +1,7 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-CLI "1" "September 2025" "bitcoin-cli v29.1.0" "User Commands"
.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-cli \- manual page for bitcoin-cli v29.1.0
bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-cli
[\fI\,options\/\fR] \fI\,<command> \/\fR[\fI\,params\/\fR]
@@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.1.0
.B bitcoin-cli
[\fI\,options\/\fR] \fI\,help <command>\/\fR
.SH DESCRIPTION
Bitcoin Core RPC client version v29.1.0
Bitcoin Core RPC client version v29.3.0rc1
.PP
The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server.
.PP
doc/man/bitcoin-qt.1
@@ -1,12 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-QT "1" "September 2025" "bitcoin-qt v29.1.0" "User Commands"
.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-qt \- manual page for bitcoin-qt v29.1.0
bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-qt
[\fI\,options\/\fR] [\fI\,URI\/\fR]
.SH DESCRIPTION
Bitcoin Core version v29.1.0
Bitcoin Core version v29.3.0rc1
.PP
The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core.
.PP
@@ -1,7 +1,7 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
|
||||
.TH BITCOIN-TX "1" "September 2025" "bitcoin-tx v29.1.0" "User Commands"
|
||||
.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands"
|
||||
.SH NAME
|
||||
bitcoin-tx \- manual page for bitcoin-tx v29.1.0
|
||||
bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1
|
||||
.SH SYNOPSIS
|
||||
.B bitcoin-tx
|
||||
[\fI\,options\/\fR] \fI\,<hex-tx> \/\fR[\fI\,commands\/\fR]
|
||||
@@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.1.0
|
||||
.B bitcoin-tx
|
||||
[\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR]
|
||||
.SH DESCRIPTION
|
||||
Bitcoin Core bitcoin\-tx utility version v29.1.0
|
||||
Bitcoin Core bitcoin\-tx utility version v29.3.0rc1
|
||||
.PP
|
||||
The bitcoin\-tx tool is used for creating and modifying bitcoin transactions.
|
||||
.PP
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
|
||||
.TH BITCOIN-UTIL "1" "September 2025" "bitcoin-util v29.1.0" "User Commands"
|
||||
.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands"
|
||||
.SH NAME
|
||||
bitcoin-util \- manual page for bitcoin-util v29.1.0
|
||||
bitcoin-util \- manual page for bitcoin-util v29.3.0rc1
|
||||
.SH SYNOPSIS
|
||||
.B bitcoin-util
|
||||
[\fI\,options\/\fR] [\fI\,command\/\fR]
|
||||
@@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.1.0
|
||||
.B bitcoin-util
|
||||
[\fI\,options\/\fR] \fI\,grind <hex-block-header>\/\fR
|
||||
.SH DESCRIPTION
|
||||
Bitcoin Core bitcoin\-util utility version v29.1.0
|
||||
Bitcoin Core bitcoin\-util utility version v29.3.0rc1
|
||||
.PP
|
||||
The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below.
|
||||
.SH OPTIONS
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
|
||||
.TH BITCOIN-WALLET "1" "September 2025" "bitcoin-wallet v29.1.0" "User Commands"
|
||||
.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands"
|
||||
.SH NAME
|
||||
bitcoin-wallet \- manual page for bitcoin-wallet v29.1.0
|
||||
bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1
|
||||
.SH SYNOPSIS
|
||||
.B bitcoin-wallet
|
||||
[\fI\,options\/\fR] \fI\,<command>\/\fR
|
||||
.SH DESCRIPTION
|
||||
Bitcoin Core bitcoin\-wallet utility version v29.1.0
|
||||
Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1
|
||||
.PP
|
||||
bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files.
|
||||
.PP
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
|
||||
.TH BITCOIND "1" "September 2025" "bitcoind v29.1.0" "User Commands"
|
||||
.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands"
|
||||
.SH NAME
|
||||
bitcoind \- manual page for bitcoind v29.1.0
|
||||
bitcoind \- manual page for bitcoind v29.3.0rc1
|
||||
.SH SYNOPSIS
|
||||
.B bitcoind
|
||||
[\fI\,options\/\fR]
|
||||
.SH DESCRIPTION
|
||||
Bitcoin Core daemon version v29.1.0
|
||||
Bitcoin Core daemon version v29.3.0rc1
|
||||
.PP
|
||||
The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses.
|
||||
.PP
|
||||
|
||||
@@ -1,6 +1,6 @@
Bitcoin Core version 29.1 is now available from:
Bitcoin Core version 29.3rc1 is now available from:

<https://bitcoincore.org/bin/bitcoin-core-29.1/>
<https://bitcoincore.org/bin/bitcoin-core-29.3/test.rc1/>

This release includes various bug fixes and performance
improvements, as well as updated translations.
@@ -37,192 +37,62 @@ unsupported systems.
Notable changes
===============

### Mempool Policy
### P2P

- The maximum number of potentially executed legacy signature operations in a
single standard transaction is now limited to 2500. Signature operations in all
previous output scripts, in all input scripts, as well as all P2SH redeem
scripts (if there are any) are counted toward the limit. The new limit is
not expected to affect any known, typically formed standard transaction. The
change prepares for a possible BIP54 deployment in the future.
- #33050 net, validation: don't punish peers for consensus-invalid txs
- #33723 chainparams: remove dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us
- #32521 policy: make pathological transactions packed with legacy sigops non-standard
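
For intuition, here is a minimal editorial sketch (not from the release notes or the patch) of how legacy sigops accumulate across a transaction's inputs; the helper name is hypothetical and the real accounting in #32521 lives in the standardness checks:

    // Rough per-input legacy sigop accounting in the spirit of the new 2500 limit.
    #include <coins.h>
    #include <primitives/transaction.h>
    #include <script/script.h>

    unsigned int CountLegacySigops(const CTransaction& tx, const CCoinsViewCache& coins)
    {
        unsigned int sigops{0};
        for (const CTxIn& txin : tx.vin) {
            const CScript& prev_spk{coins.AccessCoin(txin.prevout).out.scriptPubKey};
            sigops += txin.scriptSig.GetSigOpCount(/*fAccurate=*/true); // input script
            sigops += prev_spk.GetSigOpCount(/*fAccurate=*/true);       // previous output script
            if (prev_spk.IsPayToScriptHash()) {
                sigops += prev_spk.GetSigOpCount(txin.scriptSig);       // P2SH redeem script
            }
        }
        return sigops; // a standard transaction must stay at or below 2500
    }

For example, since an accurately counted 1-of-15 CHECKMULTISIG redeem script contributes 15 sigops, a transaction with 167 such P2SH inputs (2505 sigops) would exceed the limit.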

### Validation

- The minimum block feerate (`-blockmintxfee`) has been changed to 1 satoshi per kvB. It can still be changed using the
configuration option.

- The default minimum relay feerate (`-minrelaytxfee`) and incremental relay feerate (`-incrementalrelayfee`) have been
changed to 100 satoshis per kvB. They can still be changed using their respective configuration options, but it is
recommended to change both together if you decide to do so.
- Other minimum feerates (e.g. the dust feerate, the minimum returned by the fee estimator, and all feerates used by
the wallet) remain unchanged. The mempool minimum feerate still changes in response to high volume.
- Note that unless these lower defaults are widely adopted across the network, transactions created with lower fee
rates are not guaranteed to propagate or confirm. The wallet feerates remain unchanged; `-mintxfee` must be changed
before attempting to create transactions with lower feerates using the wallet.
- #33106 policy: lower the default blockmintxfee, incrementalrelayfee, minrelaytxfee
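
As a worked illustration (editorial, not from the notes): at the new 100 sat/kvB default, a 250 vB transaction needs a fee of at least 250 * 100 / 1000 = 25 satoshis to be relayed, down from 250 satoshis under the previous 1000 sat/kvB default.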

### Logging

Unconditional logging to disk is now rate limited by giving each source location
a quota of 1MiB per hour. Unconditional logging is any logging with a log level
higher than debug, that is `info`, `warning`, and `error`. All logs will be
prefixed with `[*]` if there is at least one source location that is currently
being suppressed. (#32604)

When `-logsourcelocations` is enabled, the log output now contains the entire
function signature instead of just the function name. (#32604)
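
To put the quota in perspective (editorial arithmetic): a single call site emitting 1 KiB log lines can write roughly 1024 lines per hour before its output is suppressed and the `[*]` prefix starts appearing.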

### RPC

- The `dumptxoutset` RPC now requires a `type` parameter to be specified. To maintain pre
v29.0 behavior, use the `latest` parameter. Documenting this change was missed in the v29.0
release notes. (#30808)
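
For example (illustrative invocation; the file name is arbitrary): `bitcoin-cli dumptxoutset utxo.dat latest` writes the snapshot with the pre-v29.0 semantics.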

### Updated Settings

- The `-maxmempool` and `-dbcache` startup parameters are now capped on
32-bit systems to 500MB and 1GiB respectively.

- #32530 node: cap -maxmempool and -dbcache values for 32-bit
- #32473 Introduce per-txin sighash midstate cache for legacy/p2sh/segwitv0 scripts
- #33105 validation: detect witness stripping without re-running Script checks

### Wallet

- #31757 wallet: fix crash on double block disconnection
- #32553 wallet: Fix logging of wallet version
- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet
- #34156 wallet: fix unnamed legacy wallet migration failure
- #34226 wallet: test: Relative wallet failed migration cleanup
- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet
- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion

### P2P
### Mining

- #32826 p2p: add more bad ports

### Test

- #32069 test: fix intermittent failure in wallet_reorgsrestore.py
- #32286 test: Handle empty string returned by CLI as None in RPC tests
- #32312 test: Fix feature_pruning test after nTime typo fix
- #32336 test: Suppress upstream -Wduplicate-decl-specifier in bpfcc
- #32463 test: fix an incorrect feature_fee_estimation.py subtest
- #32483 test: fix two intermittent failures in wallet_basic.py
- #32630 test: fix sync function in rpc_psbt.py
- #32765 test: Fix list index out of range error in feature_bip68_sequence.py
- #32742 test: fix catchup loop in outbound eviction functional test
- #32823 test: Fix wait_for_getheaders() call in test_outbound_eviction_blocks_relay_only()
- #32833 test: Add msgtype to msg_generic slots
- #32841 feature_taproot: sample tx version border values more
- #32850 test: check P2SH sigop count for coinbase tx
- #32859 test: correctly detect nonstd TRUC tx vsize in feature_taproot
- #33001 test: Do not pass tests on unhandled exceptions

### Indexes

- #33212 index: Don't commit state in BaseIndex::Rewind

### Util

- #32248 Remove support for RNDR/RNDRRS for aarch64
- #33475 bugfix: miner: fix `addPackageTxs` unsigned integer overflow

### Build

- #32356 cmake: Respect user-provided configuration-specific flags
- #32437 crypto: disable ASan for sha256_sse4 with Clang
- #32469 cmake: Allow WITH_DBUS on all Unix-like systems
- #32439 guix: accomodate migration to codeberg
- #32551 cmake: Add missed SSE41_CXXFLAGS
- #32568 depends: use "mkdir -p" when installing xproto
- #32678 guix: warn and abort when SOURCE_DATE_EPOCH is set
- #32690 depends: fix SHA256SUM command on OpenBSD (use GNU mode output)
- #32716 depends: Override host compilers for FreeBSD and OpenBSD
- #32760 depends: capnp 1.2.0
- #32798 build: add root dir to CMAKE_PREFIX_PATH in toolchain
- #32805 cmake: Use HINTS instead of PATHS in find_* commands
- #32814 cmake: Explicitly specify Boost_ROOT for Homebrew's package
- #32837 depends: fix libevent _WIN32_WINNT usage
- #32943 depends: Force CMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE
- #32954 cmake: Drop no longer necessary "cmakeMinimumRequired" object
- #33073 guix: warn SOURCE_DATE_EPOCH set in guix-codesign
- #34227 guix: Fix `osslsigncode` tests

### Gui
### Documentation

- #864 Crash fix, disconnect numBlocksChanged() signal during shutdown
- #868 Replace stray tfm::format to cerr with qWarning
- #33623 doc: document capnproto and libmultiprocess deps in 29.x

### Doc
### Test

- #32333 doc: Add missing top-level description to pruneblockchain RPC
- #32353 doc: Fix fuzz test_runner.py path
- #32389 doc: Fix test_bitcoin path
- #32607 rpc: Note in fundrawtransaction doc, fee rate is for package
- #32679 doc: update tor docs to use bitcoind binary from path
- #32693 depends: fix cmake compatibility error for freetype
- #32696 doc: make -DWITH_ZMQ=ON explicit on build-unix.md
- #32708 rpc, doc: update listdescriptors RCP help
- #32711 doc: add missing packages for BSDs (cmake, gmake, curl) to depends/README.md
- #32719 doc, windows: CompanyName "Bitcoin" => "Bitcoin Core project"
- #32776 doc: taproot became always active in v24.0
- #32777 doc: fix Transifex 404s
- #32846 doc: clarify that the "-j N" goes after the "--build build" part
- #32858 doc: Add workaround for vcpkg issue with paths with embedded spaces
- #33070 doc/zmq: fix unix socket path example
- #33088 doc: move cmake -B build -LH up in Unix build docs
- #33133 rpc: fix getpeerinfo ping duration unit docs
- #33119 rpc: Fix 'getdescriptoractivity' RPCHelpMan, add test to verify fix
- #33236 doc: Remove wrong and redundant doxygen tag

### CI

- #32184 ci: Add workaround for vcpkg's libevent package
- #33261 ci: return to using dash in CentOS job
- #33612 test: change log rate limit version gate

### Misc

- #32187 refactor: Remove spurious virtual from final ~CZMQNotificationInterface
- #32454 tracing: fix invalid argument in mempool_monitor
- #32771 contrib: tracing: Fix read of pmsg_type in p2p_monitor.py
- #33086 contrib: [tracing] fix pointer argument handling in mempool_monitor.py
- #33508 ci: fix buildx gha cache authentication on forks
- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH

Credits
=======

Thanks to everyone who directly contributed to this release:

- 0xB10C
- achow101
- Anthony Towns
- Antoine Poinsot
- benthecarman
- bigspider
- Brandon Odiwuor
- brunoerg
- Bufo
- Christewart
- Crypt-iQ
- davidgumberg
- deadmanoz
- dergoegge
- enirox001
- Ava Chow
- David Gumberg
- Eugene Siegel
- fanquake
- furszy
- glozow
- instagibbs
- Hennadii Stepanov
- hodlinator
- ismaelsadeeq
- jb55
- jlopp
- josibake
- laanwj
- luisschwab
- MarcoFalke
- Martin Zumsande
- monlovesmango
- nervana21
- pablomartin4btc
- rkrux
- romanz
- ryanofsky
- Sjors
- theStack
- Pieter Wuille
- SatsAndSports
- willcl-ark
- zaidmstrr

As well as to everyone that helped with translations on
[Transifex](https://explore.transifex.com/bitcoin/bitcoin/).
@@ -180,7 +180,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
return txn_available[index] != nullptr;
}

ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active)
{
if (header.IsNull()) return READ_STATUS_INVALID;

@@ -205,16 +205,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
if (vtx_missing.size() != tx_missing_offset)
return READ_STATUS_INVALID;

BlockValidationState state;
CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
// TODO: We really want to just check merkle tree manually here,
// but that is expensive, and CheckBlock caches a block's
// "checked-status" (in the CBlock?). CBlock should be able to
// check its own merkle root and cache that check.
if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
return READ_STATUS_FAILED; // Possible Short ID collision
return READ_STATUS_CHECKBLOCK_FAILED;
// Check for possible mutations early now that we have a seemingly good block
IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated};
if (check_mutated(/*block=*/block,
/*check_witness_root=*/segwit_active)) {
return READ_STATUS_FAILED; // Possible Short ID collision
}

LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());

@@ -84,8 +84,6 @@ typedef enum ReadStatus_t
READ_STATUS_OK,
READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap
READ_STATUS_FAILED, // Failed to process object
READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a
// failure in CheckBlock.
} ReadStatus;

class CBlockHeaderAndShortTxIDs {
@@ -141,15 +139,16 @@ public:
CBlockHeader header;

// Can be overridden for testing
using CheckBlockFn = std::function<bool(const CBlock&, BlockValidationState&, const Consensus::Params&, bool, bool)>;
CheckBlockFn m_check_block_mock{nullptr};
using IsBlockMutatedFn = std::function<bool(const CBlock&, bool)>;
IsBlockMutatedFn m_check_block_mutated_mock{nullptr};

explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {}

// extra_txn is a list of extra orphan/conflicted/etc transactions to look at
ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn);
bool IsTxAvailable(size_t index) const;
ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing);
// segwit_active enforces witness mutation checks just before reporting a healthy status
ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active);
};

#endif // BITCOIN_BLOCKENCODINGS_H

@@ -627,7 +627,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
Transform = sha256_x86_shani::Transform;
TransformD64 = TransformD64Wrapper<sha256_x86_shani::Transform>;
TransformD64_2way = sha256d64_x86_shani::Transform_2way;
ret = "x86_shani(1way,2way)";
ret = "x86_shani(1way;2way)";
have_sse4 = false; // Disable SSE4/AVX2;
have_avx2 = false;
}
@@ -641,14 +641,14 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
#endif
#if defined(ENABLE_SSE41)
TransformD64_4way = sha256d64_sse41::Transform_4way;
ret += ",sse41(4way)";
ret += ";sse41(4way)";
#endif
}

#if defined(ENABLE_AVX2)
if (have_avx2 && have_avx && enabled_avx) {
TransformD64_8way = sha256d64_avx2::Transform_8way;
ret += ",avx2(8way)";
ret += ";avx2(8way)";
}
#endif
#endif // defined(HAVE_GETCPUID)
@@ -682,7 +682,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
Transform = sha256_arm_shani::Transform;
TransformD64 = TransformD64Wrapper<sha256_arm_shani::Transform>;
TransformD64_2way = sha256d64_arm_shani::Transform_2way;
ret = "arm_shani(1way,2way)";
ret = "arm_shani(1way;2way)";
}
#endif
#endif // DISABLE_OPTIMIZED_SHA256

@@ -146,7 +146,6 @@ public:
// release ASAP to avoid it where possible.
vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd
vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9
vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr
vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd
vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd
vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost

src/net.cpp
@@ -575,9 +575,9 @@ void CNode::CloseSocketDisconnect()
m_i2p_sam_session.reset();
}

void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const {
void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional<CNetAddr> addr, const std::vector<NetWhitelistPermissions>& ranges) const {
for (const auto& subnet : ranges) {
if (subnet.m_subnet.Match(addr)) {
if (addr.has_value() && subnet.m_subnet.Match(addr.value())) {
NetPermissions::AddFlag(flags, subnet.m_flags);
}
}
@@ -1767,7 +1767,11 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
{
int nInbound = 0;

AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming);
const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();

// Tor inbound connections do not reveal the peer's actual network address.
// Therefore do not apply address-based whitelist permissions to them.
AddWhitelistPermissionFlags(permission_flags, inbound_onion ? std::optional<CNetAddr>{} : addr, vWhitelistedRangeIncoming);

{
LOCK(m_nodes_mutex);
@@ -1822,7 +1826,6 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();

const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
// The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
// detected, so use it whenever we signal NODE_P2P_V2.
ServiceFlags local_services = GetLocalServices();

@@ -1364,7 +1364,7 @@ private:

bool AttemptToEvictConnection();
CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const;
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional<CNetAddr> addr, const std::vector<NetWhitelistPermissions>& ranges) const;

void DeleteNode(CNode* pnode);

@@ -553,12 +553,6 @@ private:
bool via_compact_block, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

/**
* Potentially disconnect and discourage a node based on the contents of a TxValidationState object
*/
void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

/** Maybe disconnect a peer and discourage future connections from its address.
*
* @param[in] pnode The node to check.
@@ -1805,32 +1799,6 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
}
}

void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
// The node is providing invalid data:
case TxValidationResult::TX_CONSENSUS:
if (peer) Misbehaving(*peer, "");
return;
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
case TxValidationResult::TX_WITNESS_MUTATED:
case TxValidationResult::TX_WITNESS_STRIPPED:
case TxValidationResult::TX_CONFLICT:
case TxValidationResult::TX_MEMPOOL_POLICY:
case TxValidationResult::TX_NO_MEMPOOL:
case TxValidationResult::TX_RECONSIDERABLE:
case TxValidationResult::TX_UNKNOWN:
break;
}
}

bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
AssertLockHeld(cs_main);
@@ -2987,8 +2955,6 @@ std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId
if (peer) AddKnownTx(*peer, parent_txid);
}

MaybePunishNodeForTx(nodeid, state);

return package_to_validate;
}

@@ -3314,7 +3280,21 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
}

PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);

if (partialBlock.header.IsNull()) {
// It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left
// the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we
// should not call LookupBlockIndex below.
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
Misbehaving(peer, "previous compact block reconstruction attempt failed");
LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId());
return;
}

// We should not have gotten this far in compact block processing unless it's attached to a known header
const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))};
ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn,
/*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
Misbehaving(peer, "invalid compact block/non-matching block transactions");
@@ -3322,6 +3302,9 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
} else if (status == READ_STATUS_FAILED) {
if (first_in_flight) {
// Might have collided, fall back to getdata now :(
// We keep the failed partialBlock to disallow processing another compact block announcement from the same
// peer for the same block. We let the full block download below continue under the same m_downloading_since
// timer.
std::vector<CInv> invs;
invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
@@ -3331,23 +3314,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
return;
}
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
// Note that CheckBlock can only fail for one of a few reasons:
// 1. bad-proof-of-work (impossible here, because we've already
// accepted the header)
// 2. merkleroot doesn't match the transactions given (already
// caught in FillBlock with READ_STATUS_FAILED, so
// impossible here)
// 3. the block is otherwise invalid (eg invalid coinbase,
// block is too big, too many legacy sigops, etc).
// So if CheckBlock failed, #3 is the only possibility.
// Under BIP 152, we don't discourage the peer unless proof of work is
// invalid (we don't require all the stateless checks to have
// been run). This is handled below, so just treat this as
// though the block was successfully read, and rely on the
// handling in ProcessNewBlock to ensure the block index is
// updated, etc.
// Block is okay for further processing
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
fBlockRead = true;
// mapBlockSource is used for potentially punishing peers and
@@ -4462,7 +4429,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
std::vector<CTransactionRef> dummy;
status = tempBlock.FillBlock(*pblock, dummy);
const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))};
status = tempBlock.FillBlock(*pblock, dummy,
/*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
if (status == READ_STATUS_OK) {
fBlockReconstructed = true;
}

@@ -394,8 +394,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda

++nConsecutiveFailed;

if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
m_options.nBlockMaxWeight - m_options.block_reserved_weight) {
if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight +
m_options.block_reserved_weight > m_options.nBlockMaxWeight) {
// Give up if we're close to full and haven't succeeded in a while
break;
}
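// Editorial note (not part of the patch): writing the bound as an addition
// avoids unsigned wrap-around in the old form, where subtracting
// block_reserved_weight from nBlockMaxWeight could underflow whenever the
// reserved weight exceeds the configured maximum (#33475).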

@@ -344,6 +344,42 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
return true;
}

bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts)
{
if (tx.IsCoinBase()) {
return false;
}

int version;
std::vector<uint8_t> program;
for (const auto& txin: tx.vin) {
const auto& prev_spk{prevouts.AccessCoin(txin.prevout).out.scriptPubKey};

// Note this includes not-yet-defined witness programs.
if (prev_spk.IsWitnessProgram(version, program) && !prev_spk.IsPayToAnchor(version, program)) {
return true;
}

// For P2SH extract the redeem script and check if it spends a non-Taproot witness program. Note
// this is fine to call EvalScript (as done in AreInputsStandard/IsWitnessStandard) because this
// function is only ever called after IsStandardTx, which checks the scriptsig is pushonly.
if (prev_spk.IsPayToScriptHash()) {
// If EvalScript fails or results in an empty stack, the transaction is invalid by consensus.
std::vector<std::vector<uint8_t>> stack;
if (!EvalScript(stack, txin.scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker{}, SigVersion::BASE)
|| stack.empty()) {
continue;
}
const CScript redeem_script{stack.back().begin(), stack.back().end()};
if (redeem_script.IsWitnessProgram(version, program)) {
return true;
}
}
}

return false;
}
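// Editorial example (not part of the patch): a P2SH-wrapped P2WPKH input is
// caught by the redeem-script branch above, while an input spending a bare
// P2A (pay-to-anchor) output is excluded by the IsPayToAnchor() check and,
// absent other witness-program inputs, the function returns false.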

int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop)
{
return (std::max(nWeight, nSigOpCost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR;

@@ -167,6 +167,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
* Also enforce a maximum stack item size limit and no annexes for tapscript spends.
*/
bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs);
/**
* Check whether this transaction spends any witness program but P2A, including not-yet-defined ones.
* May return `false` early for consensus-invalid transactions.
*/
bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts);

/** Compute the virtual transaction size (weight reinterpreted as bytes). */
int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop);

@@ -164,7 +164,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex
result.pushKV("mediantime", blockindex.GetMedianTimePast());
result.pushKV("nonce", blockindex.nNonce);
result.pushKV("bits", strprintf("%08x", blockindex.nBits));
result.pushKV("target", GetTarget(tip, pow_limit).GetHex());
result.pushKV("target", GetTarget(blockindex, pow_limit).GetHex());
result.pushKV("difficulty", GetDifficulty(blockindex));
result.pushKV("chainwork", blockindex.nChainWork.GetHex());
result.pushKV("nTx", blockindex.nTx);

@@ -1494,7 +1494,7 @@ static RPCHelpMan finalizepsbt()
return RPCHelpMan{"finalizepsbt",
"Finalize the inputs of a PSBT. If the transaction is fully signed, it will produce a\n"
"network serialized transaction which can be broadcast with sendrawtransaction. Otherwise a PSBT will be\n"
"created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete.\n"
"created which has the final_scriptSig and final_scriptwitness fields filled for inputs that are complete.\n"
"Implements the Finalizer and Extractor roles.\n",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"},

@@ -1564,11 +1564,57 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons
return true;
}

int SigHashCache::CacheIndex(int32_t hash_type) const noexcept
{
// Note that we do not distinguish between BASE and WITNESS_V0 to determine the cache index,
// because no input can simultaneously use both.
return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) +
2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) +
1 * ((hash_type & 0x1f) == SIGHASH_NONE);
}
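// Editorial worked example (not part of the patch): the formula maps the six
// standard modes to distinct slots: ALL->0, NONE->1, SINGLE->2,
// ALL|ANYONECANPAY->3, NONE|ANYONECANPAY->4, SINGLE|ANYONECANPAY->5. For
// instance, SIGHASH_SINGLE | SIGHASH_ANYONECANPAY yields 3*1 + 2*1 + 1*0 == 5.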

bool SigHashCache::Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept
{
auto& entry = m_cache_entries[CacheIndex(hash_type)];
if (entry.has_value()) {
if (script_code == entry->first) {
writer = HashWriter(entry->second);
return true;
}
}
return false;
}

void SigHashCache::Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept
{
auto& entry = m_cache_entries[CacheIndex(hash_type)];
entry.emplace(script_code, writer);
}
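// Editorial note (not part of the patch): the script_code participates in the
// cache key because OP_CODESEPARATOR, or a different P2SH redeem script, can
// change the scriptCode between signature checks of the same input; a midstate
// cached for one scriptCode must never be served for another.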

template <class T>
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache)
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache, SigHashCache* sighash_cache)
{
assert(nIn < txTo.vin.size());

if (sigversion != SigVersion::WITNESS_V0) {
// Check for invalid use of SIGHASH_SINGLE
if ((nHashType & 0x1f) == SIGHASH_SINGLE) {
if (nIn >= txTo.vout.size()) {
// nOut out of range
return uint256::ONE;
}
}
}

HashWriter ss{};

// Try to compute using cached SHA256 midstate.
if (sighash_cache && sighash_cache->Load(nHashType, scriptCode, ss)) {
// Add sighash type and hash.
ss << nHashType;
return ss.GetHash();
}

if (sigversion == SigVersion::WITNESS_V0) {
uint256 hashPrevouts;
uint256 hashSequence;
@@ -1583,16 +1629,14 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo));
}

if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo));
} else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) {
HashWriter ss{};
ss << txTo.vout[nIn];
hashOutputs = ss.GetHash();
HashWriter inner_ss{};
inner_ss << txTo.vout[nIn];
hashOutputs = inner_ss.GetHash();
}

HashWriter ss{};
// Version
ss << txTo.version;
// Input prevouts/nSequence (none/all, depending on flags)
@@ -1609,26 +1653,21 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
ss << hashOutputs;
// Locktime
ss << txTo.nLockTime;
// Sighash type
ss << nHashType;
} else {
// Wrapper to serialize only the necessary parts of the transaction being signed
CTransactionSignatureSerializer<T> txTmp(txTo, scriptCode, nIn, nHashType);

return ss.GetHash();
// Serialize
ss << txTmp;
}

// Check for invalid use of SIGHASH_SINGLE
if ((nHashType & 0x1f) == SIGHASH_SINGLE) {
if (nIn >= txTo.vout.size()) {
// nOut out of range
return uint256::ONE;
}
// If a cache object was provided, store the midstate there.
if (sighash_cache != nullptr) {
sighash_cache->Store(nHashType, scriptCode, ss);
}

// Wrapper to serialize only the necessary parts of the transaction being signed
CTransactionSignatureSerializer<T> txTmp(txTo, scriptCode, nIn, nHashType);

// Serialize and hash
HashWriter ss{};
ss << txTmp << nHashType;
// Add sighash type and hash.
ss << nHashType;
return ss.GetHash();
}

@@ -1661,7 +1700,7 @@ bool GenericTransactionSignatureChecker<T>::CheckECDSASignature(const std::vecto
// Witness sighashes need the amount.
if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return HandleMissingData(m_mdb);

uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata);
uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata, &m_sighash_cache);

if (!VerifyECDSASignature(vchSig, pubkey, sighash))
return false;

@@ -239,8 +239,27 @@ extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre
extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it.
extern const HashWriter HASHER_TAPBRANCH; //!< Hasher with tag "TapBranch" pre-fed to it.

/** Data structure to cache SHA256 midstates for the ECDSA sighash calculations
* (bare, P2SH, P2WPKH, P2WSH). */
class SigHashCache
{
/** For each sighash mode (ALL, SINGLE, NONE, ALL|ANYONE, SINGLE|ANYONE, NONE|ANYONE),
* optionally store a scriptCode which the hash is for, plus a midstate for the SHA256
* computation just before adding the hash_type itself. */
std::optional<std::pair<CScript, HashWriter>> m_cache_entries[6];

/** Given a hash_type, find which of the 6 cache entries is to be used. */
int CacheIndex(int32_t hash_type) const noexcept;

public:
/** Load into writer the SHA256 midstate if found in this cache. */
[[nodiscard]] bool Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept;
/** Store into this cache object the provided SHA256 midstate. */
void Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept;
};
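// Editorial usage sketch (not part of the patch): a checker owns one cache and
// threads it through repeated sighash computations for the same input, e.g.
//   SigHashCache cache;
//   const uint256 h{SignatureHash(script_code, tx, /*nIn=*/0, SIGHASH_ALL,
//                                 amount, SigVersion::WITNESS_V0,
//                                 /*cache=*/nullptr, &cache)};
// A second call with the same scriptCode and hash type reuses the stored
// SHA256 midstate instead of re-serializing the whole transaction.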
|
||||
|
||||
template <class T>
|
||||
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr);
|
||||
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr, SigHashCache* sighash_cache = nullptr);
|
||||
|
||||
class BaseSignatureChecker
|
||||
{
|
||||
@@ -289,6 +308,7 @@ private:
|
||||
unsigned int nIn;
|
||||
const CAmount amount;
|
||||
const PrecomputedTransactionData* txdata;
|
||||
mutable SigHashCache m_sighash_cache;
|
||||
|
||||
protected:
|
||||
virtual bool VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const;
|
||||
|
||||
@@ -95,21 +95,21 @@ BOOST_AUTO_TEST_CASE(SimpleRoundTripTest)
|
||||
CBlock block2;
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions
|
||||
partialBlock = tmp;
|
||||
}
|
||||
|
||||
// Wrong transaction
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock = tmp;
|
||||
}
|
||||
bool mutated;
|
||||
BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated));
|
||||
|
||||
CBlock block3;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
@@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
|
||||
CBlock block2;
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions
|
||||
partialBlock = tmp;
|
||||
}
|
||||
|
||||
// Wrong transaction
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock = tmp;
|
||||
}
|
||||
BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2
|
||||
@@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
|
||||
|
||||
CBlock block3;
|
||||
PartiallyDownloadedBlock partialBlockCopy = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
@@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
|
||||
|
||||
CBlock block2;
|
||||
PartiallyDownloadedBlock partialBlockCopy = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
|
||||
bool mutated;
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
|
||||
@@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
|
||||
|
||||
CBlock block2;
|
||||
std::vector<CTransactionRef> vtx_missing;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
|
||||
@@ -324,7 +324,7 @@ FUZZ_TARGET(ephemeral_package_eval, .init = initialize_tx_pool)
|
||||
return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*client_maxfeerate=*/{}));
|
||||
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(),
|
||||
/*bypass_limits=*/fuzzed_data_provider.ConsumeBool(), /*test_accept=*/!single_submit));
|
||||
/*bypass_limits=*/false, /*test_accept=*/!single_submit));
|
||||
|
||||
if (!single_submit && result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) {
|
||||
// We don't know anything about the validity since transactions were randomly generated, so
|
||||
|
||||
@@ -32,14 +32,10 @@ void initialize_pdb()
|
||||
g_setup = testing_setup.get();
|
||||
}
|
||||
|
||||
PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional<BlockValidationResult> result)
|
||||
PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result)
|
||||
{
|
||||
return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) {
|
||||
if (result) {
|
||||
return state.Invalid(*result);
|
||||
}
|
||||
|
||||
return true;
|
||||
return [result](const CBlock& block, bool) {
|
||||
return result;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -111,36 +107,22 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb)
|
||||
skipped_missing |= (!pdb.IsTxAvailable(i) && skip);
|
||||
}
|
||||
|
||||
// Mock CheckBlock
|
||||
bool fail_check_block{fuzzed_data_provider.ConsumeBool()};
|
||||
auto validation_result =
|
||||
fuzzed_data_provider.PickValueInArray(
|
||||
{BlockValidationResult::BLOCK_RESULT_UNSET,
|
||||
BlockValidationResult::BLOCK_CONSENSUS,
|
||||
BlockValidationResult::BLOCK_CACHED_INVALID,
|
||||
BlockValidationResult::BLOCK_INVALID_HEADER,
|
||||
BlockValidationResult::BLOCK_MUTATED,
|
||||
BlockValidationResult::BLOCK_MISSING_PREV,
|
||||
BlockValidationResult::BLOCK_INVALID_PREV,
|
||||
BlockValidationResult::BLOCK_TIME_FUTURE,
|
||||
BlockValidationResult::BLOCK_CHECKPOINT,
|
||||
BlockValidationResult::BLOCK_HEADER_LOW_WORK});
|
||||
pdb.m_check_block_mock = FuzzedCheckBlock(
|
||||
fail_check_block ?
|
||||
std::optional<BlockValidationResult>{validation_result} :
|
||||
std::nullopt);
|
||||
bool segwit_active{fuzzed_data_provider.ConsumeBool()};
|
||||
|
||||
// Mock IsBlockMutated
|
||||
bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()};
|
||||
pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated);
|
||||
|
||||
CBlock reconstructed_block;
|
||||
auto fill_status{pdb.FillBlock(reconstructed_block, missing)};
|
||||
auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)};
|
||||
switch (fill_status) {
|
||||
case READ_STATUS_OK:
|
||||
assert(!skipped_missing);
|
||||
assert(!fail_check_block);
|
||||
assert(!fail_block_mutated);
|
||||
assert(block->GetHash() == reconstructed_block.GetHash());
|
||||
break;
|
||||
case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]];
|
||||
case READ_STATUS_FAILED:
|
||||
assert(fail_check_block);
|
||||
assert(fail_block_mutated);
|
||||
break;
|
||||
case READ_STATUS_INVALID:
|
||||
break;
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <test/fuzz/FuzzedDataProvider.h>
|
||||
#include <test/fuzz/fuzz.h>
|
||||
#include <test/fuzz/util.h>
|
||||
#include <util/check.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
@@ -45,3 +46,27 @@ FUZZ_TARGET(script_interpreter)
|
||||
(void)CastToBool(ConsumeRandomLengthByteVector(fuzzed_data_provider));
|
||||
}
|
||||
}
|
||||
|
||||
/** Differential fuzzing for SignatureHash with and without cache. */
|
||||
FUZZ_TARGET(sighash_cache)
|
||||
{
|
||||
FuzzedDataProvider provider(buffer.data(), buffer.size());
|
||||
|
||||
// Get inputs to the sighash function that won't change across types.
|
||||
const auto scriptcode{ConsumeScript(provider)};
|
||||
const auto tx{ConsumeTransaction(provider, std::nullopt)};
|
||||
if (tx.vin.empty()) return;
|
||||
const auto in_index{provider.ConsumeIntegralInRange<uint32_t>(0, tx.vin.size() - 1)};
|
||||
const auto amount{ConsumeMoney(provider)};
|
||||
const auto sigversion{(SigVersion)provider.ConsumeIntegralInRange(0, 1)};
|
||||
|
||||
// Check the sighash function will give the same result for 100 fuzzer-generated hash types whether or not a cache is
|
||||
// provided. The cache is conserved across types to exercise cache hits.
|
||||
SigHashCache sighash_cache{};
|
||||
for (int i{0}; i < 100; ++i) {
|
||||
const auto hash_type{((i & 2) == 0) ? provider.ConsumeIntegral<int8_t>() : provider.ConsumeIntegral<int32_t>()};
|
||||
const auto nocache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion)};
|
||||
const auto cache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &sighash_cache)};
|
||||
Assert(nocache_res == cache_res);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,7 +295,6 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool)
|
||||
std::set<CTransactionRef> added;
|
||||
auto txr = std::make_shared<TransactionsDelta>(removed, added);
|
||||
node.validation_signals->RegisterSharedValidationInterface(txr);
|
||||
const bool bypass_limits = fuzzed_data_provider.ConsumeBool();
|
||||
|
||||
// Make sure ProcessNewPackage on one transaction works.
|
||||
// The result is not guaranteed to be the same as what is returned by ATMP.
|
||||
@@ -310,7 +309,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool)
|
||||
it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID);
|
||||
}
|
||||
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false));
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), /*bypass_limits=*/false, /*test_accept=*/false));
|
||||
const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID;
|
||||
node.validation_signals->SyncWithValidationInterfaceQueue();
|
||||
node.validation_signals->UnregisterSharedValidationInterface(txr);
|
||||
@@ -393,6 +392,9 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool)
|
||||
|
||||
chainstate.SetMempool(&tx_pool);
|
||||
|
||||
// If we ever bypass limits, do not do TRUC invariants checks
|
||||
bool ever_bypassed_limits{false};
|
||||
|
||||
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
|
||||
{
|
||||
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
|
||||
@@ -411,13 +413,17 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool)
|
||||
tx_pool.PrioritiseTransaction(txid.ToUint256(), delta);
|
||||
}
|
||||
|
||||
const bool bypass_limits{fuzzed_data_provider.ConsumeBool()};
|
||||
ever_bypassed_limits |= bypass_limits;
|
||||
|
||||
const auto tx = MakeTransactionRef(mut_tx);
|
||||
const bool bypass_limits = fuzzed_data_provider.ConsumeBool();
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false));
|
||||
const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID;
|
||||
if (accepted) {
|
||||
txids.push_back(tx->GetHash());
|
||||
CheckMempoolTRUCInvariants(tx_pool);
|
||||
if (!ever_bypassed_limits) {
|
||||
CheckMempoolTRUCInvariants(tx_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
Finish(fuzzed_data_provider, tx_pool, chainstate);
|
||||
|
||||
@@ -207,4 +207,94 @@ BOOST_AUTO_TEST_CASE(sighash_from_data)
|
||||
BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest);
|
||||
}
|
||||
}
|
||||
|
||||
BOOST_AUTO_TEST_CASE(sighash_caching)
|
||||
{
|
||||
// Get a script, transaction and parameters as inputs to the sighash function.
|
||||
CScript scriptcode;
|
||||
RandomScript(scriptcode);
|
||||
CScript diff_scriptcode{scriptcode};
|
||||
diff_scriptcode << OP_1;
|
||||
CMutableTransaction tx;
|
||||
RandomTransaction(tx, /*fSingle=*/false);
|
||||
const auto in_index{static_cast<uint32_t>(m_rng.randrange(tx.vin.size()))};
|
||||
const auto amount{m_rng.rand<CAmount>()};
|
||||
|
||||
// Exercise the sighash function under both legacy and segwit v0.
|
||||
for (const auto sigversion: {SigVersion::BASE, SigVersion::WITNESS_V0}) {
|
||||
// For each, run it against all the 6 standard hash types and a few additional random ones.
|
||||
std::vector<int32_t> hash_types{{SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, SIGHASH_ALL | SIGHASH_ANYONECANPAY,
|
||||
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, SIGHASH_NONE | SIGHASH_ANYONECANPAY,
|
||||
SIGHASH_ANYONECANPAY, 0, std::numeric_limits<int32_t>::max()}};
|
||||
for (int i{0}; i < 10; ++i) {
|
            hash_types.push_back(i % 2 == 0 ? m_rng.rand<int8_t>() : m_rng.rand<int32_t>());
        }

        // Reuse the same cache across script types. This must not cause any issue as the cached value for one hash type must never
        // be confused for another (instantiating the cache within the loop instead would prevent testing this).
        SigHashCache cache;
        for (const auto hash_type: hash_types) {
            const bool expect_one{sigversion == SigVersion::BASE && ((hash_type & 0x1f) == SIGHASH_SINGLE) && in_index >= tx.vout.size()};

            // The result of computing the sighash should be the same with or without cache.
            const auto sighash_with_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)};
            const auto sighash_no_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)};
            BOOST_CHECK_EQUAL(sighash_with_cache, sighash_no_cache);

            // Calling the cached version again should return the same value again.
            BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache));

            // While here we might as well also check that the result for legacy is the same as for the old SignatureHash() function.
            if (sigversion == SigVersion::BASE) {
                BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHashOld(scriptcode, CTransaction(tx), in_index, hash_type));
            }

            // Calling with a different scriptcode (for instance in case a CODESEP is encountered) will not return the cache value but
            // overwrite it. The sighash will always be different except in case of the legacy SIGHASH_SINGLE bug.
            const auto sighash_with_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)};
            const auto sighash_no_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)};
            BOOST_CHECK_EQUAL(sighash_with_cache2, sighash_no_cache2);
            if (!expect_one) {
                BOOST_CHECK_NE(sighash_with_cache, sighash_with_cache2);
            } else {
                BOOST_CHECK_EQUAL(sighash_with_cache, sighash_with_cache2);
                BOOST_CHECK_EQUAL(sighash_with_cache, uint256::ONE);
            }

            // Calling the cached version again should return the same value again.
            BOOST_CHECK_EQUAL(sighash_with_cache2, SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache));

            // And if we store a different value for this scriptcode and hash type it will return that instead.
            {
                HashWriter h{};
                h << 42;
                cache.Store(hash_type, scriptcode, h);
                const auto stored_hash{h.GetHash()};
                BOOST_CHECK(cache.Load(hash_type, scriptcode, h));
                const auto loaded_hash{h.GetHash()};
                BOOST_CHECK_EQUAL(stored_hash, loaded_hash);
            }

            // And using this mutated cache with the sighash function will return the new value (except in the legacy SIGHASH_SINGLE bug
            // case in which it'll return 1).
            if (!expect_one) {
                BOOST_CHECK_NE(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), sighash_with_cache);
                HashWriter h{};
                BOOST_CHECK(cache.Load(hash_type, scriptcode, h));
                h << hash_type;
                const auto new_hash{h.GetHash()};
                BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), new_hash);
            } else {
                BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), uint256::ONE);
            }

            // Wipe the cache and restore the correct cached value for this scriptcode and hash_type before starting the next iteration.
            HashWriter dummy{};
            cache.Store(hash_type, diff_scriptcode, dummy);
            (void)SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache);
            BOOST_CHECK(cache.Load(hash_type, scriptcode, dummy) || expect_one);
        }
    }
}

BOOST_AUTO_TEST_SUITE_END()
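For readers following the test logic above: the assertions only hold if the cache holds a single entry keyed on the (hash type, scriptcode) pair, so a lookup with a different scriptcode misses and the subsequent recomputation overwrites the slot. Below is a minimal standalone sketch of that single-entry semantics. It is only an illustration: the types are simplified (the real SigHashCache stores a hash midstate, not a plain integer), and everything except the Store/Load names is invented for the example.

```cpp
#include <cassert>
#include <cstdint>
#include <optional>
#include <string>
#include <tuple>

// Minimal single-entry cache keyed on (hash_type, scriptcode). The digest is
// a plain uint64_t stand-in for the real hash midstate.
class SingleEntrySigHashCache
{
    std::optional<std::tuple<int32_t, std::string, uint64_t>> m_entry;

public:
    // Overwrite the single slot with the value for this key.
    void Store(int32_t hash_type, const std::string& scriptcode, uint64_t digest)
    {
        m_entry = {hash_type, scriptcode, digest};
    }

    // Return the cached value only on an exact key match.
    bool Load(int32_t hash_type, const std::string& scriptcode, uint64_t& digest) const
    {
        if (!m_entry) return false;
        const auto& [ht, sc, d] = *m_entry;
        if (ht != hash_type || sc != scriptcode) return false;
        digest = d;
        return true;
    }
};

int main()
{
    SingleEntrySigHashCache cache;
    uint64_t digest;
    cache.Store(0x01, "scriptA", 42);
    assert(cache.Load(0x01, "scriptA", digest) && digest == 42);
    assert(!cache.Load(0x01, "scriptB", digest)); // different scriptcode: miss
    cache.Store(0x01, "scriptB", 43);             // ...and the recompute overwrites
    assert(!cache.Load(0x01, "scriptA", digest)); // single entry: the old key is gone
}
```

With a cache of this shape, repeatedly signing with the same scriptcode is always a hit, while an OP_CODESEPARATOR (which changes the scriptcode) forces a recompute that replaces the entry, which is exactly the overwrite behavior the test asserts.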
@@ -1144,4 +1144,159 @@ BOOST_AUTO_TEST_CASE(max_standard_legacy_sigops)
     BOOST_CHECK(!::AreInputsStandard(CTransaction(tx_max_sigops), coins));
 }
 
+/** Sanity check the return value of SpendsNonAnchorWitnessProg for various output types. */
+BOOST_AUTO_TEST_CASE(spends_witness_prog)
+{
+    CCoinsView coins_dummy;
+    CCoinsViewCache coins(&coins_dummy);
+    CKey key;
+    key.MakeNewKey(true);
+    const CPubKey pubkey{key.GetPubKey()};
+    CMutableTransaction tx_create{}, tx_spend{};
+    tx_create.vout.emplace_back(0, CScript{});
+    tx_spend.vin.emplace_back(Txid{}, 0);
+    std::vector<std::vector<uint8_t>> sol_dummy;
+
+    // CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash,
+    // WitnessV1Taproot, PayToAnchor, WitnessUnknown.
+    static_assert(std::variant_size_v<CTxDestination> == 9);
+
+    // Go through all defined output types and sanity check SpendsNonAnchorWitnessProg.
+
+    // P2PK
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(PubKeyDestination{pubkey});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEY);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2PKH
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(PKHash{pubkey});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEYHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH
+    auto redeem_script{CScript{} << OP_1 << OP_CHECKSIG};
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash{redeem_script});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << OP_0 << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+
+    // native P2WSH
+    const auto witness_script{CScript{} << OP_12 << OP_HASH160 << OP_DUP << OP_EQUAL};
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash{witness_script});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH-wrapped P2WSH
+    redeem_script = tx_create.vout[0].scriptPubKey;
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // native P2WPKH
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash{pubkey});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_KEYHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH-wrapped P2WPKH
+    redeem_script = tx_create.vout[0].scriptPubKey;
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2TR
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV1Taproot{XOnlyPubKey{pubkey}});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V1_TAPROOT);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH-wrapped P2TR (undefined, non-standard)
+    redeem_script = tx_create.vout[0].scriptPubKey;
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2A
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(PayToAnchor{});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::ANCHOR);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH-wrapped P2A (undefined, non-standard)
+    redeem_script = tx_create.vout[0].scriptPubKey;
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+
+    // Undefined version 1 witness program
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{1, {0x42, 0x42}});
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // P2SH-wrapped undefined version 1 witness program
+    redeem_script = tx_create.vout[0].scriptPubKey;
+    tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+    BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+    tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+    tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+    AddCoins(coins, CTransaction{tx_create}, 0, false);
+    BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    tx_spend.vin[0].scriptSig.clear();
+    BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+    // Various undefined version >1 32-byte witness programs.
+    const auto program{ToByteVector(XOnlyPubKey{pubkey})};
+    for (int i{2}; i <= 16; ++i) {
+        tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{i, program});
+        BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN);
+        tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+        AddCoins(coins, CTransaction{tx_create}, 0, false);
+        BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+
+        // It's also detected within P2SH.
+        redeem_script = tx_create.vout[0].scriptPubKey;
+        tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
+        BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
+        tx_spend.vin[0].prevout.hash = tx_create.GetHash();
+        tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
+        AddCoins(coins, CTransaction{tx_create}, 0, false);
+        BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+        tx_spend.vin[0].scriptSig.clear();
+        BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
+    }
+}
+
 BOOST_AUTO_TEST_SUITE_END()
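The cases above pin down the rule the helper implements: any spent output that is a witness program other than a native P2A counts, including witness programs hidden behind P2SH, where the prospective program has to be recovered from the last push of the scriptSig. A rough standalone sketch of that decision logic follows. The types are simplified (raw byte vectors instead of CScript/CCoinsViewCache), and the function name carries a "Sketch" suffix to flag that it only illustrates the rule the test exercises.

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Script = std::vector<uint8_t>;

// A witness program is a 1-byte version (OP_0 or OP_1..OP_16) followed by a
// single direct push of 2-40 bytes (BIP141).
bool IsWitnessProgram(const Script& spk, int& version)
{
    if (spk.size() < 4 || spk.size() > 42) return false;
    const uint8_t op = spk[0];
    if (op != 0x00 && !(op >= 0x51 && op <= 0x60)) return false; // OP_0 / OP_1..OP_16
    if (spk[1] + size_t{2} != spk.size()) return false;          // exactly one push
    version = (op == 0x00) ? 0 : op - 0x50;
    return true;
}

// P2A (pay-to-anchor): OP_1 followed by the 2-byte program {0x4e, 0x73}.
bool IsAnchor(const Script& spk)
{
    return spk == Script{0x51, 0x02, 0x4e, 0x73};
}

// prev_spk: scriptPubKey of the spent output.
// last_push: last data push of the scriptSig (the P2SH redeem script), empty if none.
bool SpendsNonAnchorWitnessProgSketch(const Script& prev_spk, const Script& last_push)
{
    int version;
    if (IsWitnessProgram(prev_spk, version)) return !IsAnchor(prev_spk); // native P2A is excluded
    const bool is_p2sh = prev_spk.size() == 23 && prev_spk[0] == 0xa9 && prev_spk[1] == 0x14 && prev_spk[22] == 0x87;
    if (is_p2sh && IsWitnessProgram(last_push, version)) return true;    // wrapped P2A is not excluded
    return false;
}

int main()
{
    const Script p2a{0x51, 0x02, 0x4e, 0x73};
    assert(!SpendsNonAnchorWitnessProgSketch(p2a, {}));   // native P2A: false
    Script p2sh(23, 0x00);
    p2sh[0] = 0xa9; p2sh[1] = 0x14; p2sh[22] = 0x87;      // OP_HASH160 <20 bytes> OP_EQUAL
    assert(SpendsNonAnchorWitnessProgSketch(p2sh, p2a));  // P2SH-wrapped P2A: true
}
```

Note the asymmetry the test makes explicit: only a *native* P2A output is exempted, while a P2SH-wrapped anchor (an undefined, non-standard construction) is still reported as a witness-program spend.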
@@ -9,6 +9,13 @@
 
 #ifdef ENABLE_TRACING
 
+// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103395
+// systemtap 4.6 on 32-bit ARM triggers an internal compiler error
+// (this workaround is included in systemtap 4.7+)
+#if defined(__arm__)
+# define STAP_SDT_ARG_CONSTRAINT g
+#endif
+
 // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use
 // the optional variadic macros to define tracepoints.
 #define SDT_USE_VARIADIC 1
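For context on what SDT_USE_VARIADIC buys: with it defined before including sys/sdt.h, the header exposes the variadic STAP_PROBEV macro, which tracepoint wrappers are typically built on. A hedged sketch of a probe emitted that way is below; the provider and event names are illustrative, and the snippet assumes the systemtap development headers are installed.

```cpp
// Tell sys/sdt.h we want the variadic probe macros (must come before the include).
#define SDT_USE_VARIADIC 1
#include <sys/sdt.h>

#include <cstdint>

void OnBlockConnected(int32_t height, uint64_t num_txs)
{
    // Emits a USDT probe "example:block_connected" with two arguments. A tracer
    // (bpftrace, systemtap) can attach to it; when nothing is attached the probe
    // site is a no-op. Provider/event names here are made up for the example.
    STAP_PROBEV(example, block_connected, height, num_txs);
}

int main()
{
    OnBlockConnected(840000, 3026);
}
```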
@@ -1025,26 +1025,28 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
     // Even though just checking direct mempool parents for inheritance would be sufficient, we
     // check using the full ancestor set here because it's more convenient to use what we have
     // already calculated.
-    if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
-        // Single transaction contexts only.
-        if (args.m_allow_sibling_eviction && err->second != nullptr) {
-            // We should only be considering where replacement is considered valid as well.
-            Assume(args.m_allow_replacement);
+    if (!args.m_bypass_limits) {
+        if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
+            // Single transaction contexts only.
+            if (args.m_allow_sibling_eviction && err->second != nullptr) {
+                // We should only be considering where replacement is considered valid as well.
+                Assume(args.m_allow_replacement);
 
-            // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
-            // included in RBF checks.
-            ws.m_conflicts.insert(err->second->GetHash());
-            // Adding the sibling to m_iters_conflicting here means that it doesn't count towards
-            // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from
-            // the descendant count is done separately in SingleTRUCChecks for TRUC transactions.
-            ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value());
-            ws.m_sibling_eviction = true;
-            // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks.
-            // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC
-            // (which is normally done in PreChecks). However, the only way a TRUC transaction can
-            // have a non-TRUC and non-BIP125 descendant is due to a reorg.
-        } else {
-            return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first);
+                // Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
+                // included in RBF checks.
+                ws.m_conflicts.insert(err->second->GetHash());
+                // Adding the sibling to m_iters_conflicting here means that it doesn't count towards
+                // RBF Carve Out above. This is correct, since removing to-be-replaced transactions from
+                // the descendant count is done separately in SingleTRUCChecks for TRUC transactions.
+                ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value());
+                ws.m_sibling_eviction = true;
+                // The sibling will be treated as part of the to-be-replaced set in ReplacementChecks.
+                // Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC
+                // (which is normally done in PreChecks). However, the only way a TRUC transaction can
+                // have a non-TRUC and non-BIP125 descendant is due to a reorg.
+            } else {
+                return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first);
+            }
         }
     }
@@ -1236,13 +1238,8 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
     // Check input scripts and signatures.
     // This is done last to help prevent CPU exhaustion denial-of-service attacks.
     if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) {
-        // SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
-        // need to turn both off, and compare against just turning off CLEANSTACK
-        // to see if the failure is specifically due to witness validation.
-        TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
-        if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata, GetValidationCache()) &&
-            !CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata, GetValidationCache())) {
-            // Only the witness is missing, so the transaction itself may be fine.
+        // Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately.
+        if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) {
             state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
                           state.GetRejectReason(), state.GetDebugMessage());
         }
@@ -2212,34 +2209,17 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
         if (pvChecks) {
             pvChecks->emplace_back(std::move(check));
         } else if (auto result = check(); result.has_value()) {
+            // Tx failures never trigger disconnections/bans.
+            // This is so that network splits aren't triggered
+            // either due to non-consensus relay policies (such as
+            // non-standard DER encodings or non-null dummy
+            // arguments) or due to new consensus rules introduced in
+            // soft forks.
             if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
-                // Check whether the failure was caused by a
-                // non-mandatory script verification check, such as
-                // non-standard DER encodings or non-null dummy
-                // arguments; if so, ensure we return NOT_STANDARD
-                // instead of CONSENSUS to avoid downstream users
-                // splitting the network between upgraded and
-                // non-upgraded nodes by banning CONSENSUS-failing
-                // data providers.
-                CScriptCheck check2(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i,
-                        flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
-                auto mandatory_result = check2();
-                if (!mandatory_result.has_value()) {
-                    return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(result->first)), result->second);
-                } else {
-                    // If the second check failed, it failed due to a mandatory script verification
-                    // flag, but the first check might have failed on a non-mandatory script
-                    // verification flag.
-                    //
-                    // Avoid reporting a mandatory script check failure with a non-mandatory error
-                    // string by reporting the error from the second check.
-                    result = mandatory_result;
-                }
+                return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
+            } else {
+                return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
             }
-
-            // MANDATORY flag failures correspond to
-            // TxValidationResult::TX_CONSENSUS.
-            return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
         }
@@ -132,6 +132,21 @@
     /** Return path to main database filename */
     std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); }
 
+    std::vector<fs::path> Files() override
+    {
+        std::vector<fs::path> files;
+        files.emplace_back(env->Directory() / m_filename);
+        if (env->m_databases.size() == 1) {
+            files.emplace_back(env->Directory() / "db.log");
+            files.emplace_back(env->Directory() / ".walletlock");
+            files.emplace_back(env->Directory() / "database" / "log.0000000001");
+            files.emplace_back(env->Directory() / "database");
+            // Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too.
+            // However it should be good enough for the only calls to Files().
+        }
+        return files;
+    }
+
     std::string Format() override { return "bdb"; }
     /**
      * Pointer to shared database environment.
@@ -170,6 +170,9 @@
     /** Return path to main database file for logs and error messages. */
     virtual std::string Filename() = 0;
 
+    /** Return paths to all files created by the database. */
+    virtual std::vector<fs::path> Files() = 0;
+
     virtual std::string Format() = 0;
 
     std::atomic<unsigned int> nUpdateCounter;
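The point of the new Files() accessor is that callers can remove exactly the files a backend created instead of wiping a whole directory with a recursive delete. A small self-contained sketch of that pattern using std::filesystem; the backend type here is hypothetical and only the Files-then-remove shape mirrors the interface above.

```cpp
#include <filesystem>
#include <vector>

namespace fs = std::filesystem;

// Hypothetical single-file backend implementing the Files() idea.
struct FlatFileDatabase
{
    fs::path m_file;
    // Main file plus its sidecar journal, mirroring e.g. the SQLite "-journal" case.
    std::vector<fs::path> Files() const { return {m_file, m_file.string() + "-journal"}; }
};

// On failure, remove only what the database reports it created.
void CleanupOnFailure(const FlatFileDatabase& db)
{
    for (const fs::path& p : db.Files()) {
        std::error_code ec;
        fs::remove(p, ec); // ignore missing files; never recurse into directories
    }
}

int main()
{
    FlatFileDatabase db{fs::temp_directory_path() / "wallet.dat"};
    CleanupOnFailure(db);
}
```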
@@ -288,11 +288,17 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::
 
         dump_file.close();
     }
+    // On failure, gather the paths to remove
+    std::vector<fs::path> paths_to_remove = wallet->GetDatabase().Files();
+    if (!name.empty()) paths_to_remove.push_back(wallet_path);
 
     wallet.reset(); // The pointer deleter will close the wallet for us.
 
     // Remove the wallet dir if we have a failure
     if (!ret) {
-        fs::remove_all(wallet_path);
+        for (const auto& p : paths_to_remove) {
+            fs::remove(p);
+        }
     }
 
     return ret;
@@ -65,6 +65,7 @@
 
     /** Return path to main database file for logs and error messages. */
     std::string Filename() override { return fs::PathToString(m_filepath); }
+    std::vector<fs::path> Files() override { return {m_filepath}; }
 
     std::string Format() override { return "bdb_ro"; }
 
@@ -1486,7 +1486,6 @@ RPCHelpMan sendall()
             CoinFilterParams coins_params;
             coins_params.min_amount = 0;
             for (const COutput& output : AvailableCoins(*pwallet, &coin_control, fee_rate, coins_params).All()) {
-                CHECK_NONFATAL(output.input_bytes > 0);
                 if (send_max && fee_rate.GetFee(output.input_bytes) > output.txout.nValue) {
                     continue;
                 }

@@ -1505,6 +1504,9 @@ RPCHelpMan sendall()
 
             // estimate final size of tx
             const TxSize tx_size{CalculateMaximumSignedTxSize(CTransaction(rawTx), pwallet.get())};
+            if (tx_size.vsize == -1) {
+                throw JSONRPCError(RPC_WALLET_ERROR, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors");
+            }
             const CAmount fee_from_size{fee_rate.GetFee(tx_size.vsize)};
             const std::optional<CAmount> total_bump_fees{pwallet->chain().calculateCombinedBumpFee(outpoints_spent, fee_rate)};
             CAmount effective_value = total_input_value - fee_from_size - total_bump_fees.value_or(0);
@@ -63,6 +63,7 @@
     void IncrementUpdateCounter() override { ++nUpdateCounter; }
     void ReloadDbEnv() override {}
     std::string Filename() override { return "dummy"; }
+    std::vector<fs::path> Files() override { return {}; }
     std::string Format() override { return "dummy"; }
     std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override { return std::make_unique<DummyBatch>(); }
 };
@@ -112,12 +112,12 @@ Mutex SQLiteDatabase::g_sqlite_mutex;
 int SQLiteDatabase::g_sqlite_count = 0;
 
 SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, const DatabaseOptions& options, bool mock)
-    : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync)
+    : WalletDatabase(), m_mock(mock), m_dir_path(dir_path), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync)
 {
     {
         LOCK(g_sqlite_mutex);
         LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion());
-        LogPrintf("Using wallet %s\n", m_dir_path);
+        LogPrintf("Using wallet %s\n", fs::PathToString(m_dir_path));
 
         if (++g_sqlite_count == 1) {
             // Setup logging

@@ -253,7 +253,7 @@ void SQLiteDatabase::Open()
 
     if (m_db == nullptr) {
         if (!m_mock) {
-            TryCreateDirectories(fs::PathFromString(m_dir_path));
+            TryCreateDirectories(m_dir_path);
        }
         int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr);
         if (ret != SQLITE_OK) {
@@ -105,7 +105,7 @@ class SQLiteDatabase : public WalletDatabase
 private:
     const bool m_mock{false};
 
-    const std::string m_dir_path;
+    const fs::path m_dir_path;
 
     const std::string m_file_path;
 
@@ -166,6 +166,14 @@
     void IncrementUpdateCounter() override { ++nUpdateCounter; }
 
     std::string Filename() override { return m_file_path; }
+    /** Return paths to all files created by the database. */
+    std::vector<fs::path> Files() override
+    {
+        std::vector<fs::path> files;
+        files.emplace_back(m_dir_path / fs::PathFromString(m_file_path));
+        files.emplace_back(m_dir_path / fs::PathFromString(m_file_path + "-journal"));
+        return files;
+    }
     std::string Format() override { return "sqlite"; }
 
     /** Make a SQLiteBatch connected to this database */
@@ -123,6 +123,7 @@
     void ReloadDbEnv() override {}
 
     std::string Filename() override { return "mockable"; }
+    std::vector<fs::path> Files() override { return {}; }
     std::string Format() override { return "mock"; }
     std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override { return std::make_unique<MockableBatch>(m_records, m_pass); }
 };
@@ -501,6 +501,8 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b
     const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name));
     auto wallet_file = wallet_path / "wallet.dat";
     std::shared_ptr<CWallet> wallet;
+    bool wallet_file_copied = false;
+    bool created_parent_dir = false;
 
     try {
         if (!fs::exists(backup_file)) {

@@ -509,13 +511,34 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b
             return nullptr;
         }
 
-        if (fs::exists(wallet_path) || !TryCreateDirectories(wallet_path)) {
-            error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(wallet_path)));
-            status = DatabaseStatus::FAILED_ALREADY_EXISTS;
-            return nullptr;
+        // Wallet directories are allowed to exist, but must not contain a .dat file.
+        // Any existing wallet database is treated as a hard failure to prevent overwriting.
+        if (fs::exists(wallet_path)) {
+            // If this is a file, it is the db and we don't want to overwrite it.
+            if (!fs::is_directory(wallet_path)) {
+                error = Untranslated(strprintf("Failed to restore wallet. Database file exists '%s'.", fs::PathToString(wallet_path)));
+                status = DatabaseStatus::FAILED_ALREADY_EXISTS;
+                return nullptr;
+            }
+
+            // Check we are not going to overwrite an existing db file
+            if (fs::exists(wallet_file)) {
+                error = Untranslated(strprintf("Failed to restore wallet. Database file exists in '%s'.", fs::PathToString(wallet_file)));
+                status = DatabaseStatus::FAILED_ALREADY_EXISTS;
+                return nullptr;
+            }
+        } else {
+            // The directory doesn't exist, create it
+            if (!TryCreateDirectories(wallet_path)) {
+                error = Untranslated(strprintf("Failed to restore database path '%s'.", fs::PathToString(wallet_path)));
+                status = DatabaseStatus::FAILED_ALREADY_EXISTS;
+                return nullptr;
+            }
+            created_parent_dir = true;
         }
 
         fs::copy_file(backup_file, wallet_file, fs::copy_options::none);
+        wallet_file_copied = true;
 
         if (load_after_restore) {
             wallet = LoadWallet(context, wallet_name, load_on_start, options, status, error, warnings);

@@ -528,7 +551,13 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b
 
     // Remove created wallet path only when loading fails
     if (load_after_restore && !wallet) {
-        fs::remove_all(wallet_path);
+        if (wallet_file_copied) fs::remove(wallet_file);
+        // Clean up the parent directory if we created it during restoration.
+        // As we have created it, it must be empty after deleting the wallet file.
+        if (created_parent_dir) {
+            Assume(fs::is_empty(wallet_path));
+            fs::remove(wallet_path);
+        }
    }
 
     return wallet;
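The restore logic above follows a copy-then-rollback shape: a pre-existing directory is tolerated, a pre-existing database file is a hard failure, and on a failed load only what this call created gets removed. A condensed std::filesystem sketch of that shape, with an invented load callback and illustrative paths; it compresses the error reporting away and is not the real control flow.

```cpp
#include <filesystem>
#include <fstream>
#include <iostream>

namespace fs = std::filesystem;

// Returns true on success; on failure removes only what this call created.
bool RestoreFromBackup(const fs::path& backup, const fs::path& wallet_dir,
                       bool (*load)(const fs::path&))
{
    const fs::path wallet_file = wallet_dir / "wallet.dat";
    if (fs::exists(wallet_file)) return false;       // never overwrite an existing db
    const bool created_dir = !fs::exists(wallet_dir) && fs::create_directories(wallet_dir);

    fs::copy_file(backup, wallet_file, fs::copy_options::none);
    if (load(wallet_file)) return true;

    // Rollback: remove the copied file, then the directory only if we made it.
    fs::remove(wallet_file);
    if (created_dir && fs::is_empty(wallet_dir)) fs::remove(wallet_dir);
    return false;
}

int main()
{
    auto always_fail = [](const fs::path&) { return false; };
    const fs::path dir = fs::temp_directory_path() / "restore_demo";
    const fs::path backup = fs::temp_directory_path() / "backup.dat";
    std::ofstream{backup}; // create an empty stand-in backup file
    std::cout << RestoreFromBackup(backup, dir, always_fail) << '\n'; // prints 0
}
```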
@@ -1673,7 +1702,13 @@ isminetype CWallet::IsMine(const COutPoint& outpoint) const
 
 bool CWallet::IsFromMe(const CTransaction& tx) const
 {
-    return (GetDebit(tx, ISMINE_ALL) > 0);
+    LOCK(cs_wallet);
+    for (const CTxIn& txin : tx.vin) {
+        if (IsMine(txin.prevout)) {
+            return true;
+        }
+    }
+    return false;
 }
 
 CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) const
@@ -4297,6 +4332,15 @@ bool CWallet::CanGrindR() const
     return !IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER);
 }
 
+// Returns wallet prefix for migration.
+// Used to name the backup file and newly created wallets.
+// E.g. a watch-only wallet is named "<prefix>_watchonly".
+static std::string MigrationPrefixName(CWallet& wallet)
+{
+    const std::string& name{wallet.GetName()};
+    return name.empty() ? "default_wallet" : name;
+}
+
 bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, MigrationResult& res) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet)
 {
     AssertLockHeld(wallet.cs_wallet);
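Concretely, the prefix rule above maps the unnamed default wallet to "default_wallet" and keeps any other name, so the derived wallets and backup files get stable names either way. A tiny standalone rendering of the rule and the suffixes used below:

```cpp
#include <iostream>
#include <string>

std::string MigrationPrefixName(const std::string& name)
{
    return name.empty() ? "default_wallet" : name;
}

int main()
{
    // Unnamed default wallet and a named wallet, with the suffixes used in DoMigration.
    std::cout << MigrationPrefixName("") + "_watchonly" << '\n';         // default_wallet_watchonly
    std::cout << MigrationPrefixName("mywallet") + "_solvables" << '\n'; // mywallet_solvables
}
```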
@@ -4328,7 +4372,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,
 
         DatabaseStatus status;
         std::vector<bilingual_str> warnings;
-        std::string wallet_name = wallet.GetName() + "_watchonly";
+        std::string wallet_name = MigrationPrefixName(wallet) + "_watchonly";
         std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
         if (!database) {
             error = strprintf(_("Wallet file creation failed: %s"), error);

@@ -4365,7 +4409,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,
 
         DatabaseStatus status;
         std::vector<bilingual_str> warnings;
-        std::string wallet_name = wallet.GetName() + "_solvables";
+        std::string wallet_name = MigrationPrefixName(wallet) + "_solvables";
         std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
         if (!database) {
             error = strprintf(_("Wallet file creation failed: %s"), error);
@@ -4486,7 +4530,12 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
         std::string name = to_reload->GetName();
         to_reload.reset();
         to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings);
-        return to_reload != nullptr;
+        if (!to_reload) {
+            LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. "
+                     "Error cause: %s\n", wallet_name, error.original);
+            return false;
+        }
+        return true;
     };
 
     // Before anything else, check if there is something to migrate.
|
||||
|
||||
// Make a backup of the DB
|
||||
fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path();
|
||||
fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", (wallet_name.empty() ? "default_wallet" : wallet_name), GetTime()));
|
||||
fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", MigrationPrefixName(*local_wallet), GetTime()));
|
||||
fs::path backup_path = this_wallet_dir / backup_filename;
|
||||
if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) {
|
||||
if (was_loaded) {
|
||||
@@ -4542,26 +4591,44 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
         }
     }
 
-    // In case of reloading failure, we need to remember the wallet dirs to remove
-    // Set is used as it may be populated with the same wallet directory paths multiple times,
-    // both before and after reloading. This ensures the set is complete even if one of the wallets
-    // fails to reload.
-    std::set<fs::path> wallet_dirs;
+    // In case of loading failure, we need to remember the wallet files we have created to remove.
+    // A `set` is used as it may be populated with the same wallet directory paths multiple times,
+    // both before and after loading. This ensures the set is complete even if one of the wallets
+    // fails to load.
+    std::set<fs::path> wallet_files_to_remove;
+    std::set<fs::path> wallet_empty_dirs_to_remove;
+
+    // Helper to track wallet files and directories for cleanup on failure.
+    // Only directories of wallets created during migration (not the main wallet) are tracked.
+    auto track_for_cleanup = [&](const CWallet& wallet) {
+        const auto files = wallet.GetDatabase().Files();
+        wallet_files_to_remove.insert(files.begin(), files.end());
+        if (wallet.GetName() != wallet_name) {
+            // If this isn't the main wallet, mark its directory for removal.
+            // This applies to the watch-only and solvable wallets.
+            // Wallets stored directly as files in the top-level directory
+            // (e.g. default unnamed wallets) don't have a removable parent directory.
+            wallet_empty_dirs_to_remove.insert(fs::PathFromString(wallet.GetDatabase().Filename()).parent_path());
+        }
+    };
+
     if (success) {
         // Migration successful, unload all wallets locally, then reload them.
         // Reload the main wallet
-        wallet_dirs.insert(fs::PathFromString(local_wallet->GetDatabase().Filename()).parent_path());
+        LogInfo("Loading new wallets after migration...\n");
+        track_for_cleanup(*local_wallet);
         success = reload_wallet(local_wallet);
         res.wallet = local_wallet;
         res.wallet_name = wallet_name;
         if (success && res.watchonly_wallet) {
             // Reload watchonly
-            wallet_dirs.insert(fs::PathFromString(res.watchonly_wallet->GetDatabase().Filename()).parent_path());
+            track_for_cleanup(*res.watchonly_wallet);
             success = reload_wallet(res.watchonly_wallet);
         }
         if (success && res.solvables_wallet) {
             // Reload solvables
-            wallet_dirs.insert(fs::PathFromString(res.solvables_wallet->GetDatabase().Filename()).parent_path());
+            track_for_cleanup(*res.solvables_wallet);
             success = reload_wallet(res.solvables_wallet);
         }
     }
@@ -4569,7 +4636,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
         // Migration failed, cleanup
         // Before deleting the wallet's directory, copy the backup file to the top-level wallets dir
         fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename);
-        fs::copy_file(backup_path, temp_backup_location, fs::copy_options::none);
+        fs::rename(backup_path, temp_backup_location);
 
         // Make list of wallets to cleanup
         std::vector<std::shared_ptr<CWallet>> created_wallets;
@@ -4578,8 +4645,8 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
         if (res.solvables_wallet) created_wallets.push_back(std::move(res.solvables_wallet));
 
         // Get the directories to remove after unloading
-        for (std::shared_ptr<CWallet>& w : created_wallets) {
-            wallet_dirs.emplace(fs::PathFromString(w->GetDatabase().Filename()).parent_path());
+        for (std::shared_ptr<CWallet>& wallet : created_wallets) {
+            track_for_cleanup(*wallet);
         }
 
         // Unload the wallets
// Unload the wallets
|
||||
@@ -4598,9 +4665,15 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
|
||||
}
|
||||
}
|
||||
|
||||
// Delete the wallet directories
|
||||
for (const fs::path& dir : wallet_dirs) {
|
||||
fs::remove_all(dir);
|
||||
// First, delete the db files we have created throughout this process and nothing else
|
||||
for (const fs::path& file : wallet_files_to_remove) {
|
||||
fs::remove(file);
|
||||
}
|
||||
|
||||
// Second, delete the created wallet directories and nothing else. They must be empty at this point.
|
||||
for (const fs::path& dir : wallet_empty_dirs_to_remove) {
|
||||
Assume(fs::is_empty(dir));
|
||||
fs::remove(dir);
|
||||
}
|
||||
|
||||
// Restore the backup
|
||||
@@ -4614,8 +4687,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
|
||||
}
|
||||
|
||||
// The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir
|
||||
fs::copy_file(temp_backup_location, backup_path, fs::copy_options::none);
|
||||
fs::remove(temp_backup_location);
|
||||
fs::rename(temp_backup_location, backup_path);
|
||||
|
||||
// Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null.
|
||||
// This check is performed after restoration to avoid an early error before saving the backup.
|
||||
|
||||
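The failure path above is deliberately ordered: tracked files first, then the now-empty directories with an emptiness check, then the backup moved back with a rename rather than a copy so no stray copy lingers. A compact std::filesystem sketch of that ordering; the sets, paths, and setup in main are illustrative stand-ins.

```cpp
#include <cassert>
#include <filesystem>
#include <fstream>
#include <set>

namespace fs = std::filesystem;

void RollbackMigration(const std::set<fs::path>& files_to_remove,
                       const std::set<fs::path>& empty_dirs_to_remove,
                       const fs::path& temp_backup, const fs::path& backup_dest)
{
    // First, delete only the database files created during migration.
    for (const fs::path& f : files_to_remove) fs::remove(f);
    // Second, the created wallet directories; they must be empty by now.
    for (const fs::path& d : empty_dirs_to_remove) {
        assert(fs::is_empty(d));
        fs::remove(d); // non-recursive: fails loudly if anything was missed
    }
    // Finally, move (not copy) the backup back into place.
    fs::rename(temp_backup, backup_dest);
}

int main()
{
    const fs::path base = fs::temp_directory_path() / "rollback_demo";
    fs::create_directories(base / "w1");
    std::ofstream{base / "w1" / "wallet.dat"};
    std::ofstream{base / "backup.bak"};
    RollbackMigration({base / "w1" / "wallet.dat"}, {base / "w1"},
                      base / "backup.bak", base / "restored.bak");
}
```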
@@ -11,9 +11,10 @@ The alternate mainnet chain was generated as follows:
 - restart node with a faketime 2 minutes later
 
 ```sh
-for i in {1..2015}
+for i in {1..2016}
 do
-  faketime "`date -d @"$(( 1231006505 + $i * 120 ))" +'%Y-%m-%d %H:%M:%S'`" \
+  t=$(( 1231006505 + $i * 120 ))
+  faketime "`date -d @$t +'%Y-%m-%d %H:%M:%S'`" \
   bitcoind -connect=0 -nocheckpoints -stopatheight=$i
 done
 ```

@@ -21,7 +22,9 @@ done
 The CPU miner is kept running as follows:
 
 ```sh
-./minerd --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r --no-stratum --algo sha256d --no-longpoll --scantime 3 --retry-pause 1
+./minerd -u ... -p ... -o http://127.0.0.1:8332 --no-stratum \
+  --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r \
+  --algo sha256d --no-longpoll --scantime 3 --retry-pause 1
 ```
 
 The payout address is derived from first BIP32 test vector master key:

@@ -40,3 +43,8 @@ The timestamp was not kept constant because at difficulty 1 it's not sufficient
 to only grind the nonce. Grinding the extra_nonce or version field instead
 would have required additional (stratum) software. It would also make it more
 complicated to reconstruct the blocks in this test.
+
+The `getblocktemplate` RPC code needs to be patched to ignore not being connected
+to any peers, and to ignore the IBD status check.
+
+On macOS use `faketime "@$t"` instead.
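The loop's timestamp arithmetic is worth spelling out: block i gets faketime 1231006505 + i*120, i.e. two minutes past the previous block, counting from the mainnet genesis timestamp. A quick cross-check of the formula, with no assumptions beyond what the script computes:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    const int64_t base = 1231006505; // mainnet genesis block timestamp
    for (int i : {1, 2, 2016}) {
        std::cout << "block " << i << ": " << base + int64_t{i} * 120 << '\n';
    }
    // block 1: 1231006625, block 2: 1231006745, block 2016: 1231248425
}
```

The actual header timestamps recorded in the test data can drift slightly above these values, since the timestamp itself is ground during mining.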
@@ -69,9 +69,6 @@ class BadTxTemplate:
     # Only specified if it differs from mempool acceptance error.
     block_reject_reason = ""
 
-    # Do we expect to be disconnected after submitting this tx?
-    expect_disconnect = False
-
     # Is this tx considered valid when included in a block, but not for acceptance into
     # the mempool (i.e. does it violate policy but not consensus)?
     valid_in_block = False

@@ -89,7 +86,6 @@ class BadTxTemplate:
 
 class OutputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vout-empty"
-    expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()

@@ -100,7 +96,6 @@ class OutputMissing(BadTxTemplate):
 
 class InputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vin-empty"
-    expect_disconnect = True
 
     # We use a blank transaction here to make sure
     # it is interpreted as a non-witness transaction.

@@ -117,7 +112,6 @@ class InputMissing(BadTxTemplate):
 # tree depth commitment (CVE-2017-12842)
 class SizeTooSmall(BadTxTemplate):
     reject_reason = "tx-size-small"
-    expect_disconnect = False
     valid_in_block = True
 
     def get_tx(self):

@@ -134,7 +128,6 @@ class BadInputOutpointIndex(BadTxTemplate):
     # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
     # database can't distinguish between spent outpoints and outpoints which never existed.
     reject_reason = None
-    expect_disconnect = False
 
     def get_tx(self):
         num_indices = len(self.spend_tx.vin)

@@ -149,7 +142,6 @@ class BadInputOutpointIndex(BadTxTemplate):
 
 class DuplicateInput(BadTxTemplate):
     reject_reason = 'bad-txns-inputs-duplicate'
-    expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()

@@ -162,7 +154,6 @@ class DuplicateInput(BadTxTemplate):
 
 class PrevoutNullInput(BadTxTemplate):
     reject_reason = 'bad-txns-prevout-null'
-    expect_disconnect = True
 
     def get_tx(self):
         tx = CTransaction()

@@ -175,7 +166,6 @@ class PrevoutNullInput(BadTxTemplate):
 
 class NonexistentInput(BadTxTemplate):
     reject_reason = None # Added as an orphan tx.
-    expect_disconnect = False
 
     def get_tx(self):
         tx = CTransaction()

@@ -188,7 +178,6 @@ class NonexistentInput(BadTxTemplate):
 
 class SpendTooMuch(BadTxTemplate):
     reject_reason = 'bad-txns-in-belowout'
-    expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(

@@ -197,7 +186,6 @@ class SpendTooMuch(BadTxTemplate):
 
 class CreateNegative(BadTxTemplate):
     reject_reason = 'bad-txns-vout-negative'
-    expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=-1)

@@ -205,7 +193,6 @@ class CreateNegative(BadTxTemplate):
 
 class CreateTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-vout-toolarge'
-    expect_disconnect = True
 
     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1)

@@ -213,7 +200,6 @@ class CreateTooLarge(BadTxTemplate):
 
 class CreateSumTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-txouttotal-toolarge'
-    expect_disconnect = True
 
     def get_tx(self):
         tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY)

@@ -223,8 +209,7 @@ class CreateSumTooLarge(BadTxTemplate):
 
 
 class InvalidOPIFConstruction(BadTxTemplate):
-    reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
-    expect_disconnect = True
+    reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)"
     valid_in_block = True
 
     def get_tx(self):

@@ -236,7 +221,6 @@ class InvalidOPIFConstruction(BadTxTemplate):
 class TooManySigops(BadTxTemplate):
     reject_reason = "bad-txns-too-many-sigops"
     block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount"
-    expect_disconnect = False
 
     def get_tx(self):
         lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))

@@ -258,15 +242,14 @@ def getDisabledOpcodeTemplate(opcode):
 
     return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
         'reject_reason': "disabled opcode",
-        'expect_disconnect': True,
         'get_tx': get_tx,
         'valid_in_block' : True
     })
 
 class NonStandardAndInvalid(BadTxTemplate):
-    """A non-standard transaction which is also consensus-invalid should return the consensus error."""
-    reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)"
-    expect_disconnect = True
+    """A non-standard transaction which is also consensus-invalid should return the first error."""
+    reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)"
+    block_reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)"
     valid_in_block = False
 
     def get_tx(self):
@@ -2014,7 +2014,8 @@
         1231247971,
         1231248071,
         1231248198,
-        1231248322
+        1231248322,
+        1231248621
     ],
     "nonces": [
         2345621585,

@@ -4031,6 +4032,7 @@
         3658502865,
         2519048297,
         1915965760,
-        1183846025
+        1183846025,
+        2713372123
     ]
 }
@@ -164,9 +164,12 @@ class FullBlockTest(BitcoinTestFramework):
         self.sign_tx(badtx, attempt_spend_tx)
         badtx.rehash()
         badblock = self.update_block(blockname, [badtx])
+        reject_reason = (template.block_reject_reason or template.reject_reason)
+        if reject_reason and reject_reason.startswith("mempool-script-verify-flag-failed"):
+            reject_reason = "mandatory-script-verify-flag-failed" + reject_reason[33:]
         self.send_blocks(
             [badblock], success=False,
-            reject_reason=(template.block_reject_reason or template.reject_reason),
+            reject_reason=reject_reason,
             reconnect=True, timeout=2)
 
         self.move_tip(2)
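The `reject_reason[33:]` slice relies on "mempool-script-verify-flag-failed" being exactly 33 characters long, so the rewrite keeps the parenthesized script error and only swaps the prefix for the block-context one. The same transformation rendered in C++, with the length checked rather than hard-coded (an illustrative mirror of the Python above, not code from the change):

```cpp
#include <iostream>
#include <string>
#include <string_view>

std::string ToBlockReject(std::string_view tx_reject)
{
    constexpr std::string_view kTx = "mempool-script-verify-flag-failed";
    constexpr std::string_view kBlk = "mandatory-script-verify-flag-failed";
    static_assert(kTx.size() == 33); // the magic number behind reject_reason[33:]
    if (tx_reject.substr(0, kTx.size()) == kTx) {
        return std::string{kBlk} + std::string{tx_reject.substr(kTx.size())};
    }
    return std::string{tx_reject};
}

int main()
{
    std::cout << ToBlockReject("mempool-script-verify-flag-failed (Invalid OP_IF construction)") << '\n';
    // mandatory-script-verify-flag-failed (Invalid OP_IF construction)
}
```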
@@ -154,12 +154,14 @@ class BIP65Test(BitcoinTestFramework):
         coin_vout = coin.prevout.n
         cltv_invalidate(spendtx, i)
 
+        blk_rej = "mandatory-script-verify-flag-failed"
+        tx_rej = "mempool-script-verify-flag-failed"
         expected_cltv_reject_reason = [
-            "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)",
-            "mandatory-script-verify-flag-failed (Negative locktime)",
-            "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
-            "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
-            "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
+            " (Operation not valid with the current stack size)",
+            " (Negative locktime)",
+            " (Locktime requirement not satisfied)",
+            " (Locktime requirement not satisfied)",
+            " (Locktime requirement not satisfied)",
         ][i]
         # First we show that this tx is valid except for CLTV by getting it
         # rejected from the mempool for exactly that reason.

@@ -170,8 +172,8 @@ class BIP65Test(BitcoinTestFramework):
             'txid': spendtx_txid,
             'wtxid': spendtx_wtxid,
             'allowed': False,
-            'reject-reason': expected_cltv_reject_reason,
-            'reject-details': expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}"
+            'reject-reason': tx_rej + expected_cltv_reject_reason,
+            'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}"
         }],
         self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
         )

@@ -181,7 +183,7 @@ class BIP65Test(BitcoinTestFramework):
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()
 
-        with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {expected_cltv_reject_reason}']):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {blk_rej + expected_cltv_reject_reason}']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
             peer.sync_with_ping()
@@ -123,8 +123,8 @@ class BIP66Test(BitcoinTestFramework):
             'txid': spendtx_txid,
             'wtxid': spendtx_wtxid,
             'allowed': False,
-            'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)',
-            'reject-details': 'mandatory-script-verify-flag-failed (Non-canonical DER signature), ' +
+            'reject-reason': 'mempool-script-verify-flag-failed (Non-canonical DER signature)',
+            'reject-details': 'mempool-script-verify-flag-failed (Non-canonical DER signature), ' +
                               f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0"
         }],
         self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
@@ -37,8 +37,8 @@ from test_framework.util import (
 from test_framework.wallet import getnewdestination
 from test_framework.wallet_util import generate_keypair
 
-NULLDUMMY_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)"
-
+NULLDUMMY_TX_ERROR = "mempool-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)"
+NULLDUMMY_BLK_ERROR = "mandatory-script-verify-flag-failed (Dummy CHECKMULTISIG argument must be zero)"
 
 def invalidate_nulldummy_tx(tx):
     """Transform a NULLDUMMY compliant tx (i.e. scriptSig starts with OP_0)

@@ -105,7 +105,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
                                                     addr=self.ms_address, amount=47,
                                                     privkey=self.privkey)
         invalidate_nulldummy_tx(test2tx)
-        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
+        assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
 
         self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
         self.block_submit(self.nodes[0], [test2tx], accept=True)

@@ -116,7 +116,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
                                                     privkey=self.privkey)
         test6txs = [CTransaction(test4tx)]
         invalidate_nulldummy_tx(test4tx)
-        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
+        assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
         self.block_submit(self.nodes[0], [test4tx], accept=False)
 
         self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")

@@ -126,7 +126,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
                                                     privkey=self.privkey)
         test6txs.append(CTransaction(test5tx))
         test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
-        assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
+        assert_raises_rpc_error(-26, NULLDUMMY_TX_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
         self.block_submit(self.nodes[0], [test5tx], with_witness=True, accept=False)
 
         self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")

@@ -142,7 +142,7 @@ class NULLDUMMYTest(BitcoinTestFramework):
         if with_witness:
             add_witness_commitment(block)
         block.solve()
-        assert_equal(None if accept else NULLDUMMY_ERROR, node.submitblock(block.serialize().hex()))
+        assert_equal(None if accept else NULLDUMMY_BLK_ERROR, node.submitblock(block.serialize().hex()))
         if accept:
             assert_equal(node.getbestblockhash(), block.hash)
             self.lastblockhash = block.hash
@@ -193,8 +193,8 @@ class SegWitTest(BitcoinTestFramework):
         assert_equal(self.nodes[2].getbalance(), 20 * Decimal("49.999"))
 
         self.log.info("Verify unsigned p2sh witness txs without a redeem script are invalid")
-        self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False)
-        self.fail_accept(self.nodes[2], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False)
+        self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WPKH][1], sign=False)
+        self.fail_accept(self.nodes[2], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_2][P2WSH][1], sign=False)
 
         self.generate(self.nodes[0], 1) # block 164
 
@@ -213,13 +213,13 @@ class SegWitTest(BitcoinTestFramework):
 
         self.log.info("Verify default node can't accept txs with missing witness")
         # unsigned, no scriptsig
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False)
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False)
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False)
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False)
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", wit_ids[NODE_0][P2WPKH][0], sign=False)
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program was passed an empty witness)", wit_ids[NODE_0][P2WSH][0], sign=False)
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WPKH][0], sign=False)
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Operation not valid with the current stack size)", p2sh_ids[NODE_0][P2WSH][0], sign=False)
         # unsigned with redeem script
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0]))
-        self.fail_accept(self.nodes[0], "mandatory-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0]))
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program hash mismatch)", p2sh_ids[NODE_0][P2WPKH][0], sign=False, redeem_script=witness_script(False, self.pubkey[0]))
+        self.fail_accept(self.nodes[0], "mempool-script-verify-flag-failed (Witness program was passed an empty witness)", p2sh_ids[NODE_0][P2WSH][0], sign=False, redeem_script=witness_script(True, self.pubkey[0]))
 
         # Coinbase contains the witness commitment nonce, check that RPC shows us
         coinbase_txid = self.nodes[2].getblock(blockhash)['tx'][0]

@@ -230,10 +230,10 @@ class SegWitTest(BitcoinTestFramework):
         assert_equal(witnesses[0], '00' * 32)
 
         self.log.info("Verify witness txs without witness data are invalid after the fork")
-        self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False)
-        self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False)
-        self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
-        self.fail_accept(self.nodes[2], 'mandatory-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
+        self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', wit_ids[NODE_2][P2WPKH][2], sign=False)
+        self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', wit_ids[NODE_2][P2WSH][2], sign=False)
+        self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program hash mismatch)', p2sh_ids[NODE_2][P2WPKH][2], sign=False, redeem_script=witness_script(False, self.pubkey[2]))
+        self.fail_accept(self.nodes[2], 'mempool-script-verify-flag-failed (Witness program was passed an empty witness)', p2sh_ids[NODE_2][P2WSH][2], sign=False, redeem_script=witness_script(True, self.pubkey[2]))
 
         self.log.info("Verify default node can now use witness txs")
         self.success_mine(self.nodes[0], wit_ids[NODE_0][P2WPKH][0], True)
@@ -71,6 +71,7 @@ from test_framework.script import (
|
||||
OP_PUSHDATA1,
|
||||
OP_RETURN,
|
||||
OP_SWAP,
|
||||
OP_TUCK,
|
||||
OP_VERIFY,
|
||||
SIGHASH_DEFAULT,
|
||||
SIGHASH_ALL,
|
||||
@@ -171,9 +172,9 @@ def get(ctx, name):
|
||||
ctx[name] = expr
|
||||
return expr.value
|
||||
|
||||
def getter(name):
|
||||
def getter(name, **kwargs):
|
||||
"""Return a callable that evaluates name in its passed context."""
|
||||
return lambda ctx: get(ctx, name)
|
||||
return lambda ctx: get({**ctx, **kwargs}, name)
|
||||
|
||||
def override(expr, **kwargs):
|
||||
"""Return a callable that evaluates expr in a modified context."""
|
||||
@@ -217,6 +218,20 @@ def default_controlblock(ctx):
"""Default expression for "controlblock": combine leafversion, negflag, pubkey_internal, merklebranch."""
return bytes([get(ctx, "leafversion") + get(ctx, "negflag")]) + get(ctx, "pubkey_internal") + get(ctx, "merklebranch")

def default_scriptcode_suffix(ctx):
"""Default expression for "scriptcode_suffix", the actually used portion of the scriptcode."""
scriptcode = get(ctx, "scriptcode")
codesepnum = get(ctx, "codesepnum")
if codesepnum == -1:
return scriptcode
codeseps = 0
for (opcode, data, sop_idx) in scriptcode.raw_iter():
if opcode == OP_CODESEPARATOR:
if codeseps == codesepnum:
return CScript(scriptcode[sop_idx+1:])
codeseps += 1
assert False
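A toy model of the suffix computation above, assuming the script is given as a plain list of opcodes instead of a CScript (so raw_iter() becomes enumerate()); codesepnum == -1 means no OP_CODESEPARATOR was executed and the whole scriptcode is used:

OP_CODESEPARATOR = "OP_CODESEPARATOR"

def scriptcode_suffix(script, codesepnum):
    # Keep only what follows the codesepnum-th OP_CODESEPARATOR.
    if codesepnum == -1:
        return script
    seen = 0
    for i, op in enumerate(script):
        if op == OP_CODESEPARATOR:
            if seen == codesepnum:
                return script[i + 1:]
            seen += 1
    raise AssertionError("fewer separators than codesepnum")

script = ["A", OP_CODESEPARATOR, "B", OP_CODESEPARATOR, "C"]
assert scriptcode_suffix(script, -1) == script
assert scriptcode_suffix(script, 0) == ["B", OP_CODESEPARATOR, "C"]
assert scriptcode_suffix(script, 1) == ["C"]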
def default_sigmsg(ctx):
"""Default expression for "sigmsg": depending on mode, compute BIP341, BIP143, or legacy sigmsg."""
tx = get(ctx, "tx")
@@ -236,12 +251,12 @@ def default_sigmsg(ctx):
return TaprootSignatureMsg(tx, utxos, hashtype, idx, scriptpath=False, annex=annex)
elif mode == "witv0":
# BIP143 signature hash
scriptcode = get(ctx, "scriptcode")
scriptcode = get(ctx, "scriptcode_suffix")
utxos = get(ctx, "utxos")
return SegwitV0SignatureMsg(scriptcode, tx, idx, hashtype, utxos[idx].nValue)
else:
# Pre-segwit signature hash
scriptcode = get(ctx, "scriptcode")
scriptcode = get(ctx, "scriptcode_suffix")
return LegacySignatureMsg(scriptcode, tx, idx, hashtype)[0]

def default_sighash(ctx):
@@ -301,7 +316,12 @@ def default_hashtype_actual(ctx):

def default_bytes_hashtype(ctx):
"""Default expression for "bytes_hashtype": bytes([hashtype_actual]) if not 0, b"" otherwise."""
return bytes([x for x in [get(ctx, "hashtype_actual")] if x != 0])
mode = get(ctx, "mode")
hashtype_actual = get(ctx, "hashtype_actual")
if mode != "taproot" or hashtype_actual != 0:
return bytes([hashtype_actual])
else:
return bytes()

def default_sign(ctx):
"""Default expression for "sign": concatenation of signature and bytes_hashtype."""
@@ -379,6 +399,8 @@ DEFAULT_CONTEXT = {
"key_tweaked": default_key_tweaked,
# The tweak to use (None for script path spends, the actual tweak for key path spends).
"tweak": default_tweak,
# The part of the scriptcode after the last executed OP_CODESEPARATOR.
"scriptcode_suffix": default_scriptcode_suffix,
# The sigmsg value (preimage of sighash)
"sigmsg": default_sigmsg,
# The sighash value (32 bytes)
@@ -409,6 +431,8 @@ DEFAULT_CONTEXT = {
"annex": None,
# The codeseparator position (only when mode=="taproot").
"codeseppos": -1,
# Which OP_CODESEPARATOR is the last executed one in the script (in legacy/P2SH/P2WSH).
"codesepnum": -1,
# The redeemscript to add to the scriptSig (if P2SH; None implies not P2SH).
"script_p2sh": None,
# The script to add to the witness in (if P2WSH; None implies P2WPKH)
@@ -1210,6 +1234,70 @@ def spenders_taproot_active():
standard = hashtype in VALID_SIGHASHES_ECDSA and (p2sh or witv0)
add_spender(spenders, "compat/nocsa", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([OP_IF, OP_11, pubkey1, OP_CHECKSIGADD, OP_12, OP_EQUAL, OP_ELSE, pubkey1, OP_CHECKSIG, OP_ENDIF]), key=eckey1, sigops_weight=4-3*witv0, inputs=[getter("sign"), b''], failure={"inputs": [getter("sign"), b'\x01']}, **ERR_UNDECODABLE)

# == sighash caching tests ==

# Sighash caching in legacy.
for p2sh in [False, True]:
for witv0 in [False, True]:
eckey1, pubkey1 = generate_keypair(compressed=compressed)
for _ in range(10):
# Construct a script with 20 checksig operations (10 sighash types, each 2 times),
# randomly ordered and interleaved with 4 OP_CODESEPARATORS.
ops = [1, 2, 3, 0x21, 0x42, 0x63, 0x81, 0x83, 0xe1, 0xc2, -1, -1] * 2
# Make sure no OP_CODESEPARATOR appears last.
while True:
random.shuffle(ops)
if ops[-1] != -1:
break
script = [pubkey1]
inputs = []
codeseps = -1
for pos, op in enumerate(ops):
if op == -1:
codeseps += 1
script.append(OP_CODESEPARATOR)
elif pos + 1 != len(ops):
script += [OP_TUCK, OP_CHECKSIGVERIFY]
inputs.append(getter("sign", codesepnum=codeseps, hashtype=op))
else:
script += [OP_CHECKSIG]
inputs.append(getter("sign", codesepnum=codeseps, hashtype=op))
inputs.reverse()
script = CScript(script)
add_spender(spenders, "sighashcache/legacy", p2sh=p2sh, witv0=witv0, standard=False, script=script, inputs=inputs, key=eckey1, sigops_weight=12*8*(4-3*witv0), no_fail=True)

# Sighash caching in tapscript.
for _ in range(10):
# Construct a script with 700 checksig operations (7 sighash types, each 100 times),
# randomly ordered and interleaved with 100 OP_CODESEPARATORS.
ops = [0, 1, 2, 3, 0x81, 0x82, 0x83, -1] * 100
# Make sure no OP_CODESEPARATOR appears last.
while True:
random.shuffle(ops)
if ops[-1] != -1:
break
script = [pubs[1]]
inputs = []
opcount = 1
codeseppos = -1
for pos, op in enumerate(ops):
if op == -1:
codeseppos = opcount
opcount += 1
script.append(OP_CODESEPARATOR)
elif pos + 1 != len(ops):
opcount += 2
script += [OP_TUCK, OP_CHECKSIGVERIFY]
inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op))
else:
opcount += 1
script += [OP_CHECKSIG]
inputs.append(getter("sign", codeseppos=codeseppos, hashtype=op))
inputs.reverse()
script = CScript(script)
tap = taproot_construct(pubs[0], [("leaf", script)])
add_spender(spenders, "sighashcache/taproot", tap=tap, leaf="leaf", inputs=inputs, standard=True, key=secs[1], no_fail=True)

return spenders

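A rough illustration of why the shuffled construction above exercises a sighash cache (this is only a count, not the node's actual cache keying): for a fixed transaction and input, a legacy or BIP143 sighash is fully determined by the effective scriptcode segment and the hashtype, so repeated (segment, hashtype) pairs can be served from cache.

import random

# Same op list as the legacy test: 10 hashtypes twice each, plus 4 separators.
ops = [1, 2, 3, 0x21, 0x42, 0x63, 0x81, 0x83, 0xe1, 0xc2, -1, -1] * 2
random.shuffle(ops)
segment, needed, checks = 0, set(), 0
for op in ops:
    if op == -1:
        segment += 1  # each OP_CODESEPARATOR starts a new scriptcode suffix
    else:
        checks += 1
        needed.add((segment, op))
print(f"{checks} signature checks, {len(needed)} distinct sighashes needed")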
@@ -441,7 +441,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
nested_anchor_spend.rehash()

self.check_mempool_result(
result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'non-mandatory-script-verify-flag (Witness version reserved for soft-fork upgrades)'}],
result_expected=[{'txid': nested_anchor_spend.rehash(), 'allowed': False, 'reject-reason': 'mempool-script-verify-flag-failed (Witness version reserved for soft-fork upgrades)'}],
rawtxs=[nested_anchor_spend.serialize().hex()],
maxfeerate=0,
)

@@ -164,23 +164,36 @@ class MempoolTRUC(BitcoinTestFramework):
def test_truc_reorg(self):
node = self.nodes[0]
self.log.info("Test that, during a reorg, TRUC rules are not enforced")
tx_v2_block = self.wallet.send_self_transfer(from_node=node, version=2)
tx_v3_block = self.wallet.send_self_transfer(from_node=node, version=3)
tx_v3_block2 = self.wallet.send_self_transfer(from_node=node, version=3)
self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"]])
self.check_mempool([])

# Testing 2<-3 versions allowed
tx_v2_block = self.wallet.create_self_transfer(version=2)

# Testing 3<-2 versions allowed
tx_v3_block = self.wallet.create_self_transfer(version=3)

# Testing overly-large child size
tx_v3_block2 = self.wallet.create_self_transfer(version=3)

# Also create a linear chain of 3 TRUC transactions that will be directly mined, followed by one v2 in-mempool after block is made
tx_chain_1 = self.wallet.create_self_transfer(version=3)
tx_chain_2 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_1["new_utxo"], version=3)
tx_chain_3 = self.wallet.create_self_transfer(utxo_to_spend=tx_chain_2["new_utxo"], version=3)

tx_to_mine = [tx_v3_block["hex"], tx_v2_block["hex"], tx_v3_block2["hex"], tx_chain_1["hex"], tx_chain_2["hex"], tx_chain_3["hex"]]
block = self.generateblock(node, output="raw(42)", transactions=tx_to_mine)

block = self.generate(node, 1)
self.check_mempool([])
tx_v2_from_v3 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block["new_utxo"], version=2)
tx_v3_from_v2 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v2_block["new_utxo"], version=3)
tx_v3_child_large = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_v3_block2["new_utxo"], target_vsize=1250, version=3)
assert_greater_than(node.getmempoolentry(tx_v3_child_large["txid"])["vsize"], TRUC_CHILD_MAX_VSIZE)
self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
node.invalidateblock(block[0])
self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"]])
# This is needed because generate() will create the exact same block again.
node.reconsiderblock(block[0])
tx_chain_4 = self.wallet.send_self_transfer(from_node=node, utxo_to_spend=tx_chain_3["new_utxo"], version=2)
self.check_mempool([tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_4["txid"]])

# Reorg should have all block transactions re-accepted, ignoring TRUC enforcement
node.invalidateblock(block["hash"])
self.check_mempool([tx_v3_block["txid"], tx_v2_block["txid"], tx_v3_block2["txid"], tx_v2_from_v3["txid"], tx_v3_from_v2["txid"], tx_v3_child_large["txid"], tx_chain_1["txid"], tx_chain_2["txid"], tx_chain_3["txid"], tx_chain_4["txid"]])

@cleanup(extra_args=["-limitdescendantsize=10", "-datacarriersize=40000"])
def test_nondefault_package_limits(self):

@@ -54,15 +54,15 @@ class MiningMainnetTest(BitcoinTestFramework):

self.add_wallet_options(parser)

def mine(self, height, prev_hash, blocks, node, fees=0):
def mine(self, height, prev_hash, blocks, node):
self.log.debug(f"height={height}")
block = CBlock()
block.nVersion = 0x20000000
block.hashPrevBlock = int(prev_hash, 16)
block.nTime = blocks['timestamps'][height - 1]
block.nBits = DIFF_1_N_BITS
block.nBits = DIFF_1_N_BITS if height < 2016 else DIFF_4_N_BITS
block.nNonce = blocks['nonces'][height - 1]
block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), retarget_period=2016)]
block.vtx = [create_coinbase(height=height, script_pubkey=bytes.fromhex(COINBASE_SCRIPT_PUBKEY), halving_period=210000)]
block.hashMerkleRoot = block.calc_merkle_root()
block.rehash()
block_hex = block.serialize(with_witness=False).hex()
@@ -81,12 +81,15 @@ class MiningMainnetTest(BitcoinTestFramework):
self.log.info("Load alternative mainnet blocks")
path = os.path.join(os.path.dirname(os.path.realpath(__file__)), self.options.datafile)
prev_hash = node.getbestblockhash()
blocks = None
with open(path, encoding='utf-8') as f:
blocks = json.load(f)
n_blocks = len(blocks['timestamps'])
assert_equal(n_blocks, 2015)
for i in range(2015):
prev_hash = self.mine(i + 1, prev_hash, blocks, node)
assert_equal(n_blocks, 2016)

# Mine up to the last block of the first retarget period
for i in range(2015):
prev_hash = self.mine(i + 1, prev_hash, blocks, node)

assert_equal(node.getblockcount(), 2015)

@@ -101,5 +104,21 @@ class MiningMainnetTest(BitcoinTestFramework):
assert_equal(mining_info['next']['bits'], nbits_str(DIFF_4_N_BITS))
assert_equal(mining_info['next']['target'], target_str(DIFF_4_TARGET))

# Mine first block of the second retarget period
height = 2016
prev_hash = self.mine(height, prev_hash, blocks, node)
assert_equal(node.getblockcount(), height)

mining_info = node.getmininginfo()
assert_equal(mining_info['difficulty'], 4)

self.log.info("getblock RPC should show historical target")
block_info = node.getblock(node.getblockhash(1))

assert_equal(block_info['difficulty'], 1)
assert_equal(block_info['bits'], nbits_str(DIFF_1_N_BITS))
assert_equal(block_info['target'], target_str(DIFF_1_TARGET))


if __name__ == '__main__':
MiningMainnetTest(__file__).main()
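For reference, the relation between the compact 'bits' encoding and the target that the assertions above compare, as a minimal sketch. The difficulty-1 constant below is the well-known mainnet value; the difficulty-4 encoding shown is simply what a clean divide-by-4 retarget yields, not values copied from the test file:

def bits_to_target(nbits):
    # Standard Bitcoin compact encoding: 1-byte exponent, 3-byte mantissa.
    exponent = nbits >> 24
    mantissa = nbits & 0x007fffff
    return mantissa << (8 * (exponent - 3))

DIFF_1_N_BITS = 0x1d00ffff
# Difficulty 4 means a target one quarter of the difficulty-1 target.
assert bits_to_target(DIFF_1_N_BITS) // 4 == bits_to_target(0x1c3fffc0)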
@@ -566,6 +566,42 @@ class CompactBlocksTest(BitcoinTestFramework):
test_node.send_and_ping(msg_block(block))
assert_equal(int(node.getbestblockhash(), 16), block.sha256)

# Multiple blocktxn responses will cause a node to get disconnected.
def test_multiple_blocktxn_response(self, test_node):
node = self.nodes[0]
utxo = self.utxos[0]

block = self.build_block_with_transactions(node, utxo, 2)

# Send compact block
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0], use_witness=True)
test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
absolute_indexes = []
with p2p_lock:
assert "getblocktxn" in test_node.last_message
absolute_indexes = test_node.last_message["getblocktxn"].block_txn_request.to_absolute()
assert_equal(absolute_indexes, [1, 2])

# Send a blocktxn that does not succeed in reconstruction, triggering
# getdata fallback.
msg = msg_blocktxn()
msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[2]] + [block.vtx[1]])
test_node.send_and_ping(msg)

# Tip should not have updated
assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)

# We should receive a getdata request
test_node.wait_for_getdata([block.sha256], timeout=10)
assert test_node.last_message["getdata"].inv[0].type == MSG_BLOCK or \
test_node.last_message["getdata"].inv[0].type == MSG_BLOCK | MSG_WITNESS_FLAG

# Send the same blocktxn and assert the sender gets disconnected.
with node.assert_debug_log(['previous compact block reconstruction attempt failed']):
test_node.send_message(msg)
test_node.wait_for_disconnect()

def test_getblocktxn_handler(self, test_node):
node = self.nodes[0]
# bitcoind will not send blocktxn responses for blocks whose height is
@@ -957,6 +993,12 @@ class CompactBlocksTest(BitcoinTestFramework):
self.log.info("Testing handling of invalid compact blocks...")
self.test_invalid_tx_in_compactblock(self.segwit_node)

self.log.info("Testing handling of multiple blocktxn responses...")
self.test_multiple_blocktxn_response(self.segwit_node)

# The previous test will lead to a disconnection. Reconnect before continuing.
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())

self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()


@@ -73,14 +73,9 @@ class InvalidTxRequestTest(BitcoinTestFramework):
tx = template.get_tx()
node.p2ps[0].send_txs_and_test(
[tx], node, success=False,
expect_disconnect=template.expect_disconnect,
reject_reason=template.reject_reason,
)

if template.expect_disconnect:
self.log.info("Reconnecting to peer")
self.reconnect_p2p()

# Make two p2p connections to provide the node with orphans
# * p2ps[0] will send valid orphan txs (one with low fee)
# * p2ps[1] will send an invalid orphan tx (and is later disconnected for that)
@@ -144,7 +139,6 @@ class InvalidTxRequestTest(BitcoinTestFramework):
# tx_orphan_2_no_fee, because it has too low fee (p2ps[0] is not disconnected for relaying that tx)
# tx_orphan_2_invalid, because it has negative fee (p2ps[1] is disconnected for relaying that tx)

self.wait_until(lambda: 1 == len(node.getpeerinfo()), timeout=12) # p2ps[1] is no longer connected
assert_equal(expected_mempool, set(node.getrawmempool()))

self.log.info('Test orphan pool overflow')
@@ -165,7 +159,7 @@ class InvalidTxRequestTest(BitcoinTestFramework):
node.p2ps[0].send_txs_and_test([rejected_parent], node, success=False)

self.log.info('Test that a peer disconnection causes its transactions to be erased from the orphan pool')
with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=26']):
with node.assert_debug_log(['Erased 100 orphan transaction(s) from peer=']):
self.reconnect_p2p(num_connections=1)

self.log.info('Test that a transaction in the orphan pool being included in a new tip block causes it to be erased from the orphan pool')

@@ -251,8 +251,10 @@ class PackageRelayTest(BitcoinTestFramework):
assert tx_orphan_bad_wit.rehash() not in node_mempool

# 5. Have the other peer send the tx too, so that tx_orphan_bad_wit package is attempted.
bad_orphan_sender.send_message(msg_tx(low_fee_parent["tx"]))
bad_orphan_sender.wait_for_disconnect()
bad_orphan_sender.send_and_ping(msg_tx(low_fee_parent["tx"]))

# The bad orphan sender should not be disconnected.
bad_orphan_sender.sync_with_ping()

# The peer that didn't provide the orphan should not be disconnected.
parent_sender.sync_with_ping()

@@ -704,14 +704,20 @@ class SegWitTest(BitcoinTestFramework):
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
with self.nodes[0].assert_debug_log(
expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Witness program was passed an empty witness)']):
expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)

# The transaction was detected as witness stripped above and not added to the reject
# filter. Trying again will check it again and result in the same error.
with self.nodes[0].assert_debug_log(
expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)']):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)

# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
with self.nodes[0].assert_debug_log(
expected_msgs=[spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']):
expected_msgs=[spend_tx.hash, 'was not accepted: mempool-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)']):
test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)

# Now put the witness script in the witness, should succeed after
@@ -1282,6 +1288,13 @@ class SegWitTest(BitcoinTestFramework):
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)

# Now do the opposite: strip the witness entirely. This will be detected as witness stripping and
# the (w)txid won't be added to the reject filter: we can try again and get the same error.
tx3.wit.vtxinwit[0].scriptWitness.stack = []
reason = "was not accepted: mempool-script-verify-flag-failed (Witness program was passed an empty witness)"
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason)
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=False, accepted=False, reason=reason)

# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_script]
# Also check that old_node gets a tx announcement, even though this is
@@ -1477,7 +1490,7 @@ class SegWitTest(BitcoinTestFramework):
sign_input_segwitv0(tx2, 0, script, tx.vout[0].nValue, key)

# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
@@ -1496,7 +1509,7 @@ class SegWitTest(BitcoinTestFramework):
sign_p2pk_witness_input(witness_script, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)

# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
@@ -1513,7 +1526,7 @@ class SegWitTest(BitcoinTestFramework):
sign_p2pk_witness_input(witness_script, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)

# Should fail policy test.
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'mempool-script-verify-flag-failed (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

@@ -122,8 +122,8 @@ class RPCPackagesTest(BitcoinTestFramework):
assert_equal(testres_bad_sig, self.independent_txns_testres + [{
"txid": tx_bad_sig_txid,
"wtxid": tx_bad_sig_wtxid, "allowed": False,
"reject-reason": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)",
"reject-details": "mandatory-script-verify-flag-failed (Operation not valid with the current stack size), " +
"reject-reason": "mempool-script-verify-flag-failed (Operation not valid with the current stack size)",
"reject-details": "mempool-script-verify-flag-failed (Operation not valid with the current stack size), " +
f"input 0 of {tx_bad_sig_txid} (wtxid {tx_bad_sig_wtxid}), spending {coin['txid']}:{coin['vout']}"
}])


@@ -143,7 +143,7 @@ def script_BIP34_coinbase_height(height):
return CScript([CScriptNum(height)])


def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, retarget_period=REGTEST_RETARGET_PERIOD):
def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_script=None, fees=0, nValue=50, halving_period=REGTEST_RETARGET_PERIOD):
"""Create a coinbase transaction.

If pubkey is passed in, the coinbase output will be a P2PK output;
@@ -156,7 +156,7 @@ def create_coinbase(height, pubkey=None, *, script_pubkey=None, extra_output_scr
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = nValue * COIN
if nValue == 50:
halvings = int(height / retarget_period)
halvings = int(height / halving_period)
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:

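In isolation, the renamed halving_period parameter controls nothing more than how often the 50-coin subsidy above is right-shifted. A minimal sketch of that arithmetic (COIN = 100_000_000 satoshis, as in test_framework.messages; the halving is only applied when nValue is 50, matching the function above):

COIN = 100_000_000

def subsidy(height, halving_period):
    # One right shift (halving) per completed period.
    return (50 * COIN) >> (height // halving_period)

assert subsidy(0, 210000) == 50 * COIN
assert subsidy(210000, 210000) == 25 * COIN
assert subsidy(420000, 210000) == 1_250_000_000  # 12.5 coins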
@@ -893,13 +893,12 @@ class P2PDataStore(P2PInterface):
else:
assert node.getbestblockhash() != blocks[-1].hash

def send_txs_and_test(self, txs, node, *, success=True, expect_disconnect=False, reject_reason=None):
def send_txs_and_test(self, txs, node, *, success=True, reject_reason=None):
"""Send txs to test node and test whether they're accepted to the mempool.

- add all txs to our tx_store
- send tx messages for all txs
- if success is True/False: assert that the txs are/are not accepted to the mempool
- if expect_disconnect is True: Skip the sync with ping
- if reject_reason is set: assert that the correct reject message is logged."""

with p2p_lock:
@@ -911,10 +910,7 @@ class P2PDataStore(P2PInterface):
for tx in txs:
self.send_message(msg_tx(tx))

if expect_disconnect:
self.wait_for_disconnect()
else:
self.sync_with_ping()
self.sync_with_ping()

raw_mempool = node.getrawmempool()
if success:

@@ -50,6 +50,7 @@ DUMMY_MIN_OP_RETURN_SCRIPT = CScript([OP_RETURN] + ([OP_0] * (MIN_PADDING - 1)))
assert len(DUMMY_MIN_OP_RETURN_SCRIPT) == MIN_PADDING

PAY_TO_ANCHOR = CScript([OP_1, bytes.fromhex("4e73")])
ANCHOR_ADDRESS = "bcrt1pfeesnyr2tx"

def key_to_p2pk_script(key):
key = check_key(key)

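The raw serialization of the PAY_TO_ANCHOR script defined above, spelled out without the framework's CScript helper as a small sketch: OP_1 is 0x51, followed by a 2-byte push (0x02) of the witness program 0x4e73.

pay_to_anchor = bytes([0x51, 0x02]) + bytes.fromhex("4e73")
assert pay_to_anchor.hex() == "51024e73"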
@@ -137,7 +137,7 @@ class TestNode():
self.args.append("-logsourcelocations")
if self.version_is_at_least(239000):
self.args.append("-loglevel=trace")
if self.version_is_at_least(299900):
if self.version_is_at_least(290100):
self.args.append("-nologratelimit")

# Default behavior from global -v2transport flag is added to args to persist it over restarts.

@@ -170,6 +170,8 @@ BASE_SCRIPTS = [
'wallet_listreceivedby.py --descriptors',
'wallet_abandonconflict.py --legacy-wallet',
'wallet_abandonconflict.py --descriptors',
'wallet_anchor.py --legacy-wallet',
'wallet_anchor.py --descriptors',
'feature_reindex.py',
'feature_reindex_readonly.py',
'wallet_labels.py --legacy-wallet',

@@ -409,6 +409,18 @@ class ToolWalletTest(BitcoinTestFramework):
self.write_dump(dump_data, bad_sum_wallet_dump)
self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert not (self.nodes[0].wallets_path / "badload").is_dir()
if not self.options.descriptors:
os.rename(self.nodes[0].wallets_path / "wallet.dat", self.nodes[0].wallets_path / "default.wallet.dat")
self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert self.nodes[0].wallets_path.exists()
assert not (self.nodes[0].wallets_path / "wallet.dat").exists()

self.log.info('Checking createfromdump with an unnamed wallet')
self.do_tool_createfromdump("", "wallet.dump")
assert (self.nodes[0].wallets_path / "wallet.dat").exists()
os.unlink(self.nodes[0].wallets_path / "wallet.dat")
if not self.options.descriptors:
os.rename(self.nodes[0].wallets_path / "default.wallet.dat", self.nodes[0].wallets_path / "wallet.dat")

def test_chainless_conflicts(self):
self.log.info("Test wallet tool when wallet contains conflicting transactions")

128 test/functional/wallet_anchor.py Executable file
@@ -0,0 +1,128 @@
#!/usr/bin/env python3
# Copyright (c) 2025-present The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or https://www.opensource.org/licenses/mit-license.php.

import time

from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME
from test_framework.descriptors import descsum_create
from test_framework.messages import (
COutPoint,
CTxIn,
CTxInWitness,
CTxOut,
)
from test_framework.script_util import (
ANCHOR_ADDRESS,
PAY_TO_ANCHOR,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
from test_framework.wallet import MiniWallet

class WalletAnchorTest(BitcoinTestFramework):
def add_options(self, parser):
self.add_wallet_options(parser)

def set_test_params(self):
self.num_nodes = 1

def skip_test_if_missing_module(self):
self.skip_if_no_wallet()

def test_0_value_anchor_listunspent(self):
self.log.info("Test that 0-value anchor outputs are detected as UTXOs")

# Create an anchor output, and spend it
sender = MiniWallet(self.nodes[0])
anchor_tx = sender.create_self_transfer(fee_rate=0, version=3)["tx"]
anchor_tx.vout.append(CTxOut(0, PAY_TO_ANCHOR))
anchor_tx.rehash() # Rehash after modifying anchor_tx
anchor_spend = sender.create_self_transfer(version=3)["tx"]
anchor_spend.vin.append(CTxIn(COutPoint(anchor_tx.sha256, 1), b""))
anchor_spend.wit.vtxinwit.append(CTxInWitness())
anchor_spend.rehash() # Rehash after modifying anchor_spend
submit_res = self.nodes[0].submitpackage([anchor_tx.serialize().hex(), anchor_spend.serialize().hex()])
assert_equal(submit_res["package_msg"], "success")
anchor_txid = anchor_tx.hash
anchor_spend_txid = anchor_spend.hash

# Mine each tx in separate blocks
self.generateblock(self.nodes[0], sender.get_address(), [anchor_tx.serialize().hex()])
anchor_tx_height = self.nodes[0].getblockcount()
self.generateblock(self.nodes[0], sender.get_address(), [anchor_spend.serialize().hex()])

# Mock time forward and generate some blocks to avoid rescanning of latest blocks
self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1)
self.generate(self.nodes[0], 10)

self.nodes[0].createwallet(wallet_name="anchor", disable_private_keys=True)
wallet = self.nodes[0].get_wallet_rpc("anchor")

wallet.importaddress(ANCHOR_ADDRESS, rescan=False)

# The wallet should have no UTXOs, and not know of the anchor tx or its spend
assert_equal(wallet.listunspent(), [])
assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_txid)
assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid)

# Rescanning the block containing the anchor so that listunspent will list the output
wallet.rescanblockchain(0, anchor_tx_height)
utxos = wallet.listunspent()
assert_equal(len(utxos), 1)
assert_equal(utxos[0]["txid"], anchor_txid)
assert_equal(utxos[0]["address"], ANCHOR_ADDRESS)
assert_equal(utxos[0]["amount"], 0)
wallet.gettransaction(anchor_txid)
assert_raises_rpc_error(-5, "Invalid or non-wallet transaction id", wallet.gettransaction, anchor_spend_txid)

# Rescan the rest of the blockchain to see the anchor was spent
wallet.rescanblockchain()
assert_equal(wallet.listunspent(), [])
wallet.gettransaction(anchor_spend_txid)

def test_cannot_sign_anchors(self):
self.log.info("Test that the wallet cannot spend anchor outputs")
for disable_privkeys in [False, True]:
self.nodes[0].createwallet(wallet_name=f"anchor_spend_{disable_privkeys}", disable_private_keys=disable_privkeys)
wallet = self.nodes[0].get_wallet_rpc(f"anchor_spend_{disable_privkeys}")
if self.options.descriptors:
import_res = wallet.importdescriptors([
{"desc": descsum_create(f"addr({ANCHOR_ADDRESS})"), "timestamp": "now"},
{"desc": descsum_create(f"raw({PAY_TO_ANCHOR.hex()})"), "timestamp": "now"}
])
assert_equal(import_res[0]["success"], disable_privkeys)
assert_equal(import_res[1]["success"], disable_privkeys)
else:
wallet.importaddress(ANCHOR_ADDRESS)

anchor_txid = self.default_wallet.sendtoaddress(ANCHOR_ADDRESS, 1)
self.generate(self.nodes[0], 1)

wallet = self.nodes[0].get_wallet_rpc("anchor_spend_True")
utxos = wallet.listunspent()
assert_equal(len(utxos), 1)
assert_equal(utxos[0]["txid"], anchor_txid)
assert_equal(utxos[0]["address"], ANCHOR_ADDRESS)
assert_equal(utxos[0]["amount"], 1)

if self.options.descriptors:
assert_raises_rpc_error(-4, "Missing solving data for estimating transaction size", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}])
assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()])
else:
assert_raises_rpc_error(-4, "Insufficient funds", wallet.send, [{self.default_wallet.getnewaddress(): 0.9999}])
assert_raises_rpc_error(-6, "Total value of UTXO pool too low to pay for transaction. Try using lower feerate or excluding uneconomic UTXOs with 'send_max' option.", wallet.sendall, recipients=[self.default_wallet.getnewaddress()])
assert_raises_rpc_error(-4, "Error: Private keys are disabled for this wallet", wallet.sendtoaddress, self.default_wallet.getnewaddress(), 0.9999)
assert_raises_rpc_error(-4, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors", wallet.sendall, recipients=[self.default_wallet.getnewaddress()], inputs=utxos)

def run_test(self):
self.default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.test_0_value_anchor_listunspent()
self.test_cannot_sign_anchors()

if __name__ == '__main__':
WalletAnchorTest(__file__).main()
@@ -40,6 +40,7 @@ from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
sha256sum_file,
)


@@ -136,10 +137,71 @@ class WalletBackupTest(BitcoinTestFramework):
backup_file = self.nodes[0].datadir_path / 'wallet.bak'
wallet_name = "res0"
wallet_file = node.wallets_path / wallet_name
error_message = "Failed to create database path '{}'. Database already exists.".format(wallet_file)
error_message = "Failed to restore wallet. Database file exists in '{}'.".format(wallet_file / "wallet.dat")
assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file)
assert wallet_file.exists()

def test_restore_existent_dir(self):
self.log.info("Test restore on an existent empty directory")
node = self.nodes[3]
backup_file = self.nodes[0].datadir_path / 'wallet.bak'
wallet_name = "restored_wallet"
wallet_dir = node.wallets_path / wallet_name
os.mkdir(wallet_dir)
res = node.restorewallet(wallet_name, backup_file)
assert_equal(res['name'], wallet_name)
node.unloadwallet(wallet_name)

self.log.info("Test restore succeeds when the target directory contains non-wallet files")
wallet_file = node.wallets_path / wallet_name / "wallet.dat"
os.remove(wallet_file)
extra_file = node.wallets_path / wallet_name / "not_a_wallet.txt"
extra_file.touch()
res = node.restorewallet(wallet_name, backup_file)
assert_equal(res['name'], wallet_name)
assert extra_file.exists() # extra file was not removed by mistake
node.unloadwallet(wallet_name)

self.log.info("Test restore failure due to existing db file in the destination directory")
original_shasum = sha256sum_file(wallet_file)
error_message = "Failed to restore wallet. Database file exists in '{}'.".format(wallet_dir / "wallet.dat")
assert_raises_rpc_error(-36, error_message, node.restorewallet, wallet_name, backup_file)
# Ensure the wallet file remains untouched
assert wallet_dir.exists()
assert_equal(original_shasum, sha256sum_file(wallet_file))

self.log.info("Test restore succeeds when the .dat file in the destination has a different name")
second_wallet = wallet_dir / "hidden_storage.dat"
os.rename(wallet_dir / "wallet.dat", second_wallet)
original_shasum = sha256sum_file(second_wallet)
res = node.restorewallet(wallet_name, backup_file)
assert_equal(res['name'], wallet_name)
assert (wallet_dir / "hidden_storage.dat").exists()
assert_equal(original_shasum, sha256sum_file(second_wallet))
node.unloadwallet(wallet_name)

# Clean for follow-up tests
os.remove(wallet_file)

def test_restore_into_unnamed_wallet(self):
self.log.info("Test restore into a default unnamed wallet")
# This is also useful to test the migration recovery after failure logic
node = self.nodes[3]
if not self.options.descriptors:
node.unloadwallet("")
os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat")
backup_file = self.nodes[0].datadir_path / 'wallet.bak'
wallet_name = ""
res = node.restorewallet(wallet_name, backup_file)
assert_equal(res['name'], "")
assert (node.wallets_path / "wallet.dat").exists()
# Clean for follow-up tests
node.unloadwallet("")
os.remove(node.wallets_path / "wallet.dat")
if not self.options.descriptors:
os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat")
node.loadwallet("")

def test_pruned_wallet_backup(self):
self.log.info("Test loading backup on a pruned node when the backup was created close to the prune height of the restoring node")
node = self.nodes[3]
@@ -159,6 +221,19 @@ class WalletBackupTest(BitcoinTestFramework):
# the backup to load successfully this close to the prune height
node.restorewallet('pruned', node.datadir_path / 'wallet_pruned.bak')

self.log.info("Test restore on a pruned node when the backup was beyond the pruning point")
if not self.options.descriptors:
node.unloadwallet("")
os.rename(node.wallets_path / "wallet.dat", node.wallets_path / "default.wallet.dat")
backup_file = self.nodes[0].datadir_path / 'wallet.bak'
wallet_name = ""
error_message = "Wallet loading failed. Prune: last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)"
assert_raises_rpc_error(-4, error_message, node.restorewallet, wallet_name, backup_file)
assert node.wallets_path.exists() # ensure the wallets dir exists
if not self.options.descriptors:
os.rename(node.wallets_path / "default.wallet.dat", node.wallets_path / "wallet.dat")
node.loadwallet("")

def run_test(self):
self.log.info("Generating initial blockchain")
self.generate(self.nodes[0], 1)
@@ -227,6 +302,8 @@ class WalletBackupTest(BitcoinTestFramework):
assert_equal(res2_rpc.getbalance(), balance2)

self.restore_wallet_existent_name()
self.test_restore_existent_dir()
self.test_restore_into_unnamed_wallet()

if not self.options.descriptors:
self.log.info("Restoring using dumped wallet")

@@ -5,9 +5,11 @@
"""Test the listtransactions API."""

from decimal import Decimal
import time
import os
import shutil

from test_framework.blocktools import MAX_FUTURE_BLOCK_TIME
from test_framework.messages import (
COIN,
tx_from_hex,
@@ -17,7 +19,9 @@ from test_framework.util import (
assert_array_result,
assert_equal,
assert_raises_rpc_error,
find_vout_for_address,
)
from test_framework.wallet_util import get_generate_key


class ListTransactionsTest(BitcoinTestFramework):
@@ -114,6 +118,8 @@ class ListTransactionsTest(BitcoinTestFramework):
self.run_invalid_parameters_test()
self.test_op_return()

self.test_from_me_status_change()

def run_rbf_opt_in_test(self):
"""Test the opt-in-rbf flag for sent and received transactions."""

@@ -327,6 +333,47 @@ class ListTransactionsTest(BitcoinTestFramework):

assert 'address' not in op_ret_tx

def test_from_me_status_change(self):
self.log.info("Test gettransaction after changing a transaction's 'from me' status")
self.nodes[0].createwallet("fromme")
default_wallet = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
wallet = self.nodes[0].get_wallet_rpc("fromme")

# The 'fee' field of gettransaction is only added when the transaction is 'from me'
# Run twice, once for a transaction in the mempool, again when it confirms
for confirm in [False, True]:
key = get_generate_key()
default_wallet.importprivkey(key.privkey)

send_res = default_wallet.send(outputs=[{key.p2wpkh_addr: 1}, {wallet.getnewaddress(): 1}])
assert_equal(send_res["complete"], True)
vout = find_vout_for_address(self.nodes[0], send_res["txid"], key.p2wpkh_addr)
utxos = [{"txid": send_res["txid"], "vout": vout}]
self.generate(self.nodes[0], 1, sync_fun=self.no_op)

# Send to the test wallet, ensuring that one input is for the descriptor we will import,
# and that there are other inputs belonging to only the sending wallet
send_res = default_wallet.send(outputs=[{wallet.getnewaddress(): 1.5}], inputs=utxos, add_inputs=True)
assert_equal(send_res["complete"], True)
txid = send_res["txid"]
self.nodes[0].syncwithvalidationinterfacequeue()
tx_info = wallet.gettransaction(txid)
assert "fee" not in tx_info
assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), False)

if confirm:
self.generate(self.nodes[0], 1, sync_fun=self.no_op)
# Mock time forward and generate blocks so that the import does not rescan the transaction
self.nodes[0].setmocktime(int(time.time()) + MAX_FUTURE_BLOCK_TIME + 1)
self.generate(self.nodes[0], 10, sync_fun=self.no_op)

wallet.importprivkey(key.privkey)
# TODO: We should check that the fee matches, but since the transaction spends inputs
# not known to the wallet, it is incorrectly calculating the fee.
# assert_equal(wallet.gettransaction(txid)["fee"], fee)
tx_info = wallet.gettransaction(txid)
assert "fee" in tx_info
assert_equal(any(detail["category"] == "send" for detail in tx_info["details"]), True)

if __name__ == '__main__':
ListTransactionsTest(__file__).main()

@@ -4,6 +4,8 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test Migrating a wallet from legacy to descriptor."""
|
||||
|
||||
import os
from pathlib import Path
import random
import shutil
import struct
@@ -24,6 +26,7 @@ from test_framework.script import hash160
from test_framework.script_util import key_to_p2pkh_script, key_to_p2pk_script, script_to_p2sh_script, script_to_p2wsh_script
from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
find_vout_for_address,
sha256sum_file,
@@ -522,6 +525,14 @@ class WalletMigrationTest(BitcoinTestFramework):

assert_equal(bals, wallet.getbalances())

def clear_default_wallet(self, backup_file):
# Test cleanup: Clear unnamed default wallet for subsequent tests
(self.old_node.wallets_path / "wallet.dat").unlink()
(self.master_node.wallets_path / "wallet.dat").unlink(missing_ok=True)
shutil.rmtree(self.master_node.wallets_path / "default_wallet_watchonly", ignore_errors=True)
shutil.rmtree(self.master_node.wallets_path / "default_wallet_solvables", ignore_errors=True)
backup_file.unlink()

def test_default_wallet(self):
self.log.info("Test migration of the wallet named as the empty string")
wallet = self.create_legacy_wallet("")
@@ -548,6 +559,103 @@ class WalletMigrationTest(BitcoinTestFramework):

self.master_node.setmocktime(0)

wallet.unloadwallet()
self.clear_default_wallet(backup_file=Path(res["backup_path"]))

def test_default_wallet_watch_only(self):
self.log.info("Test unnamed (default) watch-only wallet migration")
master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name)
wallet = self.create_legacy_wallet("", blank=True)
wallet.importaddress(master_wallet.getnewaddress(address_type="legacy"))

res, def_wallet = self.migrate_and_get_rpc("")
wallet = self.master_node.get_wallet_rpc("default_wallet_watchonly")

info = wallet.getwalletinfo()
assert_equal(info["descriptors"], True)
assert_equal(info["format"], "sqlite")
assert_equal(info["private_keys_enabled"], False)
assert_equal(info["walletname"], "default_wallet_watchonly")

# The default wallet will still exist and have newly generated descriptors
assert (self.master_node.wallets_path / "wallet.dat").exists()
def_wallet_info = def_wallet.getwalletinfo()
assert_equal(def_wallet_info["descriptors"], True)
assert_equal(def_wallet_info["format"], "sqlite")
assert_equal(def_wallet_info["private_keys_enabled"], True)
assert_equal(def_wallet_info["walletname"], "")
assert_greater_than(def_wallet_info["keypoolsize"], 0)

wallet.unloadwallet()
self.clear_default_wallet(backup_file=Path(res["backup_path"]))

def test_migration_failure(self, wallet_name):
is_default = wallet_name == ""
wallet_pretty_name = "unnamed (default)" if is_default else f'"{wallet_name}"'
self.log.info(f"Test failure during migration of wallet named: {wallet_pretty_name}")
# Preface, set up legacy wallet and unload it
master_wallet = self.master_node.get_wallet_rpc(self.default_wallet_name)
wallet = self.create_legacy_wallet(wallet_name, blank=True)
wallet.importaddress(master_wallet.getnewaddress(address_type="legacy"))
wallet.unloadwallet()

if os.path.isabs(wallet_name):
old_path = master_path = Path(wallet_name)
else:
old_path = self.old_node.wallets_path / wallet_name
master_path = self.master_node.wallets_path / wallet_name
os.makedirs(master_path, exist_ok=True)
shutil.copyfile(old_path / "wallet.dat", master_path / "wallet.dat")

# This will be the watch-only directory the migration tries to create,
# we make migration fail by placing a wallet.dat file there.
wo_prefix = wallet_name or "default_wallet"
# wo_prefix might have path characters in it, this corresponds with
# DoMigration().
wo_dirname = f"{wo_prefix}_watchonly"
watch_only_dir = self.master_node.wallets_path / wo_dirname
os.mkdir(watch_only_dir)
shutil.copyfile(old_path / "wallet.dat", watch_only_dir / "wallet.dat")

# Make a file in the wallets dir that must still exist after migration
survive_path = self.master_node.wallets_path / "survive"
open(survive_path, "wb").close()
assert survive_path.exists()

mocked_time = int(time.time())
self.master_node.setmocktime(mocked_time)
assert_raises_rpc_error(-4, "Failed to create database", self.master_node.migratewallet, wallet_name)
self.master_node.setmocktime(0)

# Verify the /wallets/ path exists.
assert self.master_node.wallets_path.exists()

# Verify survive is still there
assert survive_path.exists()
# Verify both wallet paths exist.
assert Path(old_path / "wallet.dat").exists()
assert Path(master_path / "wallet.dat").exists()

backup_prefix = "default_wallet" if is_default else wallet_name
backup_path = master_path / f"{backup_prefix}_{mocked_time}.legacy.bak"
assert backup_path.exists()

with open(self.master_node.wallets_path / wallet_name / self.wallet_data_filename, "rb") as f:
data = f.read(16)
_, _, magic = struct.unpack("QII", data)
assert_equal(magic, BTREE_MAGIC)


# Cleanup
if is_default:
self.clear_default_wallet(backup_path)
else:
backup_path.unlink()
Path(watch_only_dir / "wallet.dat").unlink()
Path(watch_only_dir).rmdir()
Path(master_path / "wallet.dat").unlink()
Path(old_path / "wallet.dat").unlink(missing_ok=True)

def test_direct_file(self):
self.log.info("Test migration of a wallet that is not in a wallet directory")
wallet = self.create_legacy_wallet("plainfile")
@@ -1357,6 +1465,42 @@ class WalletMigrationTest(BitcoinTestFramework):
assert_equal(addr_info["solvable"], True)
assert "hex" in addr_info

def unsynced_wallet_on_pruned_node_fails(self):
self.log.info("Test migration of an unsynced wallet on a pruned node fails gracefully")
wallet = self.create_legacy_wallet("", load_on_startup=False)
last_wallet_synced_block = wallet.getwalletinfo()['lastprocessedblock']['height']
wallet.unloadwallet()

shutil.copyfile(self.old_node.wallets_path / "wallet.dat", self.master_node.wallets_path / "wallet.dat")

# Generate blocks just so the wallet best block is pruned
self.restart_node(0, ["-fastprune", "-prune=1", "-nowallet"])
self.connect_nodes(0, 1)
self.generate(self.master_node, 450, sync_fun=self.no_op)
self.master_node.pruneblockchain(250)
# Ensure next block to sync is unavailable
assert_raises_rpc_error(-1, "Block not available (pruned data)", self.master_node.getblock, self.master_node.getblockhash(last_wallet_synced_block + 1))

# Check migration failure
mocked_time = int(time.time())
self.master_node.setmocktime(mocked_time)
assert_raises_rpc_error(-4, "last wallet synchronisation goes beyond pruned data. You need to -reindex (download the whole blockchain again in case of pruned node)", self.master_node.migratewallet, wallet_name="")
self.master_node.setmocktime(0)

# Verify the /wallets/ path exists, the wallet is still BDB and the backup file is there.
assert self.master_node.wallets_path.exists()

with open(self.master_node.wallets_path / "wallet.dat", "rb") as f:
data = f.read(16)
_, _, magic = struct.unpack("QII", data)
assert_equal(magic, BTREE_MAGIC)

backup_path = self.master_node.wallets_path / f"default_wallet_{mocked_time}.legacy.bak"
assert backup_path.exists()

self.clear_default_wallet(backup_path)


def run_test(self):
self.master_node = self.nodes[0]
self.old_node = self.nodes[1]
@@ -1372,7 +1516,18 @@ class WalletMigrationTest(BitcoinTestFramework):
self.test_encrypted()
self.test_nonexistent()
self.test_unloaded_by_path()

migration_failure_cases = [
"",
"../",
os.path.abspath(self.master_node.datadir_path / "absolute_path"),
"normallynamedwallet"
]
for wallet_name in migration_failure_cases:
self.test_migration_failure(wallet_name=wallet_name)

self.test_default_wallet()
self.test_default_wallet_watch_only()
self.test_direct_file()
self.test_addressbook()
self.test_migrate_raw_p2sh()
@@ -1390,5 +1545,8 @@ class WalletMigrationTest(BitcoinTestFramework):
self.test_taproot()
self.test_solvable_no_privs()

# Note: After this test the first 250 blocks of 'master_node' are pruned
self.unsynced_wallet_on_pruned_node_fails()

if __name__ == '__main__':
WalletMigrationTest(__file__).main()