Mirror of https://github.com/bitcoin/bitcoin.git, synced 2026-01-22 00:00:55 +01:00

Compare commits (165 commits, SHA1 only):

fa91ad3420 b834447fb2 e9c978391f e973b61dbb f4b78c42e5 c6e7765c0a bab1ac827b 71633a9b5c
daef5852f0 7a71850a6d 2e4688618b 6e7ea3cf2a 3af199531b 76cdeb7b06 9405e915e7 5e8ad98163
a7e2d106db 9ea84c08d7 833848e9b8 a074d36254 d91f56e1e3 cc324aa2be 01c04d32aa abaf1e37a7
7a33cb9062 2cf352fd8e 8a16165ab7 6f136cd391 be0857745a 65bcbbc538 f24291bd96 73d3ab8fc9
ddfb9150b8 354d46bc10 5a0506eea0 020ed613be 56626300b8 97088fa75a 4917d0c0de 554ff3f7f3
16e10f928c c7979f429a 2d6426c296 46d9b9091b b2026fa290 3226616493 8bcb90d7e3 abf4a6eeae
398c176ea8 d82fc69829 513cef75ee eea16f7de7 6b3c1dbc5c d1b5d4e9ca 2d7ebd2d91 a8bb76b61f
666aec7d49 6f23ead4a2 9d9baafc6f 22ab141243 118abf4c30 f6d49d0a09 5750355139 78d93effd0
4a034cbeb4 6ded1fe117 4e8b64b181 773e4cda94 4339787379 c7f290b826 a08c3cc51c 06424fb004
0f0378fe3c 643385b22d 3b2dcc8b9a b4286cf354 5057b9a6ff 85ec6c6882 544f902b2a e826c3daa5
835b5b8bb1 a91567a980 819ee09af3 894a3cbe42 82c60a3151 849993377d f9f3e8b686 af086431e8
0a649d07c9 f3089fb2cf 1faf918a16 954c1a55e4 301aa5d814 f63b8e960d 7e1eca4882 f2bd79f80c
461dd13faf 9bc4afb62c 61cdc04a83 1288d44804 569ceb0df4 4c940d4789 9b95ab5e9d e97588fc3d
324caa8497 2717331981 a0e438bd49 7c6be9acae ea40fa95d9 5513516241 f9939cdbe0 0fba5ae021
10cbf2255d fd784f2774 084c95a18c 37d115c67e b0d88bcc50 99ab2e70e7 6448ebb5a7 162c009c1d
fcac8022d8 16b1710d97 027a60d218 65dc198d2c 89fe999cda 0034dcfba9 eb1574af0c f9f1ca5445
9dd7efc8c3 bbdab3ef7b da30ca0efa a0ae3fc8a7 1c1970fb45 3a7e093f94 567c3ee3cb 6b5396c4b1
03da7aff99 4e3cfa660d c5196bc9c4 0022e25333 206f5902db dfdd407c42 11538160b3 4ed7a51642
acfa83d9d0 81751341e9 7c3820ff63 dfe4e19f66 273ffda2c8 9cde68fa98 25f975b8df 24c793d06c
0b6b096421 a0992a842e 41262cc4d5 4987c03531 837c5c7fd8 b9e637bd0e 2b9738a083 4e8abca445
a08886d564 264418f80c a18b53f99e 9437415024 3cd8612cd7
.cirrus.yml (deleted, 214 lines)

```diff
@@ -1,214 +0,0 @@
-env:  # Global defaults
-  CIRRUS_CLONE_DEPTH: 1
-  CIRRUS_LOG_TIMESTAMP: true
-  MAKEJOBS: "-j10"
-  TEST_RUNNER_PORT_MIN: "14000"  # Must be larger than 12321, which is used for the http cache. See https://cirrus-ci.org/guide/writing-tasks/#http-cache
-  CI_FAILFAST_TEST_LEAVE_DANGLING: "1"  # Cirrus CI does not care about dangling processes and setting this variable avoids killing the CI script itself on error
-
-# A self-hosted machine(s) can be used via Cirrus CI. It can be configured with
-# multiple users to run tasks in parallel. No sudo permission is required.
-#
-# https://cirrus-ci.org/guide/persistent-workers/
-#
-# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+.
-#
-# The following specific types should exist, with the following requirements:
-# - small: For an x86_64 machine, with at least 2 vCPUs and 8 GB of memory.
-# - medium: For an x86_64 machine, with at least 4 vCPUs and 16 GB of memory.
-# - arm64: For an aarch64 machine, with at least 2 vCPUs and 8 GB of memory.
-#
-# CI jobs for the latter configuration can be run on x86_64 hardware
-# by installing qemu-user-static, which works out of the box with
-# podman or docker. Background: https://stackoverflow.com/a/72890225/313633
-#
-# The above machine types are matched to each task by their label. Refer to the
-# Cirrus CI docs for more details.
-#
-# When a contributor maintains a fork of the repo, any pull request they make
-# to their own fork, or to the main repository, will trigger two CI runs:
-# one for the branch push and one for the pull request.
-# This can be avoided by setting SKIP_BRANCH_PUSH=true as a custom env variable
-# in Cirrus repository settings, accessible from
-# https://cirrus-ci.com/github/my-organization/my-repository
-#
-# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1
-# ensures that previous containers and artifacts are cleared before each run.
-# This requires installing Podman instead of Docker.
-#
-# Furthermore:
-# - podman-docker-4.1+ is required due to the bugfix in 4.1
-#   (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200)
-# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example
-#   for a single user setup with sudo permission:
-#
-#   ```
-#   apt update && apt install git screen python3 bash podman-docker uidmap slirp4netns curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
-#   ```
-#
-# - There are no strict requirements on the hardware. Having fewer CPU threads
-#   than recommended merely causes the CI script to run slower.
-#   To avoid rare and intermittent OOM due to short memory usage spikes,
-#   it is recommended to add (and persist) swap:
-#
-#   ```
-#   fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab )
-#   ```
-#
-# - To register the persistent worker, open a `screen` session and run:
-#
-#   ```
-#   RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token
-#   ```
-
-# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
-filter_template: &FILTER_TEMPLATE
-  # Allow forks to specify SKIP_BRANCH_PUSH=true and skip CI runs when a branch is pushed,
-  # but still run CI when a PR is created.
-  # https://cirrus-ci.org/guide/writing-tasks/#conditional-task-execution
-  skip: $SKIP_BRANCH_PUSH == "true" && $CIRRUS_PR == ""
-  stateful: false  # https://cirrus-ci.org/guide/writing-tasks/#stateful-tasks
-
-base_template: &BASE_TEMPLATE
-  << : *FILTER_TEMPLATE
-  merge_base_script:
-    # Require git (used in fingerprint_script).
-    - git --version || ( apt-get update && apt-get install -y git )
-    - if [ "$CIRRUS_PR" = "" ]; then exit 0; fi
-    - git fetch --depth=1 $CIRRUS_REPO_CLONE_URL "pull/${CIRRUS_PR}/merge"
-    - git checkout FETCH_HEAD  # Use merged changes to detect silent merge conflicts
-    # Also, the merge commit is used to lint COMMIT_RANGE="HEAD~..HEAD"
-
-main_template: &MAIN_TEMPLATE
-  timeout_in: 120m  # https://cirrus-ci.org/faq/#instance-timed-out
-  ci_script:
-    - ./ci/test_run_all.sh
-
-global_task_template: &GLOBAL_TASK_TEMPLATE
-  << : *BASE_TEMPLATE
-  << : *MAIN_TEMPLATE
-
-compute_credits_template: &CREDITS_TEMPLATE
-  # https://cirrus-ci.org/pricing/#compute-credits
-  # Only use credits for pull requests to the main repo
-  use_compute_credits: $CIRRUS_REPO_FULL_NAME == 'bitcoin/bitcoin' && $CIRRUS_PR != ""
-
-task:
-  name: 'lint'
-  << : *BASE_TEMPLATE
-  container:
-    image: debian:bookworm
-    cpu: 1
-    memory: 1G
-  # For faster CI feedback, immediately schedule the linters
-  << : *CREDITS_TEMPLATE
-  test_runner_cache:
-    folder: "/lint_test_runner"
-    fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:test/lint/test_runner)
-  python_cache:
-    folder: "/python_build"
-    fingerprint_script: cat .python-version /etc/os-release
-  unshallow_script:
-    - git fetch --unshallow --no-tags
-  lint_script:
-    - ./ci/lint_run_all.sh
-
-task:
-  name: 'tidy'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: medium
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_tidy.sh"
-
-task:
-  name: 'ARM, unit tests, no functional tests'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: arm64  # Use arm64 worker to sidestep qemu and avoid a slow CI: https://github.com/bitcoin/bitcoin/pull/28087#issuecomment-1649399453
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_arm.sh"
-
-task:
-  name: 'Win64-cross'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_win64.sh"
-
-task:
-  name: 'CentOS, depends, gui'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_centos.sh"
-
-task:
-  name: 'previous releases, depends DEBUG'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_previous_releases.sh"
-
-task:
-  name: 'TSan, depends, gui'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: medium
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_tsan.sh"
-
-task:
-  name: 'MSan, depends'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  timeout_in: 300m  # Use longer timeout for the *rare* case where a full build (llvm + msan + depends + ...) needs to be done.
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_msan.sh"
-
-task:
-  name: 'fuzzer,address,undefined,integer, no depends'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: medium
-  timeout_in: 240m  # larger timeout, due to the high CPU demand
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh"
-
-task:
-  name: 'multiprocess, i686, DEBUG'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: medium
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_i686_multiprocess.sh"
-
-task:
-  name: 'no wallet, libbitcoinkernel'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh"
-
-task:
-  name: 'macOS-cross, gui, no tests'
-  << : *GLOBAL_TASK_TEMPLATE
-  persistent_worker:
-    labels:
-      type: small
-  env:
-    FILE_ENV: "./ci/test/00_setup_env_mac_cross.sh"
```
.github/actions/configure-docker/action.yml (new file, 56 lines)

```diff
@@ -0,0 +1,56 @@
+name: 'Configure Docker'
+description: 'Set up Docker build driver and configure build cache args'
+inputs:
+  use-cirrus:
+    description: 'Use cirrus cache'
+    required: true
+runs:
+  using: 'composite'
+  steps:
+    - name: Set up Docker Buildx
+      uses: docker/setup-buildx-action@v3
+      with:
+        # Use host network to allow access to cirrus gha cache running on the host
+        driver-opts: |
+          network=host
+
+    # This is required to allow buildkit to access the actions cache
+    - name: Expose actions cache variables
+      uses: actions/github-script@v6
+      with:
+        script: |
+          Object.keys(process.env).forEach(function (key) {
+            if (key.startsWith('ACTIONS_')) {
+              core.info(`Exporting ${key}`);
+              core.exportVariable(key, process.env[key]);
+            }
+          });
+
+    - name: Construct docker build cache args
+      shell: bash
+      run: |
+        # Configure docker build cache backend
+        #
+        # On forks the gha cache will work but will use Github's cache backend.
+        # Docker will check for variables $ACTIONS_CACHE_URL, $ACTIONS_RESULTS_URL and $ACTIONS_RUNTIME_TOKEN
+        # which are set automatically when running on GitHub infra: https://docs.docker.com/build/cache/backends/gha/#synopsis
+
+        # Use cirrus cache host
+        if [[ ${{ inputs.use-cirrus }} == 'true' ]]; then
+          url_args="url=${CIRRUS_CACHE_HOST},url_v2=${CIRRUS_CACHE_HOST}"
+        else
+          url_args=""
+        fi
+
+        # Always optimistically --cache-from in case a cache blob exists
+        args=(--cache-from "type=gha${url_args:+,${url_args}},scope=${CONTAINER_NAME}")
+
+        # If this is a push to the default branch, also add --cache-to to save the cache
+        if [[ ${{ github.event_name }} == "push" && ${{ github.ref_name }} == ${{ github.event.repository.default_branch }} ]]; then
+          args+=(--cache-to "type=gha${url_args:+,${url_args}},mode=max,ignore-error=true,scope=${CONTAINER_NAME}")
+        fi
+
+        # Always `--load` into docker images (needed when using the `docker-container` build driver).
+        args+=(--load)
+
+        echo "DOCKER_BUILD_CACHE_ARG=${args[*]}" >> $GITHUB_ENV
```
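The `${url_args:+,${url_args}}` expansion above inserts a leading comma only when `url_args` is non-empty. A quick shell sketch with illustrative values (the host matches the workflow's `CIRRUS_CACHE_HOST`, the scope is one of the container names used in this PR):

```bash
# ${var:+word} expands to "word" only if var is set and non-empty.
url_args="url=http://127.0.0.1:12321/,url_v2=http://127.0.0.1:12321/"
echo "type=gha${url_args:+,${url_args}},scope=ci_native_asan"
# -> type=gha,url=http://127.0.0.1:12321/,url_v2=http://127.0.0.1:12321/,scope=ci_native_asan

url_args=""
echo "type=gha${url_args:+,${url_args}},scope=ci_native_asan"
# -> type=gha,scope=ci_native_asan
```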
.github/actions/configure-environment/action.yml (new file, 27 lines)

```diff
@@ -0,0 +1,27 @@
+name: 'Configure environment'
+description: 'Configure CI, cache and container name environment variables'
+runs:
+  using: 'composite'
+  steps:
+    - name: Set CI and cache directories
+      shell: bash
+      run: |
+        echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV"
+        echo "BASE_BUILD_DIR=${{ runner.temp }}/build" >> "$GITHUB_ENV"
+        echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> $GITHUB_ENV
+        echo "DEPENDS_DIR=${{ runner.temp }}/depends" >> "$GITHUB_ENV"
+        echo "BASE_CACHE=${{ runner.temp }}/depends/built" >> $GITHUB_ENV
+        echo "SOURCES_PATH=${{ runner.temp }}/depends/sources" >> $GITHUB_ENV
+        echo "PREVIOUS_RELEASES_DIR=${{ runner.temp }}/previous_releases" >> $GITHUB_ENV
+
+    - name: Set cache hashes
+      shell: bash
+      run: |
+        echo "DEPENDS_HASH=$(git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
+        echo "PREVIOUS_RELEASES_HASH=$(git ls-tree HEAD test/get_previous_releases.py | sha256sum | cut -d' ' -f1)" >> $GITHUB_ENV
+
+    - name: Get container name
+      shell: bash
+      run: |
+        source $FILE_ENV
+        echo "CONTAINER_NAME=$CONTAINER_NAME" >> "$GITHUB_ENV"
```
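A sketch of what the `DEPENDS_HASH` key above actually digests, runnable from any checkout of the repo; the `FILE_ENV` value is one of the setup scripts touched in this PR:

```bash
# git ls-tree prints "<mode> <type> <object>\t<path>" for each tracked entry, so the
# resulting digest changes whenever the depends/ tree or the FILE_ENV script changes.
FILE_ENV="./ci/test/00_setup_env_native_asan.sh"
git ls-tree HEAD depends "$FILE_ENV"
git ls-tree HEAD depends "$FILE_ENV" | sha256sum | cut -d' ' -f1  # -> the DEPENDS_HASH component of the cache key
```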
.github/actions/restore-caches/action.yml (new file, 47 lines)

```diff
@@ -0,0 +1,47 @@
+name: 'Restore Caches'
+description: 'Restore ccache, depends sources, and built depends caches'
+runs:
+  using: 'composite'
+  steps:
+    - name: Restore Ccache cache
+      id: ccache-cache
+      uses: cirruslabs/cache/restore@v4
+      with:
+        path: ${{ env.CCACHE_DIR }}
+        key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }}
+        restore-keys: |
+          ccache-${{ env.CONTAINER_NAME }}-
+
+    - name: Restore depends sources cache
+      id: depends-sources
+      uses: cirruslabs/cache/restore@v4
+      with:
+        path: ${{ env.SOURCES_PATH }}
+        key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
+        restore-keys: |
+          depends-sources-${{ env.CONTAINER_NAME }}-
+
+    - name: Restore built depends cache
+      id: depends-built
+      uses: cirruslabs/cache/restore@v4
+      with:
+        path: ${{ env.BASE_CACHE }}
+        key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
+        restore-keys: |
+          depends-built-${{ env.CONTAINER_NAME }}-
+
+    - name: Restore previous releases cache
+      id: previous-releases
+      uses: cirruslabs/cache/restore@v4
+      with:
+        path: ${{ env.PREVIOUS_RELEASES_DIR }}
+        key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }}
+        restore-keys: |
+          previous-releases-${{ env.CONTAINER_NAME }}-
+
+    - name: export cache hits
+      shell: bash
+      run: |
+        echo "depends-sources-cache-hit=${{ steps.depends-sources.outputs.cache-hit }}" >> $GITHUB_ENV
+        echo "depends-built-cache-hit=${{ steps.depends-built.outputs.cache-hit }}" >> $GITHUB_ENV
+        echo "previous-releases-cache-hit=${{ steps.previous-releases.outputs.cache-hit }}" >> $GITHUB_ENV
```
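One behavioural detail worth spelling out (assuming `cirruslabs/cache` keeps the upstream `actions/cache` semantics it is a drop-in for): `cache-hit` is `'true'` only when the exact primary key matched. A `restore-keys` prefix match still restores files but reports no hit, which is precisely what lets the save-caches action below store a fresh entry under the new key. A bash sketch of that gate:

```bash
# Illustrative values: a prefix (restore-keys) match restores files but reports no hit.
depends_built_cache_hit=""  # an exact-key hit would set this to "true"
if [ "$depends_built_cache_hit" != "true" ]; then
  echo "stale or missing exact key -> save-caches will store a fresh depends-built entry"
fi
```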
.github/actions/save-caches/action.yml (new file, 39 lines)

```diff
@@ -0,0 +1,39 @@
+name: 'Save Caches'
+description: 'Save ccache, depends sources, and built depends caches'
+runs:
+  using: 'composite'
+  steps:
+    - name: debug cache hit inputs
+      shell: bash
+      run: |
+        echo "depends sources direct cache hit to primary key: ${{ env.depends-sources-cache-hit }}"
+        echo "depends built direct cache hit to primary key: ${{ env.depends-built-cache-hit }}"
+        echo "previous releases direct cache hit to primary key: ${{ env.previous-releases-cache-hit }}"
+
+    - name: Save Ccache cache
+      uses: cirruslabs/cache/save@v4
+      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) }}
+      with:
+        path: ${{ env.CCACHE_DIR }}
+        key: ccache-${{ env.CONTAINER_NAME }}-${{ github.run_id }}
+
+    - name: Save depends sources cache
+      uses: cirruslabs/cache/save@v4
+      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-sources-cache-hit != 'true') }}
+      with:
+        path: ${{ env.SOURCES_PATH }}
+        key: depends-sources-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
+
+    - name: Save built depends cache
+      uses: cirruslabs/cache/save@v4
+      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.depends-built-cache-hit != 'true') }}
+      with:
+        path: ${{ env.BASE_CACHE }}
+        key: depends-built-${{ env.CONTAINER_NAME }}-${{ env.DEPENDS_HASH }}
+
+    - name: Save previous releases cache
+      uses: cirruslabs/cache/save@v4
+      if: ${{ (github.event_name == 'push') && (github.ref_name == github.event.repository.default_branch) && (env.previous-releases-cache-hit != 'true') }}
+      with:
+        path: ${{ env.PREVIOUS_RELEASES_DIR }}
+        key: previous-releases-${{ env.CONTAINER_NAME }}-${{ env.PREVIOUS_RELEASES_HASH }}
```
.github/workflows/ci.yml (185 lines changed)

```diff
@@ -19,9 +19,26 @@ concurrency:
 
 env:
   CI_FAILFAST_TEST_LEAVE_DANGLING: 1  # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error
   MAKEJOBS: '-j10'
+  CIRRUS_CACHE_HOST: http://127.0.0.1:12321/ # When using Cirrus Runners this host can be used by the docker `gha` build cache type.
+  REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin' # Use cirrus runners and cache for this repo, instead of falling back to the slow GHA runners
 
 jobs:
+  runners:
+    name: 'determine runners'
+    runs-on: ubuntu-latest
+    outputs:
+      use-cirrus-runners: ${{ steps.runners.outputs.use-cirrus-runners }}
+    steps:
+      - id: runners
+        run: |
+          if [[ "${REPO_USE_CIRRUS_RUNNERS}" == "${{ github.repository }}" ]]; then
+            echo "use-cirrus-runners=true" >> "$GITHUB_OUTPUT"
+            echo "::notice title=Runner Selection::Using Cirrus Runners"
+          else
+            echo "use-cirrus-runners=false" >> "$GITHUB_OUTPUT"
+            echo "::notice title=Runner Selection::Using GitHub-hosted runners"
+          fi
+
   test-each-commit:
     name: 'test each commit'
     runs-on: ubuntu-24.04
@@ -106,8 +123,12 @@ jobs:
       BASE_ROOT_DIR: ${{ github.workspace }}
 
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
+      - &CHECKOUT
+        name: Checkout
+        uses: actions/checkout@v5
+        with:
+          # Ensure the latest merged pull request state is used, even on re-runs.
+          ref: &CHECKOUT_REF_TMPL ${{ github.event_name == 'pull_request' && github.ref || '' }}
 
       - name: Clang version
         run: |
@@ -175,8 +196,7 @@ jobs:
       job-name: 'Win64 native fuzz, VS 2022'
 
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
+      - *CHECKOUT
 
       - name: Configure Developer Command Prompt for Microsoft Visual C++
         # Using microsoft/setup-msbuild is not enough.
@@ -265,44 +285,151 @@ jobs:
       run: |
         py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_corpora
 
-  asan-lsan-ubsan-integer-no-depends-usdt:
-    name: 'ASan + LSan + UBSan + integer, no depends, USDT'
-    runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools
+  ci-matrix:
+    name: ${{ matrix.name }}
+    needs: runners
+    runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && matrix.cirrus-runner || matrix.fallback-runner }}
     if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }}
-    timeout-minutes: 120
+    timeout-minutes: ${{ matrix.timeout-minutes }}
 
     env:
-      FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
+      DANGER_CI_ON_HOST_FOLDERS: 1
+      FILE_ENV: ${{ matrix.file-env }}
+
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          - name: '32 bit ARM, unit tests, no functional tests'
+            cirrus-runner: 'ubuntu-24.04-arm' # Cirrus' Arm runners are Apple (with virtual Linux aarch64), which doesn't support 32-bit mode
+            fallback-runner: 'ubuntu-24.04-arm'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_arm.sh'
+
+          - name: 'win64 Cross'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_win64.sh'
+
+          - name: 'ASan + LSan + UBSan + integer, no depends, USDT'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md' # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_asan.sh'
+
+          - name: 'macOS-cross, gui, no tests'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_mac_cross.sh'
+
+          - name: 'No wallet, libbitcoinkernel'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-sm'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh'
+
+          - name: 'i686, multiprocess, DEBUG'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_i686_multiprocess.sh'
+
+          - name: 'fuzzer,address,undefined,integer, no depends'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 240
+            file-env: './ci/test/00_setup_env_native_fuzz.sh'
+
+          - name: 'previous releases, depends DEBUG'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_previous_releases.sh'
+
+          - name: 'CentOS, depends, gui'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_centos.sh'
+
+          - name: 'tidy'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_tidy.sh'
+
+          - name: 'TSan, depends, no gui'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-md'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_tsan.sh'
+
+          - name: 'MSan, depends'
+            cirrus-runner: 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-lg'
+            fallback-runner: 'ubuntu-24.04'
+            timeout-minutes: 120
+            file-env: './ci/test/00_setup_env_native_msan.sh'
 
     steps:
-      - name: Checkout
-        uses: actions/checkout@v4
+      - *CHECKOUT
 
-      - name: Set CI directories
-        run: |
-          echo "CCACHE_DIR=${{ runner.temp }}/ccache_dir" >> "$GITHUB_ENV"
-          echo "BASE_ROOT_DIR=${{ runner.temp }}" >> "$GITHUB_ENV"
-          echo "BASE_BUILD_DIR=${{ runner.temp }}/build-asan" >> "$GITHUB_ENV"
+      - name: Configure environment
+        uses: ./.github/actions/configure-environment
 
-      - name: Restore Ccache cache
-        id: ccache-cache
-        uses: actions/cache/restore@v4
-        with:
-          path: ${{ env.CCACHE_DIR }}
-          key: ${{ github.job }}-ccache-${{ github.run_id }}
-          restore-keys: ${{ github.job }}-ccache-
+      - name: Restore caches
+        id: restore-cache
+        uses: ./.github/actions/restore-caches
+
+      - name: Configure Docker
+        uses: ./.github/actions/configure-docker
+        with:
+          use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }}
 
       - name: Enable bpfcc script
+        if: ${{ env.CONTAINER_NAME == 'ci_native_asan' }}
         # In the image build step, no external environment variables are available,
         # so any settings will need to be written to the settings env file:
         run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh
 
+      - name: Set mmap_rnd_bits
+        if: ${{ env.CONTAINER_NAME == 'ci_native_tsan' || env.CONTAINER_NAME == 'ci_native_msan' }}
+        # Prevents crashes due to high ASLR entropy
+        run: sudo sysctl -w vm.mmap_rnd_bits=28
+
       - name: CI script
         run: ./ci/test_run_all.sh
 
-      - name: Save Ccache cache
-        uses: actions/cache/save@v4
-        if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true'
+      - name: Save caches
+        uses: ./.github/actions/save-caches
 
   lint:
     name: 'lint'
+    needs: runners
+    runs-on: ${{ needs.runners.outputs.use-cirrus-runners == 'true' && 'ghcr.io/cirruslabs/ubuntu-runner-amd64:24.04-xs' || 'ubuntu-24.04' }}
     if: ${{ vars.SKIP_BRANCH_PUSH != 'true' || github.event_name == 'pull_request' }}
     timeout-minutes: 20
     env:
      CONTAINER_NAME: "bitcoin-linter"
     steps:
       - name: Checkout
         uses: actions/checkout@v5
         with:
-          path: ${{ env.CCACHE_DIR }}
-          # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
-          key: ${{ github.job }}-ccache-${{ github.run_id }}
+          ref: *CHECKOUT_REF_TMPL
           fetch-depth: 0
 
+      - name: Configure Docker
+        uses: ./.github/actions/configure-docker
+        with:
+          use-cirrus: ${{ needs.runners.outputs.use-cirrus-runners }}
+
       - name: CI script
         run: |
           set -o xtrace
           docker buildx build -t "$CONTAINER_NAME" $DOCKER_BUILD_CACHE_ARG --file "./ci/lint_imagefile" .
+          CIRRUS_PR_FLAG=""
+          if [ "${{ github.event_name }}" = "pull_request" ]; then
+            CIRRUS_PR_FLAG="-e CIRRUS_PR=1"
+          fi
+          docker run --rm $CIRRUS_PR_FLAG -v "$(pwd)":/bitcoin "$CONTAINER_NAME"
```
```diff
@@ -28,7 +28,7 @@ get_directory_property(precious_variables CACHE_VARIABLES)
 #=============================
 set(CLIENT_NAME "Bitcoin Core")
 set(CLIENT_VERSION_MAJOR 29)
-set(CLIENT_VERSION_MINOR 1)
+set(CLIENT_VERSION_MINOR 3)
 set(CLIENT_VERSION_BUILD 0)
 set(CLIENT_VERSION_RC 1)
 set(CLIENT_VERSION_IS_RELEASE "true")
```
ci/README.md (32 lines changed)

```diff
@@ -1,8 +1,8 @@
-## CI Scripts
+# CI Scripts
 
 This directory contains scripts for each build step in each build stage.
 
-### Running a Stage Locally
+## Running a Stage Locally
 
 Be aware that the tests will be built and run in-place, so please run at your own risk.
 If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first.
@@ -27,7 +27,7 @@ with a specific configuration,
 env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'FILE_ENV="./ci/test/00_setup_env_arm.sh" ./ci/test_run_all.sh'
 
-### Configurations
+## Configurations
 
 The test files (`FILE_ENV`) are constructed to test a wide range of
 configurations, rather than a single pass/fail. This helps to catch build
@@ -49,8 +49,32 @@ env -i HOME="$HOME" PATH="$PATH" USER="$USER" bash -c 'MAKEJOBS="-j1" FILE_ENV="
 The files starting with `0n` (`n` greater than 0) are the scripts that are run
 in order.
 
-### Cache
+## Cache
 
 In order to avoid rebuilding all dependencies for each build, the binaries are
 cached and reused when possible. Changes in the dependency-generator will
 trigger cache-invalidation and rebuilds as necessary.
 
+## Configuring a repository for CI
+
+### Primary repository
+
+To configure the primary repository, follow these steps:
+
+1. Register with [Cirrus Runners](https://cirrus-runners.app/) and purchase runners.
+2. Install the Cirrus Runners GitHub app against the GitHub organization.
+3. Enable organisation-level runners to be used in public repositories:
+    1. `Org settings -> Actions -> Runner Groups -> Default -> Allow public repos`
+4. Permit the following actions to run:
+    1. cirruslabs/cache/restore@\*
+    1. cirruslabs/cache/save@\*
+    1. docker/setup-buildx-action@\*
+    1. actions/github-script@\*
+
+### Forked repositories
+
+When used in a fork the CI will run on GitHub's free hosted runners by default.
+In this case, due to GitHub's 10GB-per-repo cache size limitations caches will be frequently evicted and missed, but the workflows will run (slowly).
+
+It is also possible to use your own Cirrus Runners in your own fork with an appropriate patch to the `REPO_USE_CIRRUS_RUNNERS` variable in ../.github/workflows/ci.yml
+NB that Cirrus Runners only work at an organisation level, therefore in order to use your own Cirrus Runners, *the fork must be within your own organisation*.
```
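As a concrete illustration of that patch, a minimal sketch against the `env` block of `.github/workflows/ci.yml` shown earlier (`my-org` is a placeholder organisation name):

```diff
 env:
   CIRRUS_CACHE_HOST: http://127.0.0.1:12321/
-  REPO_USE_CIRRUS_RUNNERS: 'bitcoin/bitcoin'
+  REPO_USE_CIRRUS_RUNNERS: 'my-org/bitcoin'
```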
@@ -1,17 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
#
|
||||
# Copyright (c) 2019-present The Bitcoin Core developers
|
||||
# Distributed under the MIT software license, see the accompanying
|
||||
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
|
||||
|
||||
export LC_ALL=C.UTF-8
|
||||
|
||||
# Only used in .cirrus.yml. Refer to test/lint/README.md on how to run locally.
|
||||
|
||||
cp "./ci/retry/retry" "/ci_retry"
|
||||
cp "./.python-version" "/.python-version"
|
||||
mkdir --parents "/test/lint"
|
||||
cp --recursive "./test/lint/test_runner" "/test/lint/"
|
||||
set -o errexit; source ./ci/lint/04_install.sh
|
||||
set -o errexit
|
||||
./ci/lint/06_script.sh
|
||||
```diff
@@ -35,7 +35,7 @@ fi
 
 echo "Fallback to default values in env (if not yet set)"
 # The number of parallel jobs to pass down to make and test_runner.py
-export MAKEJOBS=${MAKEJOBS:--j4}
+export MAKEJOBS=${MAKEJOBS:--j$(if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi)}
 # Whether to prefer BusyBox over GNU utilities
 export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
```
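The new default sizes the job count to the machine: `nproc` where coreutils is available (Linux), otherwise the macOS `hw.logicalcpu` sysctl key. A one-liner to sanity-check the fallback on a given machine:

```bash
if command -v nproc > /dev/null 2>&1; then nproc; else sysctl -n hw.logicalcpu; fi
```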
```diff
@@ -8,6 +8,7 @@ export LC_ALL=C.UTF-8
 
 # Homebrew's python@3.12 is marked as externally managed (PEP 668).
 # Therefore, `--break-system-packages` is needed.
+export CONTAINER_NAME="ci_mac_native" # macos does not use a container, but the env var is needed for logging
 export PIP_PACKAGES="--break-system-packages zmq"
 export GOAL="install"
 export CMAKE_GENERATOR="Ninja"
```
```diff
@@ -6,6 +6,7 @@
 
 export LC_ALL=C.UTF-8
 
+export CONTAINER_NAME="ci_mac_native_fuzz" # macos does not use a container, but the env var is needed for logging
 export CMAKE_GENERATOR="Ninja"
 export BITCOIN_CONFIG="-DBUILD_FOR_FUZZING=ON"
 export CI_OS_NAME="macos"
```
```diff
@@ -19,15 +19,15 @@ else
 fi
 
 export CONTAINER_NAME=ci_native_asan
-export APT_LLVM_V="20"
+export APT_LLVM_V="21"
 export PACKAGES="systemtap-sdt-dev clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev python3-zmq qtbase5-dev qttools5-dev qttools5-dev-tools libevent-dev libboost-dev libdb5.3++-dev libzmq3-dev libqrencode-dev libsqlite3-dev ${BPFCC_PACKAGE}"
 export NO_DEPENDS=1
 export GOAL="install"
 export BITCOIN_CONFIG="\
 -DWITH_USDT=ON -DWITH_ZMQ=ON -DWITH_BDB=ON -DWARN_INCOMPATIBLE_BDB=OFF -DBUILD_GUI=ON \
 -DSANITIZERS=address,float-divide-by-zero,integer,undefined \
--DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \
--DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \
+-DCMAKE_C_COMPILER=clang \
+-DCMAKE_CXX_COMPILER=clang++ \
 -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \
 -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern -Wno-error=deprecated-declarations' \
 -DAPPEND_CXXFLAGS='-std=c++23' \
```
```diff
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
 
 export CONTAINER_NAME=ci_native_centos
 export CI_IMAGE_NAME_TAG="quay.io/centos/centos:stream10"
-export CI_BASE_PACKAGES="gcc-c++ glibc-devel libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng ksh rsync coreutils bison e2fsprogs cmake"
+export CI_BASE_PACKAGES="gcc-c++ glibc-devel libstdc++-devel ccache make git python3 python3-pip which patch xz procps-ng rsync coreutils bison e2fsprogs cmake dash"
 export PIP_PACKAGES="pyzmq"
 export DEP_OPTS="DEBUG=1" # Temporarily enable a DEBUG=1 build to check for GCC-bug-117966 regressions. This can be removed once the minimum GCC version is bumped to 12 in the previous releases task, see https://github.com/bitcoin/bitcoin/issues/31436#issuecomment-2530717875
 export GOAL="install"
```
```diff
@@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8
 
 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
 export CONTAINER_NAME=ci_native_fuzz
-export APT_LLVM_V="20"
+export APT_LLVM_V="21"
 export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libevent-dev libboost-dev libsqlite3-dev"
 export NO_DEPENDS=1
 export RUN_UNIT_TESTS=false
@@ -19,9 +19,8 @@ export CI_CONTAINER_CAP="--cap-add SYS_PTRACE" # If run with (ASan + LSan), the
 export BITCOIN_CONFIG="\
 -DBUILD_FOR_FUZZING=ON \
 -DSANITIZERS=fuzzer,address,undefined,float-divide-by-zero,integer \
--DCMAKE_C_COMPILER=clang-${APT_LLVM_V} \
--DCMAKE_CXX_COMPILER=clang++-${APT_LLVM_V} \
+-DCMAKE_C_COMPILER=clang \
+-DCMAKE_CXX_COMPILER=clang++ \
 -DCMAKE_C_FLAGS='-ftrivial-auto-var-init=pattern' \
 -DCMAKE_CXX_FLAGS='-ftrivial-auto-var-init=pattern' \
 "
-export LLVM_SYMBOLIZER_PATH="/usr/bin/llvm-symbolizer-${APT_LLVM_V}"
```
```diff
@@ -7,14 +7,16 @@
 export LC_ALL=C.UTF-8
 
 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-LIBCXX_DIR="/msan/cxx_build/"
+export APT_LLVM_V="21"
+LIBCXX_DIR="/cxx_build/"
 export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
-LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
+# -lstdc++ to resolve link issues due to upstream packaging
+LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument -lstdc++"
 export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
 
 export CONTAINER_NAME="ci_native_fuzz_msan"
-export PACKAGES="ninja-build"
 # BDB generates false-positives and will be removed in future
+export PACKAGES="ninja-build clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev"
 export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
 export GOAL="all"
 # Setting CMAKE_{C,CXX}_FLAGS_DEBUG flags to an empty string ensures that the flags set in MSAN_FLAGS remain unaltered.
@@ -27,7 +29,7 @@ export BITCOIN_CONFIG="\
 -DSANITIZERS=fuzzer,memory \
 -DAPPEND_CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE -U_FORTIFY_SOURCE' \
 "
-export USE_MEMORY_SANITIZER="true"
+export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins"
 export RUN_UNIT_TESTS="false"
 export RUN_FUNCTIONAL_TESTS="false"
 export RUN_FUZZ_TESTS=true
```
```diff
@@ -7,13 +7,14 @@
 export LC_ALL=C.UTF-8
 
 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-LIBCXX_DIR="/msan/cxx_build/"
+export APT_LLVM_V="21"
+LIBCXX_DIR="/cxx_build/"
 export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
 LIBCXX_FLAGS="-nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
 export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
 
 export CONTAINER_NAME="ci_native_msan"
-export PACKAGES="ninja-build"
+export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev ninja-build"
 # BDB generates false-positives and will be removed in future
 export DEP_OPTS="DEBUG=1 NO_BDB=1 NO_QT=1 CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
 export GOAL="install"
@@ -26,4 +27,4 @@ export BITCOIN_CONFIG="\
 -DSANITIZERS=memory \
 -DAPPEND_CPPFLAGS='-U_FORTIFY_SOURCE' \
 "
-export USE_MEMORY_SANITIZER="true"
+export USE_INSTRUMENTED_LIBCPP="MemoryWithOrigins"
```
```diff
@@ -8,9 +8,12 @@ export LC_ALL=C.UTF-8
 
 export CONTAINER_NAME=ci_native_tsan
 export CI_IMAGE_NAME_TAG="mirror.gcr.io/ubuntu:24.04"
-export APT_LLVM_V="20"
-export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} libclang-rt-${APT_LLVM_V}-dev libc++abi-${APT_LLVM_V}-dev libc++-${APT_LLVM_V}-dev python3-zmq"
-export DEP_OPTS="CC=clang-${APT_LLVM_V} CXX='clang++-${APT_LLVM_V} -stdlib=libc++'"
+export APT_LLVM_V="21"
+LIBCXX_DIR="/cxx_build/"
+LIBCXX_FLAGS="-fsanitize=thread -nostdinc++ -nostdlib++ -isystem ${LIBCXX_DIR}include/c++/v1 -L${LIBCXX_DIR}lib -Wl,-rpath,${LIBCXX_DIR}lib -lc++ -lc++abi -lpthread -Wno-unused-command-line-argument"
+export PACKAGES="clang-${APT_LLVM_V} llvm-${APT_LLVM_V} llvm-${APT_LLVM_V}-dev libclang-${APT_LLVM_V}-dev libclang-rt-${APT_LLVM_V}-dev python3-zmq ninja-build"
+export DEP_OPTS="CC=clang CXX=clang++ CXXFLAGS='${LIBCXX_FLAGS}' NO_QT=1"
 export GOAL="install"
 export BITCOIN_CONFIG="-DWITH_ZMQ=ON -DSANITIZERS=thread \
--DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'"
+-DAPPEND_CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKCONTENTION -D_LIBCPP_REMOVE_TRANSITIVE_INCLUDES'"
+export USE_INSTRUMENTED_LIBCPP="Thread"
```
```diff
@@ -43,32 +43,24 @@ elif [ "$CI_OS_NAME" != "macos" ]; then
   ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $CI_BASE_PACKAGES"
 fi
 
+if [ -n "${APT_LLVM_V}" ]; then
+  update-alternatives --install /usr/bin/clang++ clang++ "/usr/bin/clang++-${APT_LLVM_V}" 100
+  update-alternatives --install /usr/bin/clang clang "/usr/bin/clang-${APT_LLVM_V}" 100
+  update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer "/usr/bin/llvm-symbolizer-${APT_LLVM_V}" 100
+fi
+
 if [ -n "$PIP_PACKAGES" ]; then
   # shellcheck disable=SC2086
   ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
 fi
 
-if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
-  ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-20.1.0" /msan/llvm-project
+if [[ -n "${USE_INSTRUMENTED_LIBCPP}" ]]; then
+  ${CI_RETRY_EXE} git clone --depth=1 https://github.com/llvm/llvm-project -b "llvmorg-21.1.1" /llvm-project
 
-  cmake -G Ninja -B /msan/clang_build/ \
-    -DLLVM_ENABLE_PROJECTS="clang" \
-    -DCMAKE_BUILD_TYPE=Release \
-    -DLLVM_TARGETS_TO_BUILD=Native \
-    -DLLVM_ENABLE_RUNTIMES="compiler-rt;libcxx;libcxxabi;libunwind" \
-    -S /msan/llvm-project/llvm
-
-  ninja -C /msan/clang_build/ "$MAKEJOBS"
-  ninja -C /msan/clang_build/ install-runtimes
-
-  update-alternatives --install /usr/bin/clang++ clang++ /msan/clang_build/bin/clang++ 100
-  update-alternatives --install /usr/bin/clang clang /msan/clang_build/bin/clang 100
-  update-alternatives --install /usr/bin/llvm-symbolizer llvm-symbolizer /msan/clang_build/bin/llvm-symbolizer 100
-
-  cmake -G Ninja -B /msan/cxx_build/ \
+  cmake -G Ninja -B /cxx_build/ \
     -DLLVM_ENABLE_RUNTIMES="libcxx;libcxxabi;libunwind" \
     -DCMAKE_BUILD_TYPE=Release \
-    -DLLVM_USE_SANITIZER=MemoryWithOrigins \
+    -DLLVM_USE_SANITIZER="${USE_INSTRUMENTED_LIBCPP}" \
+    -DCMAKE_C_COMPILER=clang \
+    -DCMAKE_CXX_COMPILER=clang++ \
     -DLLVM_TARGETS_TO_BUILD=Native \
@@ -76,13 +68,13 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
     -DLIBCXXABI_USE_LLVM_UNWINDER=OFF \
     -DLIBCXX_ABI_DEFINES="_LIBCPP_ABI_BOUNDED_ITERATORS;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STD_ARRAY;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_STRING;_LIBCPP_ABI_BOUNDED_ITERATORS_IN_VECTOR;_LIBCPP_ABI_BOUNDED_UNIQUE_PTR" \
     -DLIBCXX_HARDENING_MODE=debug \
-    -S /msan/llvm-project/runtimes
+    -S /llvm-project/runtimes
 
-  ninja -C /msan/cxx_build/ "$MAKEJOBS"
+  ninja -C /cxx_build/ "$MAKEJOBS"
 
   # Clear no longer needed source folder
-  du -sh /msan/llvm-project
-  rm -rf /msan/llvm-project
+  du -sh /llvm-project
+  rm -rf /llvm-project
 fi
 
 if [[ "${RUN_TIDY}" == "true" ]]; then
```
```diff
@@ -23,34 +23,14 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
   fi
   echo "Creating $CI_IMAGE_NAME_TAG container to run in"
 
-  DOCKER_BUILD_CACHE_ARG=""
-  DOCKER_BUILD_CACHE_TEMPDIR=""
-  DOCKER_BUILD_CACHE_OLD_DIR=""
-  DOCKER_BUILD_CACHE_NEW_DIR=""
-  # If set, use an `docker build` cache directory on the CI host
-  # to cache docker image layers for the CI container image.
-  # This cache can be multiple GB in size. Prefixed with DANGER
-  # as setting it removes (old cache) files from the host.
-  if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
-    # Directory where the current cache for this run could be. If not existing
-    # or empty, "docker build" will warn, but treat it as cache-miss and continue.
-    DOCKER_BUILD_CACHE_OLD_DIR="${DANGER_DOCKER_BUILD_CACHE_HOST_DIR}/${CONTAINER_NAME}"
-    # Temporary directory for a newly created cache. We can't write the new
-    # cache into OLD_DIR directly, as old cache layers would not be removed.
-    # The NEW_DIR contents are moved to OLD_DIR after OLD_DIR has been cleared.
-    # This happens after `docker build`. If a task fails or is aborted, the
-    # DOCKER_BUILD_CACHE_TEMPDIR might be retained on the host. If the host isn't
-    # ephemeral, it has to take care of cleaning old TEMPDIR's up.
-    DOCKER_BUILD_CACHE_TEMPDIR="$(mktemp --directory ci-docker-build-cache-XXXXXXXXXX)"
-    DOCKER_BUILD_CACHE_NEW_DIR="${DOCKER_BUILD_CACHE_TEMPDIR}/${CONTAINER_NAME}"
-    DOCKER_BUILD_CACHE_ARG="--cache-from type=local,src=${DOCKER_BUILD_CACHE_OLD_DIR} --cache-to type=local,dest=${DOCKER_BUILD_CACHE_NEW_DIR},mode=max"
-  fi
-
+  # Use buildx unconditionally
+  # Using buildx is required to properly load the correct driver, for use with registry caching. Neither build, nor BUILDKIT=1 currently do this properly
   # shellcheck disable=SC2086
-  DOCKER_BUILDKIT=1 docker build \
+  docker buildx build \
     --file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \
     --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \
     --build-arg "FILE_ENV=${FILE_ENV}" \
+    --build-arg "BASE_ROOT_DIR=${BASE_ROOT_DIR}" \
     $MAYBE_CPUSET \
     --platform="${CI_IMAGE_PLATFORM}" \
     --label="${CI_IMAGE_LABEL}" \
@@ -58,15 +38,6 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
     $DOCKER_BUILD_CACHE_ARG \
     "${BASE_READ_ONLY_DIR}"
 
-  if [ "$DANGER_DOCKER_BUILD_CACHE_HOST_DIR" ]; then
-    if [ -e "${DOCKER_BUILD_CACHE_NEW_DIR}/index.json" ]; then
-      echo "Removing the existing docker build cache in ${DOCKER_BUILD_CACHE_OLD_DIR}"
-      rm -rf "${DOCKER_BUILD_CACHE_OLD_DIR}"
-      echo "Moving the contents of ${DOCKER_BUILD_CACHE_NEW_DIR} to ${DOCKER_BUILD_CACHE_OLD_DIR}"
-      mv "${DOCKER_BUILD_CACHE_NEW_DIR}" "${DOCKER_BUILD_CACHE_OLD_DIR}"
-    fi
-  fi
-
   docker volume create "${CONTAINER_NAME}_ccache" || true
   docker volume create "${CONTAINER_NAME}_depends" || true
   docker volume create "${CONTAINER_NAME}_depends_sources" || true
```
```diff
@@ -24,6 +24,14 @@ fi
 echo "Free disk space:"
 df -h
 
+# We force an install of linux-headers again here via $PACKAGES to fix any
+# kernel mismatch between a cached docker image and the underlying host.
+# This can happen occasionally on hosted runners if the runner image is updated.
+if [[ "$CONTAINER_NAME" == "ci_native_asan" ]]; then
+  $CI_RETRY_EXE apt-get update
+  ${CI_RETRY_EXE} bash -c "apt-get install --no-install-recommends --no-upgrade -y $PACKAGES"
+fi
+
 # What host to compile for. See also ./depends/README.md
 # Tests that need cross-compilation export the appropriate HOST.
 # Tests that run natively guess the host
```
```diff
@@ -92,7 +100,7 @@ fi
 
 if [ -z "$NO_DEPENDS" ]; then
   if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then
-    SHELL_OPTS="CONFIG_SHELL=/bin/ksh" # Temporarily use ksh instead of dash, until https://bugzilla.redhat.com/show_bug.cgi?id=2335416 is fixed.
+    SHELL_OPTS="CONFIG_SHELL=/bin/dash"
   else
     SHELL_OPTS="CONFIG_SHELL="
   fi
@@ -129,6 +137,12 @@ bash -c "cmake -S $BASE_ROOT_DIR $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG || ( (cat $
 bash -c "cmake --build . $MAKEJOBS --target all $GOAL" || ( echo "Build failure. Verbose build follows." && cmake --build . --target all "$GOAL" --verbose ; false )
 
 bash -c "${PRINT_CCACHE_STATISTICS}"
+if [ "$CI" = "true" ]; then
+  hit_rate=$(ccache -s | grep "Hits:" | head -1 | sed 's/.*(\(.*\)%).*/\1/')
+  if [ "${hit_rate%.*}" -lt 75 ]; then
+    echo "::notice title=low ccache hitrate::Ccache hit-rate in $CONTAINER_NAME was $hit_rate%"
+  fi
+fi
 du -sh "${DEPENDS_DIR}"/*/
 du -sh "${PREVIOUS_RELEASES_DIR}"
```
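A sketch of the hit-rate parsing added above, fed an illustrative `ccache -s` summary line (the exact output format varies across ccache versions, so treat the sample line as an assumption):

```bash
line="  Hits:             1234 /  1500 (82.27 %)"
hit_rate=$(echo "$line" | sed 's/.*(\(.*\)%).*/\1/')  # capture between "(" and "%" -> "82.27 "
echo "${hit_rate%.*}"                                 # strip from the last "." -> "82", safe for the -lt test
```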
```diff
@@ -4,12 +4,16 @@
 
 # See ci/README.md for usage.
 
-ARG CI_IMAGE_NAME_TAG
+# We never want scratch, but default arg silences a Warning
+ARG CI_IMAGE_NAME_TAG=scratch
 FROM ${CI_IMAGE_NAME_TAG}
 
 ARG FILE_ENV
 ENV FILE_ENV=${FILE_ENV}
 
+ARG BASE_ROOT_DIR
+ENV BASE_ROOT_DIR=${BASE_ROOT_DIR}
+
 COPY ./ci/retry/retry /usr/bin/retry
 COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_container_base/ci/test/
```
```diff
@@ -36,6 +36,10 @@ if(USDT_INCLUDE_DIR)
   include(CheckCXXSourceCompiles)
   set(CMAKE_REQUIRED_INCLUDES ${USDT_INCLUDE_DIR})
   check_cxx_source_compiles("
+    #if defined(__arm__)
+    # define STAP_SDT_ARG_CONSTRAINT g
+    #endif
+
     // Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use
     // the optional variadic macros to define tracepoints.
     #define SDT_USE_VARIADIC 1
```
```diff
@@ -73,19 +73,7 @@ mkdir -p "$VERSION_BASE"
 # SOURCE_DATE_EPOCH should not unintentionally be set
 ################
 
-if [ -n "$SOURCE_DATE_EPOCH" ] && [ -z "$FORCE_SOURCE_DATE_EPOCH" ]; then
-cat << EOF
-ERR: Environment variable SOURCE_DATE_EPOCH is set which may break reproducibility.
-
-Aborting...
-
-Hint: You may want to:
-1. Unset this variable: \`unset SOURCE_DATE_EPOCH\` before rebuilding
-2. Set the 'FORCE_SOURCE_DATE_EPOCH' environment variable if you insist on
-   using your own epoch
-EOF
-exit 1
-fi
+check_source_date_epoch
 
 ################
 # Build directories should not exist
```
```diff
@@ -67,6 +67,12 @@ EOF
 exit 1
 fi
 
+################
+# SOURCE_DATE_EPOCH should not unintentionally be set
+################
+
+check_source_date_epoch
+
 ################
 # The codesignature git worktree should not be dirty
 ################
```
```diff
@@ -21,6 +21,26 @@ check_tools() {
     done
 }
 
+################
+# SOURCE_DATE_EPOCH should not unintentionally be set
+################
+
+check_source_date_epoch() {
+if [ -n "$SOURCE_DATE_EPOCH" ] && [ -z "$FORCE_SOURCE_DATE_EPOCH" ]; then
+cat << EOF
+ERR: Environment variable SOURCE_DATE_EPOCH is set which may break reproducibility.
+
+Aborting...
+
+Hint: You may want to:
+1. Unset this variable: \`unset SOURCE_DATE_EPOCH\` before rebuilding
+2. Set the 'FORCE_SOURCE_DATE_EPOCH' environment variable if you insist on
+   using your own epoch
+EOF
+exit 1
+fi
+}
+
 check_tools cat env readlink dirname basename git
 
 ################
```
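How the relocated guard behaves in practice, sketched as shell invocations (paths assume the usual `contrib/guix` entry points):

```bash
# The guard aborts when SOURCE_DATE_EPOCH leaks in from the environment...
SOURCE_DATE_EPOCH=1700000000 ./contrib/guix/guix-build  # prints the ERR/Hint text above and exits 1
# ...unless the override is explicitly requested:
FORCE_SOURCE_DATE_EPOCH=1 SOURCE_DATE_EPOCH=1700000000 ./contrib/guix/guix-build
```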
```diff
@@ -2,6 +2,7 @@
             ((gnu packages bash) #:select (bash-minimal))
             (gnu packages bison)
             ((gnu packages certs) #:select (nss-certs))
+            ((gnu packages check) #:select (libfaketime))
             ((gnu packages cmake) #:select (cmake-minimal))
             (gnu packages commencement)
             (gnu packages compression)
@@ -209,7 +210,17 @@ and abstract ELF, PE and MachO formats.")
               (base32
                "1j47vwq4caxfv0xw68kw5yh00qcpbd56d7rq6c483ma3y7s96yyz"))))
     (build-system cmake-build-system)
-    (inputs (list openssl))
+    (arguments
+     (list
+      #:phases
+      #~(modify-phases %standard-phases
+          (replace 'check
+            (lambda* (#:key tests? #:allow-other-keys)
+              (if tests?
+                  (invoke "faketime" "-f" "@2025-01-01 00:00:00" ;; Tests fail after 2025.
+                          "ctest" "--output-on-failure" "--no-tests=error")
+                  (format #t "test suite not run~%")))))))
+    (inputs (list libfaketime openssl))
     (home-page "https://github.com/mtrojnar/osslsigncode")
     (synopsis "Authenticode signing and timestamping tool")
     (description "osslsigncode is a small tool that implements part of the
```
```diff
@@ -465,18 +465,18 @@ if config.translations_dir:
         sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n")
         sys.exit(1)
 
-    print("+ Adding Qt translations +")
+print("+ Adding Qt translations +")
 
-    translations = Path(config.translations_dir[0])
+translations = Path(config.translations_dir[0])
 
-    regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)')
+regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)')
 
-    lang_files = [x for x in translations.iterdir() if regex.match(x.name)]
+lang_files = [x for x in translations.iterdir() if regex.match(x.name)]
 
-    for file in lang_files:
-        if verbose:
-            print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name))
-        shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name))
+for file in lang_files:
+    if verbose:
+        print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name))
+    shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name))
 
 # ------------------------------------------------
```
````diff
@@ -10,14 +10,13 @@ to addrman with).
 
 Update `MIN_BLOCKS` in `makeseeds.py` and the `-m`/`--minblocks` arguments below, as needed.
 
-The seeds compiled into the release are created from sipa's, achow101's and luke-jr's
+The seeds compiled into the release are created from sipa's and achow101's
 DNS seed, virtu's crawler, and asmap community AS map data. Run the following commands
 from the `/contrib/seeds` directory:
 
 ```
 curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
 curl https://21.ninja/seeds.txt.gz | gzip -dc >> seeds_main.txt
-curl https://luke.dashjr.org/programs/bitcoin/files/charts/seeds.txt >> seeds_main.txt
 curl https://mainnet.achownodes.xyz/seeds.txt.gz | gzip -dc >> seeds_main.txt
 curl https://signet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_signet.txt
 curl https://testnet.achownodes.xyz/seeds.txt.gz | gzip -dc > seeds_test.txt
````
@@ -66,7 +66,7 @@ BPF_PERF_OUTPUT(replaced_events);
int trace_added(struct pt_regs *ctx) {
    struct added_event added = {};
    void *phash = NULL;
    bpf_usdt_readarg(1, ctx, phash);
    bpf_usdt_readarg(1, ctx, &phash);
    bpf_probe_read_user(&added.hash, sizeof(added.hash), phash);
    bpf_usdt_readarg(2, ctx, &added.vsize);
    bpf_usdt_readarg(3, ctx, &added.fee);
@@ -78,9 +78,9 @@ int trace_added(struct pt_regs *ctx) {
int trace_removed(struct pt_regs *ctx) {
    struct removed_event removed = {};
    void *phash = NULL, *preason = NULL;
    bpf_usdt_readarg(1, ctx, phash);
    bpf_usdt_readarg(1, ctx, &phash);
    bpf_probe_read_user(&removed.hash, sizeof(removed.hash), phash);
    bpf_usdt_readarg(2, ctx, preason);
    bpf_usdt_readarg(2, ctx, &preason);
    bpf_probe_read_user_str(&removed.reason, sizeof(removed.reason), preason);
    bpf_usdt_readarg(3, ctx, &removed.vsize);
    bpf_usdt_readarg(4, ctx, &removed.fee);
@@ -93,9 +93,9 @@ int trace_removed(struct pt_regs *ctx) {
int trace_rejected(struct pt_regs *ctx) {
    struct rejected_event rejected = {};
    void *phash = NULL, *preason = NULL;
    bpf_usdt_readarg(1, ctx, phash);
    bpf_usdt_readarg(1, ctx, &phash);
    bpf_probe_read_user(&rejected.hash, sizeof(rejected.hash), phash);
    bpf_usdt_readarg(2, ctx, preason);
    bpf_usdt_readarg(2, ctx, &preason);
    bpf_probe_read_user_str(&rejected.reason, sizeof(rejected.reason), preason);
    rejected_events.perf_submit(ctx, &rejected, sizeof(rejected));
    return 0;
@@ -104,12 +104,12 @@ int trace_rejected(struct pt_regs *ctx) {
int trace_replaced(struct pt_regs *ctx) {
    struct replaced_event replaced = {};
    void *phash_replaced = NULL, *phash_replacement = NULL;
    bpf_usdt_readarg(1, ctx, phash_replaced);
    bpf_usdt_readarg(1, ctx, &phash_replaced);
    bpf_probe_read_user(&replaced.replaced_hash, sizeof(replaced.replaced_hash), phash_replaced);
    bpf_usdt_readarg(2, ctx, &replaced.replaced_vsize);
    bpf_usdt_readarg(3, ctx, &replaced.replaced_fee);
    bpf_usdt_readarg(4, ctx, &replaced.replaced_entry_time);
    bpf_usdt_readarg(5, ctx, phash_replacement);
    bpf_usdt_readarg(5, ctx, &phash_replacement);
    bpf_probe_read_user(&replaced.replacement_hash, sizeof(replaced.replacement_hash), phash_replacement);
    bpf_usdt_readarg(6, ctx, &replaced.replacement_vsize);
    bpf_usdt_readarg(7, ctx, &replaced.replacement_fee);

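The fix in each probe above is the same: `bpf_usdt_readarg` writes the traced argument through its destination parameter, so it must receive the address of the local pointer (`&phash`), not the pointer's uninitialized value. A reduced sketch of the out-parameter pitfall, with a hypothetical `ReadArg` stand-in for the BCC helper:

```cpp
#include <cassert>
#include <cstdint>

// Hypothetical stand-in for bpf_usdt_readarg: writes a value through `out`.
void ReadArg(uint64_t value, void** out) { *out = reinterpret_cast<void*>(value); }

int main() {
    void* phash = nullptr;
    // Buggy form (reduced): ReadArg(0x1234, (void**)phash) would write
    // through a null/garbage pointer, since it receives phash's *value*.
    ReadArg(0x1234, &phash); // fixed form: pass the address of the local pointer
    assert(phash == reinterpret_cast<void*>(0x1234));
    return 0;
}
```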
@@ -1,8 +1,8 @@
package=native_libmultiprocess
$(package)_version=1954f7f65661d49e700c344eae0fc8092decf975
$(package)_version=v5.0
$(package)_download_path=https://github.com/bitcoin-core/libmultiprocess/archive
$(package)_file_name=$($(package)_version).tar.gz
$(package)_sha256_hash=fc014bd74727c1d5d30b396813685012c965d079244dd07b53bc1c75c610a2cb
$(package)_sha256_hash=401984715b271a3446e1910f21adf048ba390d31cc93cc3073742e70d56fa3ea
$(package)_dependencies=native_capnp

define $(package)_config_cmds

@@ -1,6 +1,6 @@
package=qt
$(package)_version=5.15.16
$(package)_download_path=https://download.qt.io/official_releases/qt/5.15/$($(package)_version)/submodules
$(package)_download_path=https://download.qt.io/archive/qt/5.15/$($(package)_version)/submodules
$(package)_suffix=everywhere-opensource-src-$($(package)_version).tar.xz
$(package)_file_name=qtbase-$($(package)_suffix)
$(package)_sha256_hash=b04815058c18058b6ba837206756a2c87d1391f07a0dcb0dd314f970fd041592

@@ -9,6 +9,10 @@ To Build

```bash
cmake -B build
```
Run `cmake -B build -LH` to see the full list of available options.

```bash
cmake --build build    # Append "-j N" for N parallel jobs
cmake --install build  # Optional
```
@@ -171,13 +175,6 @@ In this case there is no dependency on SQLite or Berkeley DB.

Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call.

Additional Configure Flags
--------------------------
A list of additional configure flags can be displayed with:

    cmake -B build -LH

Setup and Build Example: Arch Linux
-----------------------------------
This example lists the steps necessary to set up and build a command-line-only distribution of the latest changes on Arch Linux:

@@ -30,9 +30,13 @@ Bitcoin Core requires one of the following compilers.
| [Fontconfig](../depends/packages/fontconfig.mk) (gui) | [link](https://www.freedesktop.org/wiki/Software/fontconfig/) | [2.12.6](https://github.com/bitcoin/bitcoin/pull/23495) | 2.6 | Yes |
| [FreeType](../depends/packages/freetype.mk) (gui) | [link](https://freetype.org) | [2.11.0](https://github.com/bitcoin/bitcoin/commit/01544dd78ccc0b0474571da854e27adef97137fb) | 2.3.0 | Yes |
| [qrencode](../depends/packages/qrencode.mk) (gui) | [link](https://fukuchi.org/works/qrencode/) | [4.1.1](https://github.com/bitcoin/bitcoin/pull/27312) | N/A | No |
| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/official_releases/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
| [Qt](../depends/packages/qt.mk) (gui) | [link](https://download.qt.io/archive/qt/) | [5.15.16](https://github.com/bitcoin/bitcoin/pull/30774) | [5.11.3](https://github.com/bitcoin/bitcoin/pull/24132) | No |
| [ZeroMQ](../depends/packages/zeromq.mk) (notifications) | [link](https://github.com/zeromq/libzmq/releases) | [4.3.4](https://github.com/bitcoin/bitcoin/pull/23956) | 4.0.0 | No |
| [Berkeley DB](../depends/packages/bdb.mk) (legacy wallet) | [link](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.30 | 4.8.x | No |
| [SQLite](../depends/packages/sqlite.mk) (wallet) | [link](https://sqlite.org) | [3.38.5](https://github.com/bitcoin/bitcoin/pull/25378) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | No |
| Python (scripts, tests) | [link](https://www.python.org) | N/A | [3.10](https://github.com/bitcoin/bitcoin/pull/30527) | No |
| [systemtap](../depends/packages/systemtap.mk) ([tracing](tracing.md)) | [link](https://sourceware.org/systemtap/) | [4.8](https://github.com/bitcoin/bitcoin/pull/26945) | N/A | No |
| [capnproto](../depends/packages/capnp.mk) ([multiprocess](multiprocess.md)) | [link](https://capnproto.org/) | [1.2.0](https://github.com/bitcoin/bitcoin/pull/32760) | [0.7.0](https://github.com/bitcoin-core/libmultiprocess/pull/88) | No |
| [libmultiprocess](../depends/packages/libmultiprocess.mk) ([multiprocess](multiprocess.md)) | [link](https://github.com/bitcoin-core/libmultiprocess) | [5.0](https://github.com/bitcoin/bitcoin/pull/31945) | [v5.0-pre1](https://github.com/bitcoin/bitcoin/pull/31740)* | No |

\* Libmultiprocess 5.x versions should be compatible, but 6.0 and later are not due to bitcoin-core/libmultiprocess#160.

@@ -1,7 +1,7 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-CLI "1" "July 2025" "bitcoin-cli v29.1.0rc1" "User Commands"
.TH BITCOIN-CLI "1" "January 2026" "bitcoin-cli v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-cli \- manual page for bitcoin-cli v29.1.0rc1
bitcoin-cli \- manual page for bitcoin-cli v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-cli
[\fI\,options\/\fR] \fI\,<command> \/\fR[\fI\,params\/\fR]
@@ -15,7 +15,7 @@ bitcoin-cli \- manual page for bitcoin-cli v29.1.0rc1
.B bitcoin-cli
[\fI\,options\/\fR] \fI\,help <command>\/\fR
.SH DESCRIPTION
Bitcoin Core RPC client version v29.1.0rc1
Bitcoin Core RPC client version v29.3.0rc1
.PP
The bitcoin\-cli utility provides a command line interface to interact with a Bitcoin Core RPC server.
.PP

@@ -1,12 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-QT "1" "July 2025" "bitcoin-qt v29.1.0rc1" "User Commands"
.TH BITCOIN-QT "1" "January 2026" "bitcoin-qt v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-qt \- manual page for bitcoin-qt v29.1.0rc1
bitcoin-qt \- manual page for bitcoin-qt v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-qt
[\fI\,options\/\fR] [\fI\,URI\/\fR]
.SH DESCRIPTION
Bitcoin Core version v29.1.0rc1
Bitcoin Core version v29.3.0rc1
.PP
The bitcoin\-qt application provides a graphical interface for interacting with Bitcoin Core.
.PP
@@ -702,7 +702,7 @@ this size or less (default: 83)
\fB\-minrelaytxfee=\fR<amt>
.IP
Fees (in BTC/kvB) smaller than this are considered zero fee for
relaying, mining and transaction creation (default: 0.00001)
relaying, mining and transaction creation (default: 0.000001)
.HP
\fB\-permitbaremultisig\fR
.IP
@@ -729,7 +729,7 @@ Set maximum BIP141 block weight (default: 4000000)
\fB\-blockmintxfee=\fR<amt>
.IP
Set lowest fee rate (in BTC/kvB) for transactions to be included in
block creation. (default: 0.00001)
block creation. (default: 0.00000001)
.HP
\fB\-blockreservedweight=\fR<n>
.IP

@@ -1,7 +1,7 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-TX "1" "July 2025" "bitcoin-tx v29.1.0rc1" "User Commands"
.TH BITCOIN-TX "1" "January 2026" "bitcoin-tx v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-tx \- manual page for bitcoin-tx v29.1.0rc1
bitcoin-tx \- manual page for bitcoin-tx v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-tx
[\fI\,options\/\fR] \fI\,<hex-tx> \/\fR[\fI\,commands\/\fR]
@@ -9,7 +9,7 @@ bitcoin-tx \- manual page for bitcoin-tx v29.1.0rc1
.B bitcoin-tx
[\fI\,options\/\fR] \fI\,-create \/\fR[\fI\,commands\/\fR]
.SH DESCRIPTION
Bitcoin Core bitcoin\-tx utility version v29.1.0rc1
Bitcoin Core bitcoin\-tx utility version v29.3.0rc1
.PP
The bitcoin\-tx tool is used for creating and modifying bitcoin transactions.
.PP

@@ -1,7 +1,7 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-UTIL "1" "July 2025" "bitcoin-util v29.1.0rc1" "User Commands"
.TH BITCOIN-UTIL "1" "January 2026" "bitcoin-util v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-util \- manual page for bitcoin-util v29.1.0rc1
bitcoin-util \- manual page for bitcoin-util v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-util
[\fI\,options\/\fR] [\fI\,command\/\fR]
@@ -9,7 +9,7 @@ bitcoin-util \- manual page for bitcoin-util v29.1.0rc1
.B bitcoin-util
[\fI\,options\/\fR] \fI\,grind <hex-block-header>\/\fR
.SH DESCRIPTION
Bitcoin Core bitcoin\-util utility version v29.1.0rc1
Bitcoin Core bitcoin\-util utility version v29.3.0rc1
.PP
The bitcoin\-util tool provides bitcoin related functionality that does not rely on the ability to access a running node. Available [commands] are listed below.
.SH OPTIONS

@@ -1,12 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIN-WALLET "1" "July 2025" "bitcoin-wallet v29.1.0rc1" "User Commands"
.TH BITCOIN-WALLET "1" "January 2026" "bitcoin-wallet v29.3.0rc1" "User Commands"
.SH NAME
bitcoin-wallet \- manual page for bitcoin-wallet v29.1.0rc1
bitcoin-wallet \- manual page for bitcoin-wallet v29.3.0rc1
.SH SYNOPSIS
.B bitcoin-wallet
[\fI\,options\/\fR] \fI\,<command>\/\fR
.SH DESCRIPTION
Bitcoin Core bitcoin\-wallet utility version v29.1.0rc1
Bitcoin Core bitcoin\-wallet utility version v29.3.0rc1
.PP
bitcoin\-wallet is an offline tool for creating and interacting with Bitcoin Core wallet files.
.PP

@@ -1,12 +1,12 @@
.\" DO NOT MODIFY THIS FILE! It was generated by help2man 1.49.3.
.TH BITCOIND "1" "July 2025" "bitcoind v29.1.0rc1" "User Commands"
.TH BITCOIND "1" "January 2026" "bitcoind v29.3.0rc1" "User Commands"
.SH NAME
bitcoind \- manual page for bitcoind v29.1.0rc1
bitcoind \- manual page for bitcoind v29.3.0rc1
.SH SYNOPSIS
.B bitcoind
[\fI\,options\/\fR]
.SH DESCRIPTION
Bitcoin Core daemon version v29.1.0rc1
Bitcoin Core daemon version v29.3.0rc1
.PP
The Bitcoin Core daemon (bitcoind) is a headless program that connects to the Bitcoin network to validate and relay transactions and blocks, as well as relaying addresses.
.PP
@@ -702,7 +702,7 @@ this size or less (default: 83)
\fB\-minrelaytxfee=\fR<amt>
.IP
Fees (in BTC/kvB) smaller than this are considered zero fee for
relaying, mining and transaction creation (default: 0.00001)
relaying, mining and transaction creation (default: 0.000001)
.HP
\fB\-permitbaremultisig\fR
.IP
@@ -729,7 +729,7 @@ Set maximum BIP141 block weight (default: 4000000)
\fB\-blockmintxfee=\fR<amt>
.IP
Set lowest fee rate (in BTC/kvB) for transactions to be included in
block creation. (default: 0.00001)
block creation. (default: 0.00000001)
.HP
\fB\-blockreservedweight=\fR<n>
.IP

@@ -1,6 +1,6 @@
Bitcoin Core version 29.1rc1 is now available from:
Bitcoin Core version 29.3rc1 is now available from:

<https://bitcoincore.org/bin/bitcoin-core-29.1/test.rc1/>
<https://bitcoincore.org/bin/bitcoin-core-29.3/test.rc1/>

This release includes various bug fixes and performance
improvements, as well as updated translations.
@@ -37,142 +37,62 @@ unsupported systems.
Notable changes
===============

### Mempool Policy
### P2P

- The maximum number of potentially executed legacy signature operations in a
  single standard transaction is now limited to 2500. Signature operations in all
  previous output scripts, in all input scripts, as well as all P2SH redeem
  scripts (if there are any) are counted toward the limit. The new limit is
  assumed to not affect any known typically formed standard transactions. The
  change was done to prepare for a possible BIP54 deployment in the future.
- #33050 net, validation: don't punish peers for consensus-invalid txs
- #33723 chainparams: remove dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us

- #32521 policy: make pathological transactions packed with legacy sigops non-standard
### Validation

### Updated Settings

- The `-maxmempool` and `-dbcache` startup parameters are now capped on
  32-bit systems to 500MB and 1GiB respectively.

- #32530 node: cap -maxmempool and -dbcache values for 32-bit
- #32473 Introduce per-txin sighash midstate cache for legacy/p2sh/segwitv0 scripts
- #33105 validation: detect witness stripping without re-running Script checks

### Wallet

- #31757 wallet: fix crash on double block disconnection
- #32553 wallet: Fix logging of wallet version
- #33268 wallet: Identify transactions spending 0-value outputs, and add tests for anchor outputs in a wallet
- #34156 wallet: fix unnamed legacy wallet migration failure
- #34226 wallet: test: Relative wallet failed migration cleanup
- #34123 wallet: migration, avoid creating spendable wallet from a watch-only legacy wallet
- #34215 wallettool: fix unnamed createfromdump failure walletsdir deletion

### P2P
### Mining

- #32826 p2p: add more bad ports

### Test

- #32069 test: fix intermittent failure in wallet_reorgsrestore.py
- #32286 test: Handle empty string returned by CLI as None in RPC tests
- #32312 test: Fix feature_pruning test after nTime typo fix
- #32336 test: Suppress upstream -Wduplicate-decl-specifier in bpfcc
- #32463 test: fix an incorrect feature_fee_estimation.py subtest
- #32483 test: fix two intermittent failures in wallet_basic.py
- #32630 test: fix sync function in rpc_psbt.py
- #32765 test: Fix list index out of range error in feature_bip68_sequence.py
- #32742 test: fix catchup loop in outbound eviction functional test
- #32823 test: Fix wait_for_getheaders() call in test_outbound_eviction_blocks_relay_only()
- #32833 test: Add msgtype to msg_generic slots
- #32841 feature_taproot: sample tx version border values more
- #32850 test: check P2SH sigop count for coinbase tx
- #32859 test: correctly detect nonstd TRUC tx vsize in feature_taproot
- #33001 test: Do not pass tests on unhandled exceptions

### Util

- #32248 Remove support for RNDR/RNDRRS for aarch64
- #33475 bugfix: miner: fix `addPackageTxs` unsigned integer overflow

### Build

- #32356 cmake: Respect user-provided configuration-specific flags
- #32437 crypto: disable ASan for sha256_sse4 with Clang
- #32469 cmake: Allow WITH_DBUS on all Unix-like systems
- #32439 guix: accommodate migration to codeberg
- #32551 cmake: Add missed SSE41_CXXFLAGS
- #32568 depends: use "mkdir -p" when installing xproto
- #32678 guix: warn and abort when SOURCE_DATE_EPOCH is set
- #32690 depends: fix SHA256SUM command on OpenBSD (use GNU mode output)
- #32716 depends: Override host compilers for FreeBSD and OpenBSD
- #32760 depends: capnp 1.2.0
- #32798 build: add root dir to CMAKE_PREFIX_PATH in toolchain
- #32805 cmake: Use HINTS instead of PATHS in find_* commands
- #32814 cmake: Explicitly specify Boost_ROOT for Homebrew's package
- #32837 depends: fix libevent _WIN32_WINNT usage
- #32943 depends: Force CMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE
- #32954 cmake: Drop no longer necessary "cmakeMinimumRequired" object
- #34227 guix: Fix `osslsigncode` tests

### Gui
### Documentation

- #864 Crash fix, disconnect numBlocksChanged() signal during shutdown
- #868 Replace stray tfm::format to cerr with qWarning
- #33623 doc: document capnproto and libmultiprocess deps in 29.x

### Doc
### Test

- #32333 doc: Add missing top-level description to pruneblockchain RPC
- #32353 doc: Fix fuzz test_runner.py path
- #32389 doc: Fix test_bitcoin path
- #32607 rpc: Note in fundrawtransaction doc, fee rate is for package
- #32679 doc: update tor docs to use bitcoind binary from path
- #32693 depends: fix cmake compatibility error for freetype
- #32696 doc: make -DWITH_ZMQ=ON explicit on build-unix.md
- #32708 rpc, doc: update listdescriptors RPC help
- #32711 doc: add missing packages for BSDs (cmake, gmake, curl) to depends/README.md
- #32719 doc, windows: CompanyName "Bitcoin" => "Bitcoin Core project"
- #32776 doc: taproot became always active in v24.0
- #32777 doc: fix Transifex 404s
- #32846 doc: clarify that the "-j N" goes after the "--build build" part
- #32858 doc: Add workaround for vcpkg issue with paths with embedded spaces

### CI

- #32184 ci: Add workaround for vcpkg's libevent package
- #33612 test: change log rate limit version gate

### Misc

- #32187 refactor: Remove spurious virtual from final ~CZMQNotificationInterface
- #32454 tracing: fix invalid argument in mempool_monitor
- #32771 contrib: tracing: Fix read of pmsg_type in p2p_monitor.py
- #33508 ci: fix buildx gha cache authentication on forks
- #33581 ci: Properly include $FILE_ENV in DEPENDS_HASH

Credits
=======

Thanks to everyone who directly contributed to this release:

- achow101
- Anthony Towns
- Antoine Poinsot
- benthecarman
- bigspider
- Brandon Odiwuor
- brunoerg
- davidgumberg
- dergoegge
- enirox001
- Ava Chow
- David Gumberg
- Eugene Siegel
- fanquake
- furszy
- instagibbs
- Hennadii Stepanov
- hodlinator
- ismaelsadeeq
- jb55
- jlopp
- josibake
- laanwj
- luisschwab
- MarcoFalke
- Martin Zumsande
- monlovesmango
- nervana21
- pablomartin4btc
- rkrux
- ryanofsky
- Sjors
- theStack
- Pieter Wuille
- SatsAndSports
- willcl-ark
- zaidmstrr

As well as to everyone who helped with translations on
[Transifex](https://explore.transifex.com/bitcoin/bitcoin/).

@@ -84,7 +84,7 @@ For instance:

    $ bitcoind -zmqpubhashtx=tcp://127.0.0.1:28332 \
               -zmqpubhashtx=tcp://192.168.1.2:28332 \
               -zmqpubhashblock="tcp://[::1]:28333" \
               -zmqpubrawtx=ipc:///tmp/bitcoind.tx.raw \
               -zmqpubrawtx=unix:/tmp/bitcoind.tx.raw \
               -zmqpubhashtxhwm=10000

Each PUB notification has a topic and body, where the header

@@ -581,7 +581,7 @@
#datacarriersize=1

# Fees (in BTC/kvB) smaller than this are considered zero fee for
# relaying, mining and transaction creation (default: 0.00001)
# relaying, mining and transaction creation (default: 0.000001)
#minrelaytxfee=<amt>

# Relay transactions creating non-P2SH multisig outputs (default: 1)
@@ -605,7 +605,7 @@
#blockmaxweight=<n>

# Set lowest fee rate (in BTC/kvB) for transactions to be included in
# block creation. (default: 0.00001)
# block creation. (default: 0.00000001)
#blockmintxfee=<amt>

# Reserve space for the fixed-size block header plus the largest coinbase

@@ -180,7 +180,7 @@ bool PartiallyDownloadedBlock::IsTxAvailable(size_t index) const
    return txn_available[index] != nullptr;
}

ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing)
ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active)
{
    if (header.IsNull()) return READ_STATUS_INVALID;

@@ -205,16 +205,11 @@ ReadStatus PartiallyDownloadedBlock::FillBlock(CBlock& block, const std::vector<
    if (vtx_missing.size() != tx_missing_offset)
        return READ_STATUS_INVALID;

    BlockValidationState state;
    CheckBlockFn check_block = m_check_block_mock ? m_check_block_mock : CheckBlock;
    if (!check_block(block, state, Params().GetConsensus(), /*fCheckPoW=*/true, /*fCheckMerkleRoot=*/true)) {
        // TODO: We really want to just check merkle tree manually here,
        // but that is expensive, and CheckBlock caches a block's
        // "checked-status" (in the CBlock?). CBlock should be able to
        // check its own merkle root and cache that check.
        if (state.GetResult() == BlockValidationResult::BLOCK_MUTATED)
            return READ_STATUS_FAILED; // Possible Short ID collision
        return READ_STATUS_CHECKBLOCK_FAILED;
    // Check for possible mutations early now that we have a seemingly good block
    IsBlockMutatedFn check_mutated{m_check_block_mutated_mock ? m_check_block_mutated_mock : IsBlockMutated};
    if (check_mutated(/*block=*/block,
                      /*check_witness_root=*/segwit_active)) {
        return READ_STATUS_FAILED; // Possible Short ID collision
    }

    LogDebug(BCLog::CMPCTBLOCK, "Successfully reconstructed block %s with %lu txn prefilled, %lu txn from mempool (incl at least %lu from extra pool) and %lu txn requested\n", hash.ToString(), prefilled_count, mempool_count, extra_count, vtx_missing.size());

@@ -84,8 +84,6 @@ typedef enum ReadStatus_t
    READ_STATUS_OK,
    READ_STATUS_INVALID, // Invalid object, peer is sending bogus crap
    READ_STATUS_FAILED,  // Failed to process object
    READ_STATUS_CHECKBLOCK_FAILED, // Used only by FillBlock to indicate a
                                   // failure in CheckBlock.
} ReadStatus;

class CBlockHeaderAndShortTxIDs {
@@ -141,15 +139,16 @@ public:
    CBlockHeader header;

    // Can be overridden for testing
    using CheckBlockFn = std::function<bool(const CBlock&, BlockValidationState&, const Consensus::Params&, bool, bool)>;
    CheckBlockFn m_check_block_mock{nullptr};
    using IsBlockMutatedFn = std::function<bool(const CBlock&, bool)>;
    IsBlockMutatedFn m_check_block_mutated_mock{nullptr};

    explicit PartiallyDownloadedBlock(CTxMemPool* poolIn) : pool(poolIn) {}

    // extra_txn is a list of extra orphan/conflicted/etc transactions to look at
    ReadStatus InitData(const CBlockHeaderAndShortTxIDs& cmpctblock, const std::vector<CTransactionRef>& extra_txn);
    bool IsTxAvailable(size_t index) const;
    ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing);
    // segwit_active enforces witness mutation checks just before reporting a healthy status
    ReadStatus FillBlock(CBlock& block, const std::vector<CTransactionRef>& vtx_missing, bool segwit_active);
};

#endif // BITCOIN_BLOCKENCODINGS_H

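The mock member above lets tests substitute the mutation check. A hedged, illustrative fragment (hypothetical test code, not part of this changeset) that forces the check to fail and exercises the short-ID-collision path:

```cpp
// Hypothetical test fragment: stub the mutation check so FillBlock
// reports a possible short ID collision.
PartiallyDownloadedBlock pdb{/*poolIn=*/nullptr}; // assumption: pool unused on this path
pdb.m_check_block_mutated_mock = [](const CBlock&, bool /*check_witness_root*/) {
    return true; // pretend the reconstructed block is mutated
};
// ... InitData()/IsTxAvailable() would run here as in a real reconstruction ...
CBlock block;
const ReadStatus status{pdb.FillBlock(block, /*vtx_missing=*/{}, /*segwit_active=*/true)};
assert(status == READ_STATUS_FAILED); // treated as a possible short ID collision
```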
@@ -627,7 +627,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
        Transform = sha256_x86_shani::Transform;
        TransformD64 = TransformD64Wrapper<sha256_x86_shani::Transform>;
        TransformD64_2way = sha256d64_x86_shani::Transform_2way;
        ret = "x86_shani(1way,2way)";
        ret = "x86_shani(1way;2way)";
        have_sse4 = false; // Disable SSE4/AVX2;
        have_avx2 = false;
    }
@@ -641,14 +641,14 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
#endif
#if defined(ENABLE_SSE41)
        TransformD64_4way = sha256d64_sse41::Transform_4way;
        ret += ",sse41(4way)";
        ret += ";sse41(4way)";
#endif
    }

#if defined(ENABLE_AVX2)
    if (have_avx2 && have_avx && enabled_avx) {
        TransformD64_8way = sha256d64_avx2::Transform_8way;
        ret += ",avx2(8way)";
        ret += ";avx2(8way)";
    }
#endif
#endif // defined(HAVE_GETCPUID)
@@ -682,7 +682,7 @@ std::string SHA256AutoDetect(sha256_implementation::UseImplementation use_implem
        Transform = sha256_arm_shani::Transform;
        TransformD64 = TransformD64Wrapper<sha256_arm_shani::Transform>;
        TransformD64_2way = sha256d64_arm_shani::Transform_2way;
        ret = "arm_shani(1way,2way)";
        ret = "arm_shani(1way;2way)";
    }
#endif
#endif // DISABLE_OPTIMIZED_SHA256

@@ -253,18 +253,13 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti
        return false;
    }

    // In the case of a reorg, ensure persisted block locator is not stale.
    // Don't commit here - the committed index state must never be ahead of the
    // flushed chainstate, otherwise unclean restarts would lead to index corruption.
    // Pruning has a minimum of 288 blocks-to-keep and getting the index
    // out of sync may be possible but a user's fault.
    // In case we reorg beyond the pruned depth, ReadBlock would
    // throw and lead to a graceful shutdown
    SetBestBlockIndex(new_tip);
    if (!Commit()) {
        // If commit fails, revert the best block index to avoid corruption.
        SetBestBlockIndex(current_tip);
        return false;
    }

    return true;
}

@@ -1384,6 +1384,15 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
        }
    }, std::chrono::minutes{5});

    if (args.GetBoolArg("-logratelimit", BCLog::DEFAULT_LOGRATELIMIT)) {
        LogInstance().SetRateLimiting(BCLog::LogRateLimiter::Create(
            [&scheduler](auto func, auto window) { scheduler.scheduleEvery(std::move(func), window); },
            BCLog::RATELIMIT_MAX_BYTES,
            BCLog::RATELIMIT_WINDOW));
    } else {
        LogInfo("Log rate limiting disabled");
    }

    assert(!node.validation_signals);
    node.validation_signals = std::make_unique<ValidationSignals>(std::make_unique<SerialTaskRunner>(scheduler));
    auto& validation_signals = *node.validation_signals;

@@ -38,6 +38,7 @@ void AddLoggingArgs(ArgsManager& argsman)
    argsman.AddArg("-logsourcelocations", strprintf("Prepend debug output with name of the originating source location (source file, line number and function name) (default: %u)", DEFAULT_LOGSOURCELOCATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
    argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
    argsman.AddArg("-loglevelalways", strprintf("Always prepend a category and level (default: %u)", DEFAULT_LOGLEVELALWAYS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
    argsman.AddArg("-logratelimit", strprintf("Apply rate limiting to unconditional logging to mitigate disk-filling attacks (default: %u)", BCLog::DEFAULT_LOGRATELIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST);
    argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -daemon. To disable logging to file, set -nodebuglogfile)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
    argsman.AddArg("-shrinkdebugfile", "Shrink debug.log file on client startup (default: 1 when no -debug)", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
}

@@ -146,7 +146,6 @@ public:
        // release ASAP to avoid it where possible.
        vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd
        vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9
        vSeeds.emplace_back("dnsseed.bitcoin.dashjr-list-of-p2p-nodes.us."); // Luke Dashjr
        vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd
        vSeeds.emplace_back("seed.btc.petertodd.net."); // Peter Todd, only supports x1, x5, x9, and xd
        vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost

src/logging.cpp

@@ -12,8 +12,10 @@
#include <util/time.h>

#include <array>
#include <cstring>
#include <map>
#include <optional>
#include <utility>

using util::Join;
using util::RemovePrefixView;
@@ -73,12 +75,12 @@ bool BCLog::Logger::StartLogging()
    // dump buffered messages from before we opened the log
    m_buffering = false;
    if (m_buffer_lines_discarded > 0) {
        LogPrintStr_(strprintf("Early logging buffer overflowed, %d log lines discarded.\n", m_buffer_lines_discarded), __func__, __FILE__, __LINE__, BCLog::ALL, Level::Info);
        LogPrintStr_(strprintf("Early logging buffer overflowed, %d log lines discarded.\n", m_buffer_lines_discarded), std::source_location::current(), BCLog::ALL, Level::Info, /*should_ratelimit=*/false);
    }
    while (!m_msgs_before_open.empty()) {
        const auto& buflog = m_msgs_before_open.front();
        std::string s{buflog.str};
        FormatLogStrInPlace(s, buflog.category, buflog.level, buflog.source_file, buflog.source_line, buflog.logging_function, buflog.threadname, buflog.now, buflog.mocktime);
        FormatLogStrInPlace(s, buflog.category, buflog.level, buflog.source_loc, buflog.threadname, buflog.now, buflog.mocktime);
        m_msgs_before_open.pop_front();

        if (m_print_to_file) FileWriteStr(s, m_fileout);
@@ -364,17 +366,50 @@ std::string BCLog::Logger::GetLogPrefix(BCLog::LogFlags category, BCLog::Level l

static size_t MemUsage(const BCLog::Logger::BufferedLog& buflog)
{
    return buflog.str.size() + buflog.logging_function.size() + buflog.source_file.size() + buflog.threadname.size() + memusage::MallocUsage(sizeof(memusage::list_node<BCLog::Logger::BufferedLog>));
    return memusage::DynamicUsage(buflog.str) +
           memusage::DynamicUsage(buflog.threadname) +
           memusage::MallocUsage(sizeof(memusage::list_node<BCLog::Logger::BufferedLog>));
}

void BCLog::Logger::FormatLogStrInPlace(std::string& str, BCLog::LogFlags category, BCLog::Level level, std::string_view source_file, int source_line, std::string_view logging_function, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const
BCLog::LogRateLimiter::LogRateLimiter(uint64_t max_bytes, std::chrono::seconds reset_window)
    : m_max_bytes{max_bytes}, m_reset_window{reset_window} {}

std::shared_ptr<BCLog::LogRateLimiter> BCLog::LogRateLimiter::Create(
    SchedulerFunction&& scheduler_func, uint64_t max_bytes, std::chrono::seconds reset_window)
{
    auto limiter{std::shared_ptr<LogRateLimiter>(new LogRateLimiter(max_bytes, reset_window))};
    std::weak_ptr<LogRateLimiter> weak_limiter{limiter};
    auto reset = [weak_limiter] {
        if (auto shared_limiter{weak_limiter.lock()}) shared_limiter->Reset();
    };
    scheduler_func(reset, limiter->m_reset_window);
    return limiter;
}
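
Create hands the scheduler a callback that captures only a weak_ptr, so the periodic reset never extends the limiter's lifetime; once the last shared_ptr is dropped the callback silently does nothing. The pattern in isolation (hypothetical Limiter type):

```cpp
#include <functional>
#include <memory>

struct Limiter { void Reset() {} };

// Returns a callback that is safe to fire after the Limiter is gone:
// it holds only a weak reference, so a destroyed limiter makes it a no-op.
std::function<void()> MakePeriodicReset(const std::shared_ptr<Limiter>& limiter) {
    std::weak_ptr<Limiter> weak{limiter};
    return [weak] {
        if (auto shared = weak.lock()) shared->Reset();
    };
}

int main() {
    auto limiter = std::make_shared<Limiter>();
    auto reset = MakePeriodicReset(limiter);
    reset();         // resets the live limiter
    limiter.reset(); // drop the last strong reference
    reset();         // harmless: lock() fails, nothing happens
    return 0;
}
```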

BCLog::LogRateLimiter::Status BCLog::LogRateLimiter::Consume(
    const std::source_location& source_loc,
    const std::string& str)
{
    StdLockGuard scoped_lock(m_mutex);
    auto& stats{m_source_locations.try_emplace(source_loc, m_max_bytes).first->second};
    Status status{stats.m_dropped_bytes > 0 ? Status::STILL_SUPPRESSED : Status::UNSUPPRESSED};

    if (!stats.Consume(str.size()) && status == Status::UNSUPPRESSED) {
        status = Status::NEWLY_SUPPRESSED;
        m_suppression_active = true;
    }

    return status;
}

void BCLog::Logger::FormatLogStrInPlace(std::string& str, BCLog::LogFlags category, BCLog::Level level, const std::source_location& source_loc, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const
{
    if (!str.ends_with('\n')) str.push_back('\n');

    str.insert(0, GetLogPrefix(category, level));

    if (m_log_sourcelocations) {
        str.insert(0, strprintf("[%s:%d] [%s] ", RemovePrefixView(source_file, "./"), source_line, logging_function));
        str.insert(0, strprintf("[%s:%d] [%s] ", RemovePrefixView(source_loc.file_name(), "./"), source_loc.line(), source_loc.function_name()));
    }

    if (m_log_threadnames) {
@@ -384,28 +419,27 @@ void BCLog::Logger::FormatLogStrInPlace(std::string& str, BCLog::LogFlags catego
        str.insert(0, LogTimestampStr(now, mocktime));
    }

void BCLog::Logger::LogPrintStr(std::string_view str, std::string_view logging_function, std::string_view source_file, int source_line, BCLog::LogFlags category, BCLog::Level level)
void BCLog::Logger::LogPrintStr(std::string_view str, std::source_location&& source_loc, BCLog::LogFlags category, BCLog::Level level, bool should_ratelimit)
{
    StdLockGuard scoped_lock(m_cs);
    return LogPrintStr_(str, logging_function, source_file, source_line, category, level);
    return LogPrintStr_(str, std::move(source_loc), category, level, should_ratelimit);
}

void BCLog::Logger::LogPrintStr_(std::string_view str, std::string_view logging_function, std::string_view source_file, int source_line, BCLog::LogFlags category, BCLog::Level level)
// NOLINTNEXTLINE(misc-no-recursion)
void BCLog::Logger::LogPrintStr_(std::string_view str, std::source_location&& source_loc, BCLog::LogFlags category, BCLog::Level level, bool should_ratelimit)
{
    std::string str_prefixed = LogEscapeMessage(str);

    if (m_buffering) {
        {
            BufferedLog buf{
                .now=SystemClock::now(),
                .mocktime=GetMockTime(),
                .str=str_prefixed,
                .logging_function=std::string(logging_function),
                .source_file=std::string(source_file),
                .threadname=util::ThreadGetInternalName(),
                .source_line=source_line,
                .category=category,
                .level=level,
                .now = SystemClock::now(),
                .mocktime = GetMockTime(),
                .str = str_prefixed,
                .threadname = util::ThreadGetInternalName(),
                .source_loc = std::move(source_loc),
                .category = category,
                .level = level,
            };
            m_cur_buffer_memusage += MemUsage(buf);
            m_msgs_before_open.push_back(std::move(buf));
@@ -424,7 +458,31 @@ void BCLog::Logger::LogPrintStr_(std::string_view str, std::string_view logging_
        return;
    }

    FormatLogStrInPlace(str_prefixed, category, level, source_file, source_line, logging_function, util::ThreadGetInternalName(), SystemClock::now(), GetMockTime());
    FormatLogStrInPlace(str_prefixed, category, level, source_loc, util::ThreadGetInternalName(), SystemClock::now(), GetMockTime());
    bool ratelimit{false};
    if (should_ratelimit && m_limiter) {
        auto status{m_limiter->Consume(source_loc, str_prefixed)};
        if (status == LogRateLimiter::Status::NEWLY_SUPPRESSED) {
            // NOLINTNEXTLINE(misc-no-recursion)
            LogPrintStr_(strprintf(
                "Excessive logging detected from %s:%d (%s): >%d bytes logged during "
                "the last time window of %is. Suppressing logging to disk from this "
                "source location until time window resets. Console logging "
                "unaffected. Last log entry.",
                source_loc.file_name(), source_loc.line(), source_loc.function_name(),
                m_limiter->m_max_bytes,
                Ticks<std::chrono::seconds>(m_limiter->m_reset_window)),
                std::source_location::current(), LogFlags::ALL, Level::Warning, /*should_ratelimit=*/false); // with should_ratelimit=false, this cannot lead to infinite recursion
        } else if (status == LogRateLimiter::Status::STILL_SUPPRESSED) {
            ratelimit = true;
        }
    }

    // To avoid confusion caused by dropped log messages when debugging an issue,
    // we prefix log lines with "[*]" when there are any suppressed source locations.
    if (m_limiter && m_limiter->SuppressionsActive()) {
        str_prefixed.insert(0, "[*] ");
    }

    if (m_print_to_console) {
        // print to console
@@ -434,7 +492,7 @@ void BCLog::Logger::LogPrintStr_(std::string_view str, std::string_view logging_
    for (const auto& cb : m_print_callbacks) {
        cb(str_prefixed);
    }
    if (m_print_to_file) {
    if (m_print_to_file && !ratelimit) {
        assert(m_fileout != nullptr);

        // reopen the log file, if requested
@@ -492,6 +550,36 @@ void BCLog::Logger::ShrinkDebugFile()
    fclose(file);
}

void BCLog::LogRateLimiter::Reset()
{
    decltype(m_source_locations) source_locations;
    {
        StdLockGuard scoped_lock(m_mutex);
        source_locations.swap(m_source_locations);
        m_suppression_active = false;
    }
    for (const auto& [source_loc, stats] : source_locations) {
        if (stats.m_dropped_bytes == 0) continue;
        LogPrintLevel_(
            LogFlags::ALL, Level::Warning, /*should_ratelimit=*/false,
            "Restarting logging from %s:%d (%s): %d bytes were dropped during the last %ss.",
            source_loc.file_name(), source_loc.line(), source_loc.function_name(),
            stats.m_dropped_bytes, Ticks<std::chrono::seconds>(m_reset_window));
    }
}

bool BCLog::LogRateLimiter::Stats::Consume(uint64_t bytes)
{
    if (bytes > m_available_bytes) {
        m_dropped_bytes += bytes;
        m_available_bytes = 0;
        return false;
    }

    m_available_bytes -= bytes;
    return true;
}
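
Stats::Consume implements a fixed-window byte budget: the balance is pinned at zero once exhausted, and every oversized attempt is tallied as dropped until the next Reset(). A standalone sketch of the same accounting (hypothetical ByteBudget type):

```cpp
#include <cassert>
#include <cstdint>

// Standalone equivalent of the accounting above: a fixed-window byte
// budget that pins itself at zero and counts overflow as dropped.
struct ByteBudget {
    uint64_t available_bytes;
    uint64_t dropped_bytes{0};

    bool Consume(uint64_t bytes) {
        if (bytes > available_bytes) {
            dropped_bytes += bytes;
            available_bytes = 0;
            return false;
        }
        available_bytes -= bytes;
        return true;
    }
};

int main() {
    ByteBudget budget{10};
    assert(budget.Consume(8));  // 2 bytes left
    assert(!budget.Consume(5)); // over budget: counted as dropped, pinned at 0
    assert(budget.dropped_bytes == 5);
    return 0;
}
```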

bool BCLog::Logger::SetLogLevel(std::string_view level_str)
{
    const auto level = GetLogLevel(level_str);

src/logging.h

@@ -6,6 +6,7 @@
#ifndef BITCOIN_LOGGING_H
#define BITCOIN_LOGGING_H

#include <crypto/siphash.h>
#include <threadsafety.h>
#include <tinyformat.h>
#include <util/fs.h>
@@ -14,11 +15,15 @@

#include <atomic>
#include <cstdint>
#include <cstring>
#include <functional>
#include <list>
#include <memory>
#include <mutex>
#include <source_location>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>

static const bool DEFAULT_LOGTIMEMICROS = false;
@@ -31,6 +36,24 @@ extern const char * const DEFAULT_DEBUGLOGFILE;

extern bool fLogIPs;

struct SourceLocationEqual {
    bool operator()(const std::source_location& lhs, const std::source_location& rhs) const noexcept
    {
        return lhs.line() == rhs.line() && std::string_view(lhs.file_name()) == std::string_view(rhs.file_name());
    }
};

struct SourceLocationHasher {
    size_t operator()(const std::source_location& s) const noexcept
    {
        // Use CSipHasher(0, 0) as a simple way to get uniform distribution.
        return size_t(CSipHasher(0, 0)
                          .Write(s.line())
                          .Write(MakeUCharSpan(std::string_view{s.file_name()}))
                          .Finalize());
    }
};
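
std::source_location provides no std::hash specialization and no operator==, which is why the limiter's map (declared further below) supplies these functors explicitly; a location is identified by its file name and line. A minimal illustration of keying a container on call sites (hypothetical example, reusing the functors above):

```cpp
#include <source_location>
#include <unordered_map>

// Hypothetical illustration: count calls per call site. file_name() and
// line() together identify a distinct logging location.
std::unordered_map<std::source_location, int,
                   SourceLocationHasher, SourceLocationEqual> g_hits;

void CountCallSite(std::source_location loc = std::source_location::current()) {
    ++g_hits[loc]; // one entry per distinct call site
}
```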

struct LogCategory {
    std::string category;
    bool active;
@@ -82,6 +105,69 @@ namespace BCLog {
    };
    constexpr auto DEFAULT_LOG_LEVEL{Level::Debug};
    constexpr size_t DEFAULT_MAX_LOG_BUFFER{1'000'000}; // buffer up to 1MB of log data prior to StartLogging
    constexpr uint64_t RATELIMIT_MAX_BYTES{1024 * 1024}; // maximum number of bytes per source location that can be logged within the RATELIMIT_WINDOW
    constexpr auto RATELIMIT_WINDOW{1h}; // time window after which log ratelimit stats are reset
    constexpr bool DEFAULT_LOGRATELIMIT{true};

    //! Fixed window rate limiter for logging.
    class LogRateLimiter
    {
    public:
        //! Keeps track of an individual source location and how many available bytes are left for logging from it.
        struct Stats {
            //! Remaining bytes
            uint64_t m_available_bytes;
            //! Number of bytes that were consumed but didn't fit in the available bytes.
            uint64_t m_dropped_bytes{0};

            Stats(uint64_t max_bytes) : m_available_bytes{max_bytes} {}
            //! Updates internal accounting and returns true if enough available_bytes were remaining
            bool Consume(uint64_t bytes);
        };

    private:
        mutable StdMutex m_mutex;

        //! Stats for each source location that has attempted to log something.
        std::unordered_map<std::source_location, Stats, SourceLocationHasher, SourceLocationEqual> m_source_locations GUARDED_BY(m_mutex);
        //! Whether any log locations are suppressed. Cached view on m_source_locations for performance reasons.
        std::atomic<bool> m_suppression_active{false};
        LogRateLimiter(uint64_t max_bytes, std::chrono::seconds reset_window);

    public:
        using SchedulerFunction = std::function<void(std::function<void()>, std::chrono::milliseconds)>;
        /**
         * @param scheduler_func  Callable object used to schedule resetting the window. The first
         *                        parameter is the function to be executed, and the second is the
         *                        reset_window interval.
         * @param max_bytes       Maximum number of bytes that can be logged for each source
         *                        location.
         * @param reset_window    Time window after which the stats are reset.
         */
        static std::shared_ptr<LogRateLimiter> Create(
            SchedulerFunction&& scheduler_func,
            uint64_t max_bytes,
            std::chrono::seconds reset_window);
        //! Maximum number of bytes logged per location per window.
        const uint64_t m_max_bytes;
        //! Interval after which the window is reset.
        const std::chrono::seconds m_reset_window;
        //! Suppression status of a source log location.
        enum class Status {
            UNSUPPRESSED,     // string fits within the limit
            NEWLY_SUPPRESSED, // suppression has started since this string
            STILL_SUPPRESSED, // suppression is still ongoing
        };
        //! Consumes `source_loc`'s available bytes corresponding to the size of the (formatted)
        //! `str` and returns its status.
        [[nodiscard]] Status Consume(
            const std::source_location& source_loc,
            const std::string& str) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
        //! Resets all usage to zero. Called periodically by the scheduler.
        void Reset() EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
        //! Returns true if any log locations are currently being suppressed.
        bool SuppressionsActive() const { return m_suppression_active; }
    };

    class Logger
    {
@@ -89,8 +175,8 @@ namespace BCLog {
        struct BufferedLog {
            SystemClock::time_point now;
            std::chrono::seconds mocktime;
            std::string str, logging_function, source_file, threadname;
            int source_line;
            std::string str, threadname;
            std::source_location source_loc;
            LogFlags category;
            Level level;
        };
@@ -105,6 +191,9 @@ namespace BCLog {
        size_t m_cur_buffer_memusage GUARDED_BY(m_cs){0};
        size_t m_buffer_lines_discarded GUARDED_BY(m_cs){0};

        //! Manages the rate limiting of each log location.
        std::shared_ptr<LogRateLimiter> m_limiter GUARDED_BY(m_cs);

        //! Category-specific log level. Overrides `m_log_level`.
        std::unordered_map<LogFlags, Level> m_category_log_levels GUARDED_BY(m_cs);

@@ -115,7 +204,7 @@ namespace BCLog {
        /** Log categories bitfield. */
        std::atomic<CategoryMask> m_categories{BCLog::NONE};

        void FormatLogStrInPlace(std::string& str, LogFlags category, Level level, std::string_view source_file, int source_line, std::string_view logging_function, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const;
        void FormatLogStrInPlace(std::string& str, LogFlags category, Level level, const std::source_location& source_loc, std::string_view threadname, SystemClock::time_point now, std::chrono::seconds mocktime) const;

        std::string LogTimestampStr(SystemClock::time_point now, std::chrono::seconds mocktime) const;

@@ -123,7 +212,7 @@ namespace BCLog {
        std::list<std::function<void(const std::string&)>> m_print_callbacks GUARDED_BY(m_cs) {};

        /** Send a string to the log output (internal) */
        void LogPrintStr_(std::string_view str, std::string_view logging_function, std::string_view source_file, int source_line, BCLog::LogFlags category, BCLog::Level level)
        void LogPrintStr_(std::string_view str, std::source_location&& source_loc, BCLog::LogFlags category, BCLog::Level level, bool should_ratelimit)
            EXCLUSIVE_LOCKS_REQUIRED(m_cs);

        std::string GetLogPrefix(LogFlags category, Level level) const;
@@ -142,7 +231,7 @@ namespace BCLog {
        std::atomic<bool> m_reopen_file{false};

        /** Send a string to the log output */
        void LogPrintStr(std::string_view str, std::string_view logging_function, std::string_view source_file, int source_line, BCLog::LogFlags category, BCLog::Level level)
        void LogPrintStr(std::string_view str, std::source_location&& source_loc, BCLog::LogFlags category, BCLog::Level level, bool should_ratelimit)
            EXCLUSIVE_LOCKS_REQUIRED(!m_cs);

        /** Returns whether logs will be written to any output */
@@ -172,6 +261,12 @@ namespace BCLog {
        /** Only for testing */
        void DisconnectTestLogger() EXCLUSIVE_LOCKS_REQUIRED(!m_cs);

        void SetRateLimiting(std::shared_ptr<LogRateLimiter> limiter) EXCLUSIVE_LOCKS_REQUIRED(!m_cs)
        {
            StdLockGuard scoped_lock(m_cs);
            m_limiter = std::move(limiter);
        }

        /** Disable logging
         * This offers a slight speedup and slightly smaller memory usage
         * compared to leaving the logging system in its default state.
@@ -239,7 +334,7 @@ static inline bool LogAcceptCategory(BCLog::LogFlags category, BCLog::Level leve
bool GetLogCategory(BCLog::LogFlags& flag, std::string_view str);

template <typename... Args>
inline void LogPrintFormatInternal(std::string_view logging_function, std::string_view source_file, const int source_line, const BCLog::LogFlags flag, const BCLog::Level level, util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
inline void LogPrintFormatInternal(std::source_location&& source_loc, BCLog::LogFlags flag, BCLog::Level level, bool should_ratelimit, util::ConstevalFormatString<sizeof...(Args)> fmt, const Args&... args)
{
    if (LogInstance().Enabled()) {
        std::string log_msg;
@@ -248,19 +343,19 @@ inline void LogPrintFormatInternal(std::string_view logging_function, std::strin
        } catch (tinyformat::format_error& fmterr) {
            log_msg = "Error \"" + std::string{fmterr.what()} + "\" while formatting log message: " + fmt.fmt;
        }
        LogInstance().LogPrintStr(log_msg, logging_function, source_file, source_line, flag, level);
        LogInstance().LogPrintStr(log_msg, std::move(source_loc), flag, level, should_ratelimit);
    }
}

#define LogPrintLevel_(category, level, ...) LogPrintFormatInternal(__func__, __FILE__, __LINE__, category, level, __VA_ARGS__)
#define LogPrintLevel_(category, level, should_ratelimit, ...) LogPrintFormatInternal(std::source_location::current(), category, level, should_ratelimit, __VA_ARGS__)

// Log unconditionally.
// Log unconditionally. Uses basic rate limiting to mitigate disk filling attacks.
// Be conservative when using functions that unconditionally log to debug.log!
// It should not be the case that an inbound peer can fill up a user's storage
// with debug.log entries.
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, __VA_ARGS__)
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, __VA_ARGS__)
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, __VA_ARGS__)
#define LogInfo(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/true, __VA_ARGS__)
#define LogWarning(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Warning, /*should_ratelimit=*/true, __VA_ARGS__)
#define LogError(...) LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Error, /*should_ratelimit=*/true, __VA_ARGS__)

// Deprecated unconditional logging.
#define LogPrintf(...) LogInfo(__VA_ARGS__)
@@ -268,12 +363,18 @@ inline void LogPrintFormatInternal(std::string_view logging_function, std::strin
// Use a macro instead of a function for conditional logging to prevent
// evaluating arguments when logging for the category is not enabled.

// Log conditionally, prefixing the output with the passed category name and severity level.
#define LogPrintLevel(category, level, ...) \
    do { \
        if (LogAcceptCategory((category), (level))) { \
            LogPrintLevel_(category, level, __VA_ARGS__); \
        } \
// Log by prefixing the output with the passed category name and severity level. This can either
// log conditionally if the category is allowed or unconditionally if level >= BCLog::Level::Info
// is passed. If this function logs unconditionally, logging to disk is rate-limited. This is
// important so that callers don't need to worry about accidentally introducing a disk-fill
// vulnerability if level >= Info is used. Additionally, users specifying -debug are assumed to be
// developers or power users who are aware that -debug may cause excessive disk usage due to logging.
#define LogPrintLevel(category, level, ...) \
    do { \
        if (LogAcceptCategory((category), (level))) { \
            bool rate_limit{level >= BCLog::Level::Info}; \
            LogPrintLevel_(category, level, rate_limit, __VA_ARGS__); \
        } \
    } while (0)
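
Under these definitions the unconditional macros always request rate limiting, while LogPrintLevel requests it only for Info and above. Hypothetical call sites, for illustration:

```cpp
// Unconditional logging: rate-limited on disk, so a misbehaving peer
// cannot grow debug.log without bound through this line.
LogInfo("New outbound peer connected: peer=%d", 7);

// Category logging at Debug level: gated on -debug=net and not rate-limited,
// since level < Info and -debug users opt in to verbose output.
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "received: %s peer=%d", "getdata", 7);
```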

// Log conditionally, prefixing the output with the passed category name.

src/net.cpp

@@ -575,9 +575,9 @@ void CNode::CloseSocketDisconnect()
    m_i2p_sam_session.reset();
}

void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const {
void CConnman::AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional<CNetAddr> addr, const std::vector<NetWhitelistPermissions>& ranges) const {
    for (const auto& subnet : ranges) {
        if (subnet.m_subnet.Match(addr)) {
        if (addr.has_value() && subnet.m_subnet.Match(addr.value())) {
            NetPermissions::AddFlag(flags, subnet.m_flags);
        }
    }
@@ -1767,7 +1767,11 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
{
    int nInbound = 0;

    AddWhitelistPermissionFlags(permission_flags, addr, vWhitelistedRangeIncoming);
    const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();

    // Tor inbound connections do not reveal the peer's actual network address.
    // Therefore do not apply address-based whitelist permissions to them.
    AddWhitelistPermissionFlags(permission_flags, inbound_onion ? std::optional<CNetAddr>{} : addr, vWhitelistedRangeIncoming);

    {
        LOCK(m_nodes_mutex);
@@ -1822,7 +1826,6 @@ void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
    NodeId id = GetNewNodeId();
    uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();

    const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
    // The V2Transport transparently falls back to V1 behavior when an incoming V1 connection is
    // detected, so use it whenever we signal NODE_P2P_V2.
    ServiceFlags local_services = GetLocalServices();

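Passing an empty std::optional for onion inbounds means no subnet can ever match, so no address-based permission is granted. A reduced sketch of the guard (hypothetical Subnet/AnyMatch names):

```cpp
#include <cassert>
#include <optional>
#include <vector>

// Hypothetical reduction of the whitelist change: an absent address
// (e.g. a Tor inbound, whose real address is unknown) never matches.
struct Subnet { int addr; bool Match(int a) const { return a == addr; } };

bool AnyMatch(std::optional<int> addr, const std::vector<Subnet>& ranges) {
    for (const auto& subnet : ranges) {
        if (addr.has_value() && subnet.Match(addr.value())) return true;
    }
    return false;
}

int main() {
    const std::vector<Subnet> ranges{{42}};
    assert(AnyMatch(42, ranges));            // known address: may match
    assert(!AnyMatch(std::nullopt, ranges)); // unknown address: never matches
    return 0;
}
```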
@@ -1364,7 +1364,7 @@ private:

    bool AttemptToEvictConnection();
    CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type, bool use_v2transport) EXCLUSIVE_LOCKS_REQUIRED(!m_unused_i2p_sessions_mutex);
    void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr, const std::vector<NetWhitelistPermissions>& ranges) const;
    void AddWhitelistPermissionFlags(NetPermissionFlags& flags, std::optional<CNetAddr> addr, const std::vector<NetWhitelistPermissions>& ranges) const;

    void DeleteNode(CNode* pnode);

@@ -553,12 +553,6 @@ private:
bool via_compact_block, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

/**
* Potentially disconnect and discourage a node based on the contents of a TxValidationState object
*/
void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);

/** Maybe disconnect a peer and discourage future connections from its address.
*
* @param[in] pnode The node to check.
@@ -1805,32 +1799,6 @@ void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
}
}

void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
case TxValidationResult::TX_RESULT_UNSET:
break;
// The node is providing invalid data:
case TxValidationResult::TX_CONSENSUS:
if (peer) Misbehaving(*peer, "");
return;
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
case TxValidationResult::TX_WITNESS_MUTATED:
case TxValidationResult::TX_WITNESS_STRIPPED:
case TxValidationResult::TX_CONFLICT:
case TxValidationResult::TX_MEMPOOL_POLICY:
case TxValidationResult::TX_NO_MEMPOOL:
case TxValidationResult::TX_RECONSIDERABLE:
case TxValidationResult::TX_UNKNOWN:
break;
}
}

bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
{
AssertLockHeld(cs_main);
@@ -2987,8 +2955,6 @@ std::optional<node::PackageToValidate> PeerManagerImpl::ProcessInvalidTx(NodeId
if (peer) AddKnownTx(*peer, parent_txid);
}

MaybePunishNodeForTx(nodeid, state);

return package_to_validate;
}

@@ -3314,7 +3280,21 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
}

PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);

if (partialBlock.header.IsNull()) {
// It is possible for the header to be empty if a previous call to FillBlock wiped the header, but left
// the PartiallyDownloadedBlock pointer around (i.e. did not call RemoveBlockRequest). In this case, we
// should not call LookupBlockIndex below.
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId());
Misbehaving(peer, "previous compact block reconstruction attempt failed");
LogDebug(BCLog::NET, "Peer %d sent compact block transactions multiple times", pfrom.GetId());
return;
}

// We should not have gotten this far in compact block processing unless it's attached to a known header
const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(partialBlock.header.hashPrevBlock))};
ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn,
/*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
Misbehaving(peer, "invalid compact block/non-matching block transactions");
@@ -3322,6 +3302,9 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
} else if (status == READ_STATUS_FAILED) {
if (first_in_flight) {
// Might have collided, fall back to getdata now :(
// We keep the failed partialBlock to disallow processing another compact block announcement from the same
// peer for the same block. We let the full block download below continue under the same m_downloading_since
// timer.
std::vector<CInv> invs;
invs.emplace_back(MSG_BLOCK | GetFetchFlags(peer), block_transactions.blockhash);
MakeAndPushMessage(pfrom, NetMsgType::GETDATA, invs);
@@ -3331,23 +3314,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
return;
}
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
// Note that CheckBlock can only fail for one of a few reasons:
// 1. bad-proof-of-work (impossible here, because we've already
// accepted the header)
// 2. merkleroot doesn't match the transactions given (already
// caught in FillBlock with READ_STATUS_FAILED, so
// impossible here)
// 3. the block is otherwise invalid (eg invalid coinbase,
// block is too big, too many legacy sigops, etc).
// So if CheckBlock failed, #3 is the only possibility.
// Under BIP 152, we don't discourage the peer unless proof of work is
// invalid (we don't require all the stateless checks to have
// been run). This is handled below, so just treat this as
// though the block was successfully read, and rely on the
// handling in ProcessNewBlock to ensure the block index is
// updated, etc.
// Block is okay for further processing
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // it is now an empty pointer
fBlockRead = true;
// mapBlockSource is used for potentially punishing peers and
@@ -4462,7 +4429,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
std::vector<CTransactionRef> dummy;
status = tempBlock.FillBlock(*pblock, dummy);
const CBlockIndex* prev_block{Assume(m_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock))};
status = tempBlock.FillBlock(*pblock, dummy,
/*segwit_active=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT));
if (status == READ_STATUS_OK) {
fBlockReconstructed = true;
}

@@ -65,6 +65,7 @@ util::Result<void> ApplyArgsManOptions(const ArgsManager& argsman, const CChainP
}
}

static_assert(DEFAULT_MIN_RELAY_TX_FEE == DEFAULT_INCREMENTAL_RELAY_FEE);
if (argsman.IsArgSet("-minrelaytxfee")) {
if (std::optional<CAmount> min_relay_feerate = ParseMoney(argsman.GetArg("-minrelaytxfee", ""))) {
// High fee check is done afterward in CWallet::Create()

@@ -394,8 +394,8 @@ void BlockAssembler::addPackageTxs(int& nPackagesSelected, int& nDescendantsUpda

++nConsecutiveFailed;

if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight >
m_options.nBlockMaxWeight - m_options.block_reserved_weight) {
if (nConsecutiveFailed > MAX_CONSECUTIVE_FAILURES && nBlockWeight +
m_options.block_reserved_weight > m_options.nBlockMaxWeight) {
// Give up if we're close to full and haven't succeeded in a while
break;
}

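The rewritten condition is algebraically the same whenever block_reserved_weight does not exceed nBlockMaxWeight, but because these are unsigned quantities the old subtraction form could silently wrap around if it ever did. A standalone demonstration with exaggerated made-up values:

#include <iostream>

int main()
{
    unsigned int max_weight{1000};
    unsigned int reserved{2000};
    unsigned int block_weight{10};

    // Subtraction form: max_weight - reserved wraps to a huge unsigned value,
    // so the "close to full" test could never trigger.
    std::cout << (block_weight > max_weight - reserved) << '\n';  // prints 0

    // Addition form: the comparison stays meaningful.
    std::cout << (block_weight + reserved > max_weight) << '\n';  // prints 1
}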
@@ -44,9 +44,6 @@ public:

/**
* Construct a fee rate from a fee in satoshis and a vsize in vB.
*
* param@[in] nFeePaid The fee paid by a transaction, in satoshis
* param@[in] num_bytes The vsize of a transaction, in vbytes
*/
CFeeRate(const CAmount& nFeePaid, uint32_t num_bytes);

@@ -344,6 +344,42 @@ bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
return true;
}

bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts)
{
if (tx.IsCoinBase()) {
return false;
}

int version;
std::vector<uint8_t> program;
for (const auto& txin: tx.vin) {
const auto& prev_spk{prevouts.AccessCoin(txin.prevout).out.scriptPubKey};

// Note this includes not-yet-defined witness programs.
if (prev_spk.IsWitnessProgram(version, program) && !prev_spk.IsPayToAnchor(version, program)) {
return true;
}

// For P2SH extract the redeem script and check if it spends a non-Taproot witness program. Note
// this is fine to call EvalScript (as done in AreInputsStandard/IsWitnessStandard) because this
// function is only ever called after IsStandardTx, which checks the scriptsig is pushonly.
if (prev_spk.IsPayToScriptHash()) {
// If EvalScript fails or results in an empty stack, the transaction is invalid by consensus.
std::vector <std::vector<uint8_t>> stack;
if (!EvalScript(stack, txin.scriptSig, SCRIPT_VERIFY_NONE, BaseSignatureChecker{}, SigVersion::BASE)
|| stack.empty()) {
continue;
}
const CScript redeem_script{stack.back().begin(), stack.back().end()};
if (redeem_script.IsWitnessProgram(version, program)) {
return true;
}
}
}

return false;
}

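The P2SH branch leans on the fact that, for a push-only scriptSig, execution just leaves the pushed elements on the stack, and the redeem script is the last of them. A toy standalone model of that extraction (byte vectors stand in for real scripts; ExtractRedeemScript is an illustrative name, not a Core function):

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using Bytes = std::vector<uint8_t>;

// Model: executing a push-only scriptSig yields its pushes as the stack;
// the redeem script is the final stack element.
std::optional<Bytes> ExtractRedeemScript(const std::vector<Bytes>& pushes)
{
    if (pushes.empty()) return std::nullopt; // empty stack: invalid by consensus
    return pushes.back();
}

int main()
{
    // Signature-like data first, then the redeem script as the final push.
    const std::vector<Bytes> script_sig_pushes{{0x51}, {0x00, 0x14, 0xab}};
    assert(ExtractRedeemScript(script_sig_pushes).value() == (Bytes{0x00, 0x14, 0xab}));
}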
int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop)
{
return (std::max(nWeight, nSigOpCost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR;

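The formula is a ceiling division by the witness scale factor of whichever is larger: the real transaction weight or a synthetic weight derived from the signature-operation cost. A standalone worked example (WITNESS_SCALE_FACTOR is 4; the sample numbers are made up):

#include <algorithm>
#include <cassert>
#include <cstdint>

int64_t VirtualSize(int64_t weight, int64_t sigop_cost, unsigned int bytes_per_sigop)
{
    constexpr int64_t WITNESS_SCALE_FACTOR{4};
    return (std::max(weight, sigop_cost * bytes_per_sigop) + WITNESS_SCALE_FACTOR - 1) / WITNESS_SCALE_FACTOR;
}

int main()
{
    // Weight dominates: ceil(803 / 4) = 201 vB.
    assert(VirtualSize(803, 8, 20) == 201);
    // Sigop cost dominates: max(800, 80 * 20) = 1600, so 400 vB; a
    // sigop-heavy transaction is priced as if it were twice its weight.
    assert(VirtualSize(800, 80, 20) == 400);
}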
@@ -29,7 +29,7 @@ static constexpr unsigned int DEFAULT_BLOCK_RESERVED_WEIGHT{8000};
* Setting a lower value is prevented at startup. */
static constexpr unsigned int MINIMUM_BLOCK_RESERVED_WEIGHT{2000};
/** Default for -blockmintxfee, which sets the minimum feerate for a transaction in blocks created by mining code **/
static constexpr unsigned int DEFAULT_BLOCK_MIN_TX_FEE{1000};
static constexpr unsigned int DEFAULT_BLOCK_MIN_TX_FEE{1};
/** The maximum weight for transactions we're willing to relay/mine */
static constexpr int32_t MAX_STANDARD_TX_WEIGHT{400000};
/** The minimum non-witness size for transactions we're willing to relay/mine: one larger than 64 */
@@ -41,7 +41,7 @@ static constexpr unsigned int MAX_STANDARD_TX_SIGOPS_COST{MAX_BLOCK_SIGOPS_COST/
/** The maximum number of potentially executed legacy signature operations in a single standard tx */
static constexpr unsigned int MAX_TX_LEGACY_SIGOPS{2'500};
/** Default for -incrementalrelayfee, which sets the minimum feerate increase for mempool limiting or replacement **/
static constexpr unsigned int DEFAULT_INCREMENTAL_RELAY_FEE{1000};
static constexpr unsigned int DEFAULT_INCREMENTAL_RELAY_FEE{100};
/** Default for -bytespersigop */
static constexpr unsigned int DEFAULT_BYTES_PER_SIGOP{20};
/** Default for -permitbaremultisig */
@@ -63,7 +63,7 @@ static constexpr unsigned int MAX_STANDARD_SCRIPTSIG_SIZE{1650};
* outputs below the new threshold */
static constexpr unsigned int DUST_RELAY_TX_FEE{3000};
/** Default for -minrelaytxfee, minimum relay fee for transactions */
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE{1000};
static constexpr unsigned int DEFAULT_MIN_RELAY_TX_FEE{100};
/** Default for -limitancestorcount, max number of in-mempool ancestors */
static constexpr unsigned int DEFAULT_ANCESTOR_LIMIT{25};
/** Default for -limitancestorsize, maximum kilobytes of tx + all in-mempool ancestors */
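These defaults are feerates in satoshis per 1000 virtual bytes, so dropping DEFAULT_MIN_RELAY_TX_FEE and DEFAULT_INCREMENTAL_RELAY_FEE from 1000 to 100 cuts the default floors tenfold. A standalone sketch of what a feerate charges a transaction of a given vsize (the sample size is chosen so rounding does not matter; Core additionally rounds fractional fees):

#include <cassert>
#include <cstdint>

// Fee charged by a feerate given in satoshis per 1000 vB.
int64_t FeeForSize(int64_t feerate_per_kvb, int64_t vsize)
{
    return feerate_per_kvb * vsize / 1000;
}

int main()
{
    assert(FeeForSize(1000, 250) == 250); // old floor: 250 sats for a 250 vB tx
    assert(FeeForSize(100, 250) == 25);   // new floor: 25 sats, a 10x drop
}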
@@ -167,6 +167,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
* Also enforce a maximum stack item size limit and no annexes for tapscript spends.
*/
bool IsWitnessStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs);
/**
* Check whether this transaction spends any witness program but P2A, including not-yet-defined ones.
* May return `false` early for consensus-invalid transactions.
*/
bool SpendsNonAnchorWitnessProg(const CTransaction& tx, const CCoinsViewCache& prevouts);

/** Compute the virtual transaction size (weight reinterpreted as bytes). */
int64_t GetVirtualTransactionSize(int64_t nWeight, int64_t nSigOpCost, unsigned int bytes_per_sigop);

@@ -164,7 +164,7 @@ UniValue blockheaderToJSON(const CBlockIndex& tip, const CBlockIndex& blockindex
result.pushKV("mediantime", blockindex.GetMedianTimePast());
result.pushKV("nonce", blockindex.nNonce);
result.pushKV("bits", strprintf("%08x", blockindex.nBits));
result.pushKV("target", GetTarget(tip, pow_limit).GetHex());
result.pushKV("target", GetTarget(blockindex, pow_limit).GetHex());
result.pushKV("difficulty", GetDifficulty(blockindex));
result.pushKV("chainwork", blockindex.nChainWork.GetHex());
result.pushKV("nTx", blockindex.nTx);
@@ -2625,7 +2625,7 @@ static RPCHelpMan getdescriptoractivity()
{RPCResult::Type::STR_HEX, "blockhash", /*optional=*/true, "The blockhash this spend appears in (omitted if unconfirmed)"},
{RPCResult::Type::NUM, "height", /*optional=*/true, "Height of the spend (omitted if unconfirmed)"},
{RPCResult::Type::STR_HEX, "spend_txid", "The txid of the spending transaction"},
{RPCResult::Type::NUM, "spend_vout", "The vout of the spend"},
{RPCResult::Type::NUM, "spend_vin", "The input index of the spend"},
{RPCResult::Type::STR_HEX, "prevout_txid", "The txid of the prevout"},
{RPCResult::Type::NUM, "prevout_vout", "The vout of the prevout"},
{RPCResult::Type::OBJ, "prevout_spk", "", ScriptPubKeyDoc()},

@@ -80,7 +80,7 @@ static RPCHelpMan ping()
{
return RPCHelpMan{"ping",
"\nRequests that a ping be sent to all other nodes, to measure ping time.\n"
"Results provided in getpeerinfo, pingtime and pingwait fields are decimal seconds.\n"
"Results are provided in getpeerinfo.\n"
"Ping command is handled in queue with all other commands, so it measures processing backlog, not just network ping.\n",
{},
RPCResult{RPCResult::Type::NONE, "", ""},
@@ -145,9 +145,9 @@ static RPCHelpMan getpeerinfo()
{RPCResult::Type::NUM, "bytesrecv", "The total bytes received"},
{RPCResult::Type::NUM_TIME, "conntime", "The " + UNIX_EPOCH_TIME + " of the connection"},
{RPCResult::Type::NUM, "timeoffset", "The time offset in seconds"},
{RPCResult::Type::NUM, "pingtime", /*optional=*/true, "The last ping time in milliseconds (ms), if any"},
{RPCResult::Type::NUM, "minping", /*optional=*/true, "The minimum observed ping time in milliseconds (ms), if any"},
{RPCResult::Type::NUM, "pingwait", /*optional=*/true, "The duration in milliseconds (ms) of an outstanding ping (if non-zero)"},
{RPCResult::Type::NUM, "pingtime", /*optional=*/true, "The last ping time in seconds, if any"},
{RPCResult::Type::NUM, "minping", /*optional=*/true, "The minimum observed ping time in seconds, if any"},
{RPCResult::Type::NUM, "pingwait", /*optional=*/true, "The duration in seconds of an outstanding ping (if non-zero)"},
{RPCResult::Type::NUM, "version", "The peer version, such as 70001"},
{RPCResult::Type::STR, "subver", "The string version"},
{RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"},

@@ -1494,7 +1494,7 @@ static RPCHelpMan finalizepsbt()
return RPCHelpMan{"finalizepsbt",
"Finalize the inputs of a PSBT. If the transaction is fully signed, it will produce a\n"
"network serialized transaction which can be broadcast with sendrawtransaction. Otherwise a PSBT will be\n"
"created which has the final_scriptSig and final_scriptWitness fields filled for inputs that are complete.\n"
"created which has the final_scriptSig and final_scriptwitness fields filled for inputs that are complete.\n"
"Implements the Finalizer and Extractor roles.\n",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "A base64 string of a PSBT"},

@@ -1564,11 +1564,57 @@ bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, cons
return true;
}

int SigHashCache::CacheIndex(int32_t hash_type) const noexcept
{
// Note that we do not distinguish between BASE and WITNESS_V0 to determine the cache index,
// because no input can simultaneously use both.
return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) +
2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) +
1 * ((hash_type & 0x1f) == SIGHASH_NONE);
}

bool SigHashCache::Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept
{
auto& entry = m_cache_entries[CacheIndex(hash_type)];
if (entry.has_value()) {
if (script_code == entry->first) {
writer = HashWriter(entry->second);
return true;
}
}
return false;
}

void SigHashCache::Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept
{
auto& entry = m_cache_entries[CacheIndex(hash_type)];
entry.emplace(script_code, writer);
}

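CacheIndex packs the six cacheable sighash modes into slots 0 through 5: the ANYONECANPAY bit selects the upper half and the base mode picks the slot within it. A standalone check of that mapping, using the standard sighash constant values:

#include <cassert>
#include <cstdint>

constexpr int32_t SIGHASH_ALL{1}, SIGHASH_NONE{2}, SIGHASH_SINGLE{3}, SIGHASH_ANYONECANPAY{0x80};

int CacheIndex(int32_t hash_type)
{
    return 3 * !!(hash_type & SIGHASH_ANYONECANPAY) +
           2 * ((hash_type & 0x1f) == SIGHASH_SINGLE) +
           1 * ((hash_type & 0x1f) == SIGHASH_NONE);
}

int main()
{
    assert(CacheIndex(SIGHASH_ALL) == 0);
    assert(CacheIndex(SIGHASH_NONE) == 1);
    assert(CacheIndex(SIGHASH_SINGLE) == 2);
    assert(CacheIndex(SIGHASH_ALL | SIGHASH_ANYONECANPAY) == 3);
    assert(CacheIndex(SIGHASH_NONE | SIGHASH_ANYONECANPAY) == 4);
    assert(CacheIndex(SIGHASH_SINGLE | SIGHASH_ANYONECANPAY) == 5);
}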
template <class T>
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache)
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache, SigHashCache* sighash_cache)
{
assert(nIn < txTo.vin.size());

if (sigversion != SigVersion::WITNESS_V0) {
// Check for invalid use of SIGHASH_SINGLE
if ((nHashType & 0x1f) == SIGHASH_SINGLE) {
if (nIn >= txTo.vout.size()) {
// nOut out of range
return uint256::ONE;
}
}
}

HashWriter ss{};

// Try to compute using cached SHA256 midstate.
if (sighash_cache && sighash_cache->Load(nHashType, scriptCode, ss)) {
// Add sighash type and hash.
ss << nHashType;
return ss.GetHash();
}

if (sigversion == SigVersion::WITNESS_V0) {
uint256 hashPrevouts;
uint256 hashSequence;
@@ -1583,16 +1629,14 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
hashSequence = cacheready ? cache->hashSequence : SHA256Uint256(GetSequencesSHA256(txTo));
}

if ((nHashType & 0x1f) != SIGHASH_SINGLE && (nHashType & 0x1f) != SIGHASH_NONE) {
hashOutputs = cacheready ? cache->hashOutputs : SHA256Uint256(GetOutputsSHA256(txTo));
} else if ((nHashType & 0x1f) == SIGHASH_SINGLE && nIn < txTo.vout.size()) {
HashWriter ss{};
ss << txTo.vout[nIn];
hashOutputs = ss.GetHash();
HashWriter inner_ss{};
inner_ss << txTo.vout[nIn];
hashOutputs = inner_ss.GetHash();
}

HashWriter ss{};
// Version
ss << txTo.version;
// Input prevouts/nSequence (none/all, depending on flags)
@@ -1609,26 +1653,21 @@ uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn
ss << hashOutputs;
// Locktime
ss << txTo.nLockTime;
// Sighash type
ss << nHashType;
} else {
// Wrapper to serialize only the necessary parts of the transaction being signed
CTransactionSignatureSerializer<T> txTmp(txTo, scriptCode, nIn, nHashType);

return ss.GetHash();
// Serialize
ss << txTmp;
}

// Check for invalid use of SIGHASH_SINGLE
if ((nHashType & 0x1f) == SIGHASH_SINGLE) {
if (nIn >= txTo.vout.size()) {
// nOut out of range
return uint256::ONE;
}
// If a cache object was provided, store the midstate there.
if (sighash_cache != nullptr) {
sighash_cache->Store(nHashType, scriptCode, ss);
}

// Wrapper to serialize only the necessary parts of the transaction being signed
CTransactionSignatureSerializer<T> txTmp(txTo, scriptCode, nIn, nHashType);

// Serialize and hash
HashWriter ss{};
ss << txTmp << nHashType;
// Add sighash type and hash.
ss << nHashType;
return ss.GetHash();
}

@@ -1661,7 +1700,7 @@ bool GenericTransactionSignatureChecker<T>::CheckECDSASignature(const std::vecto
// Witness sighashes need the amount.
if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return HandleMissingData(m_mdb);

uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata);
uint256 sighash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion, this->txdata, &m_sighash_cache);

if (!VerifyECDSASignature(vchSig, pubkey, sighash))
return false;

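The flow above serializes everything except the trailing 4-byte hash type, stores the hasher midstate in the cache, and each later call with the same scriptCode and mode only resumes from that snapshot and appends the hash type. A toy standalone illustration of the snapshot-and-resume pattern, with a trivial FNV-1a state standing in for the real SHA256 HashWriter:

#include <cassert>
#include <cstdint>
#include <string>

struct Fnv1a {
    uint64_t state{14695981039346656037ull};
    void Write(const std::string& data)
    {
        for (unsigned char c : data) { state ^= c; state *= 1099511628211ull; }
    }
    uint64_t Finalize() const { return state; }
};

int main()
{
    const std::string prefix{"tx-serialization-without-hash-type"};
    Fnv1a full;
    full.Write(prefix);
    Fnv1a midstate{full}; // cached snapshot, as SigHashCache stores a HashWriter copy

    full.Write("\x01"); // append SIGHASH_ALL and finish
    Fnv1a resumed{midstate};
    resumed.Write("\x01"); // a cache hit resumes and appends the same suffix
    assert(full.Finalize() == resumed.Finalize());
}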
@@ -239,8 +239,27 @@ extern const HashWriter HASHER_TAPSIGHASH; //!< Hasher with tag "TapSighash" pre
extern const HashWriter HASHER_TAPLEAF; //!< Hasher with tag "TapLeaf" pre-fed to it.
extern const HashWriter HASHER_TAPBRANCH; //!< Hasher with tag "TapBranch" pre-fed to it.

/** Data structure to cache SHA256 midstates for the ECDSA sighash calculations
* (bare, P2SH, P2WPKH, P2WSH). */
class SigHashCache
{
/** For each sighash mode (ALL, SINGLE, NONE, ALL|ANYONE, SINGLE|ANYONE, NONE|ANYONE),
* optionally store a scriptCode which the hash is for, plus a midstate for the SHA256
* computation just before adding the hash_type itself. */
std::optional<std::pair<CScript, HashWriter>> m_cache_entries[6];

/** Given a hash_type, find which of the 6 cache entries is to be used. */
int CacheIndex(int32_t hash_type) const noexcept;

public:
/** Load into writer the SHA256 midstate if found in this cache. */
[[nodiscard]] bool Load(int32_t hash_type, const CScript& script_code, HashWriter& writer) const noexcept;
/** Store into this cache object the provided SHA256 midstate. */
void Store(int32_t hash_type, const CScript& script_code, const HashWriter& writer) noexcept;
};

template <class T>
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr);
uint256 SignatureHash(const CScript& scriptCode, const T& txTo, unsigned int nIn, int32_t nHashType, const CAmount& amount, SigVersion sigversion, const PrecomputedTransactionData* cache = nullptr, SigHashCache* sighash_cache = nullptr);

class BaseSignatureChecker
{
@@ -289,6 +308,7 @@ private:
unsigned int nIn;
const CAmount amount;
const PrecomputedTransactionData* txdata;
mutable SigHashCache m_sighash_cache;

protected:
virtual bool VerifyECDSASignature(const std::vector<unsigned char>& vchSig, const CPubKey& vchPubKey, const uint256& sighash) const;

|
||||
CBlock block2;
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions
|
||||
partialBlock = tmp;
|
||||
}
|
||||
|
||||
// Wrong transaction
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
partialBlock.FillBlock(block2, {block.vtx[2]}); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock.FillBlock(block2, {block.vtx[2]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock = tmp;
|
||||
}
|
||||
bool mutated;
|
||||
BOOST_CHECK(block.hashMerkleRoot != BlockMerkleRoot(block2, &mutated));
|
||||
|
||||
CBlock block3;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[1]}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
@@ -182,14 +182,14 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
|
||||
CBlock block2;
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_INVALID); // No transactions
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_INVALID); // No transactions
|
||||
partialBlock = tmp;
|
||||
}
|
||||
|
||||
// Wrong transaction
|
||||
{
|
||||
PartiallyDownloadedBlock tmp = partialBlock;
|
||||
partialBlock.FillBlock(block2, {block.vtx[1]}); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock.FillBlock(block2, {block.vtx[1]}, /*segwit_active=*/true); // Current implementation doesn't check txn here, but don't require that
|
||||
partialBlock = tmp;
|
||||
}
|
||||
BOOST_CHECK_EQUAL(pool.get(block.vtx[2]->GetHash()).use_count(), SHARED_TX_OFFSET + 2); // +2 because of partialBlock and block2
|
||||
@@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(NonCoinbasePreforwardRTTest)
|
||||
|
||||
CBlock block3;
|
||||
PartiallyDownloadedBlock partialBlockCopy = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block3, {block.vtx[0]}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block3.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block3, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
@@ -252,7 +252,7 @@ BOOST_AUTO_TEST_CASE(SufficientPreforwardRTTest)
|
||||
|
||||
CBlock block2;
|
||||
PartiallyDownloadedBlock partialBlockCopy = partialBlock;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, {}, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
|
||||
bool mutated;
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
|
||||
@@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(EmptyBlockRoundTripTest)
|
||||
|
||||
CBlock block2;
|
||||
std::vector<CTransactionRef> vtx_missing;
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing) == READ_STATUS_OK);
|
||||
BOOST_CHECK(partialBlock.FillBlock(block2, vtx_missing, /*segwit_active=*/true) == READ_STATUS_OK);
|
||||
BOOST_CHECK_EQUAL(block.GetHash().ToString(), block2.GetHash().ToString());
|
||||
BOOST_CHECK_EQUAL(block.hashMerkleRoot.ToString(), BlockMerkleRoot(block2, &mutated).ToString());
|
||||
BOOST_CHECK(!mutated);
|
||||
|
||||
@@ -324,7 +324,7 @@ FUZZ_TARGET(ephemeral_package_eval, .init = initialize_tx_pool)
|
||||
return ProcessNewPackage(chainstate, tx_pool, txs, /*test_accept=*/single_submit, /*client_maxfeerate=*/{}));
|
||||
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, txs.back(), GetTime(),
|
||||
/*bypass_limits=*/fuzzed_data_provider.ConsumeBool(), /*test_accept=*/!single_submit));
|
||||
/*bypass_limits=*/false, /*test_accept=*/!single_submit));
|
||||
|
||||
if (!single_submit && result_package.m_state.GetResult() != PackageValidationResult::PCKG_POLICY) {
|
||||
// We don't know anything about the validity since transactions were randomly generated, so
|
||||
|
||||
@@ -32,14 +32,10 @@ void initialize_pdb()
|
||||
g_setup = testing_setup.get();
|
||||
}
|
||||
|
||||
PartiallyDownloadedBlock::CheckBlockFn FuzzedCheckBlock(std::optional<BlockValidationResult> result)
|
||||
PartiallyDownloadedBlock::IsBlockMutatedFn FuzzedIsBlockMutated(bool result)
|
||||
{
|
||||
return [result](const CBlock&, BlockValidationState& state, const Consensus::Params&, bool, bool) {
|
||||
if (result) {
|
||||
return state.Invalid(*result);
|
||||
}
|
||||
|
||||
return true;
|
||||
return [result](const CBlock& block, bool) {
|
||||
return result;
|
||||
};
|
||||
}
|
||||
|
||||
@@ -111,36 +107,22 @@ FUZZ_TARGET(partially_downloaded_block, .init = initialize_pdb)
|
||||
skipped_missing |= (!pdb.IsTxAvailable(i) && skip);
|
||||
}
|
||||
|
||||
// Mock CheckBlock
|
||||
bool fail_check_block{fuzzed_data_provider.ConsumeBool()};
|
||||
auto validation_result =
|
||||
fuzzed_data_provider.PickValueInArray(
|
||||
{BlockValidationResult::BLOCK_RESULT_UNSET,
|
||||
BlockValidationResult::BLOCK_CONSENSUS,
|
||||
BlockValidationResult::BLOCK_CACHED_INVALID,
|
||||
BlockValidationResult::BLOCK_INVALID_HEADER,
|
||||
BlockValidationResult::BLOCK_MUTATED,
|
||||
BlockValidationResult::BLOCK_MISSING_PREV,
|
||||
BlockValidationResult::BLOCK_INVALID_PREV,
|
||||
BlockValidationResult::BLOCK_TIME_FUTURE,
|
||||
BlockValidationResult::BLOCK_CHECKPOINT,
|
||||
BlockValidationResult::BLOCK_HEADER_LOW_WORK});
|
||||
pdb.m_check_block_mock = FuzzedCheckBlock(
|
||||
fail_check_block ?
|
||||
std::optional<BlockValidationResult>{validation_result} :
|
||||
std::nullopt);
|
||||
bool segwit_active{fuzzed_data_provider.ConsumeBool()};
|
||||
|
||||
// Mock IsBlockMutated
|
||||
bool fail_block_mutated{fuzzed_data_provider.ConsumeBool()};
|
||||
pdb.m_check_block_mutated_mock = FuzzedIsBlockMutated(fail_block_mutated);
|
||||
|
||||
CBlock reconstructed_block;
|
||||
auto fill_status{pdb.FillBlock(reconstructed_block, missing)};
|
||||
auto fill_status{pdb.FillBlock(reconstructed_block, missing, segwit_active)};
|
||||
switch (fill_status) {
|
||||
case READ_STATUS_OK:
|
||||
assert(!skipped_missing);
|
||||
assert(!fail_check_block);
|
||||
assert(!fail_block_mutated);
|
||||
assert(block->GetHash() == reconstructed_block.GetHash());
|
||||
break;
|
||||
case READ_STATUS_CHECKBLOCK_FAILED: [[fallthrough]];
|
||||
case READ_STATUS_FAILED:
|
||||
assert(fail_check_block);
|
||||
assert(fail_block_mutated);
|
||||
break;
|
||||
case READ_STATUS_INVALID:
|
||||
break;
|
||||
|
||||
@@ -7,6 +7,7 @@
|
||||
#include <test/fuzz/FuzzedDataProvider.h>
|
||||
#include <test/fuzz/fuzz.h>
|
||||
#include <test/fuzz/util.h>
|
||||
#include <util/check.h>
|
||||
|
||||
#include <cstdint>
|
||||
#include <optional>
|
||||
@@ -45,3 +46,27 @@ FUZZ_TARGET(script_interpreter)
|
||||
(void)CastToBool(ConsumeRandomLengthByteVector(fuzzed_data_provider));
|
||||
}
|
||||
}
|
||||
|
||||
/** Differential fuzzing for SignatureHash with and without cache. */
|
||||
FUZZ_TARGET(sighash_cache)
|
||||
{
|
||||
FuzzedDataProvider provider(buffer.data(), buffer.size());
|
||||
|
||||
// Get inputs to the sighash function that won't change across types.
|
||||
const auto scriptcode{ConsumeScript(provider)};
|
||||
const auto tx{ConsumeTransaction(provider, std::nullopt)};
|
||||
if (tx.vin.empty()) return;
|
||||
const auto in_index{provider.ConsumeIntegralInRange<uint32_t>(0, tx.vin.size() - 1)};
|
||||
const auto amount{ConsumeMoney(provider)};
|
||||
const auto sigversion{(SigVersion)provider.ConsumeIntegralInRange(0, 1)};
|
||||
|
||||
// Check the sighash function will give the same result for 100 fuzzer-generated hash types whether or not a cache is
|
||||
// provided. The cache is conserved across types to exercise cache hits.
|
||||
SigHashCache sighash_cache{};
|
||||
for (int i{0}; i < 100; ++i) {
|
||||
const auto hash_type{((i & 2) == 0) ? provider.ConsumeIntegral<int8_t>() : provider.ConsumeIntegral<int32_t>()};
|
||||
const auto nocache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion)};
|
||||
const auto cache_res{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &sighash_cache)};
|
||||
Assert(nocache_res == cache_res);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -295,7 +295,6 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool)
|
||||
std::set<CTransactionRef> added;
|
||||
auto txr = std::make_shared<TransactionsDelta>(removed, added);
|
||||
node.validation_signals->RegisterSharedValidationInterface(txr);
|
||||
const bool bypass_limits = fuzzed_data_provider.ConsumeBool();
|
||||
|
||||
// Make sure ProcessNewPackage on one transaction works.
|
||||
// The result is not guaranteed to be the same as what is returned by ATMP.
|
||||
@@ -310,7 +309,7 @@ FUZZ_TARGET(tx_pool_standard, .init = initialize_tx_pool)
|
||||
it->second.m_result_type == MempoolAcceptResult::ResultType::INVALID);
|
||||
}
|
||||
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false));
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), /*bypass_limits=*/false, /*test_accept=*/false));
|
||||
const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID;
|
||||
node.validation_signals->SyncWithValidationInterfaceQueue();
|
||||
node.validation_signals->UnregisterSharedValidationInterface(txr);
|
||||
@@ -393,6 +392,9 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool)
|
||||
|
||||
chainstate.SetMempool(&tx_pool);
|
||||
|
||||
// If we ever bypass limits, do not do TRUC invariants checks
|
||||
bool ever_bypassed_limits{false};
|
||||
|
||||
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 300)
|
||||
{
|
||||
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
|
||||
@@ -411,13 +413,17 @@ FUZZ_TARGET(tx_pool, .init = initialize_tx_pool)
|
||||
tx_pool.PrioritiseTransaction(txid.ToUint256(), delta);
|
||||
}
|
||||
|
||||
const bool bypass_limits{fuzzed_data_provider.ConsumeBool()};
|
||||
ever_bypassed_limits |= bypass_limits;
|
||||
|
||||
const auto tx = MakeTransactionRef(mut_tx);
|
||||
const bool bypass_limits = fuzzed_data_provider.ConsumeBool();
|
||||
const auto res = WITH_LOCK(::cs_main, return AcceptToMemoryPool(chainstate, tx, GetTime(), bypass_limits, /*test_accept=*/false));
|
||||
const bool accepted = res.m_result_type == MempoolAcceptResult::ResultType::VALID;
|
||||
if (accepted) {
|
||||
txids.push_back(tx->GetHash());
|
||||
CheckMempoolTRUCInvariants(tx_pool);
|
||||
if (!ever_bypassed_limits) {
|
||||
CheckMempoolTRUCInvariants(tx_pool);
|
||||
}
|
||||
}
|
||||
}
|
||||
Finish(fuzzed_data_provider, tx_pool, chainstate);
|
||||
|
||||
@@ -5,12 +5,21 @@
|
||||
#include <init/common.h>
|
||||
#include <logging.h>
|
||||
#include <logging/timer.h>
|
||||
#include <scheduler.h>
|
||||
#include <test/util/logging.h>
|
||||
#include <test/util/setup_common.h>
|
||||
#include <tinyformat.h>
|
||||
#include <util/fs.h>
|
||||
#include <util/fs_helpers.h>
|
||||
#include <util/string.h>
|
||||
|
||||
#include <chrono>
|
||||
#include <fstream>
|
||||
#include <future>
|
||||
#include <ios>
|
||||
#include <iostream>
|
||||
#include <source_location>
|
||||
#include <string>
|
||||
#include <unordered_map>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
@@ -28,6 +37,16 @@ static void ResetLogger()
|
||||
LogInstance().SetCategoryLogLevel({});
|
||||
}
|
||||
|
||||
static std::vector<std::string> ReadDebugLogLines()
|
||||
{
|
||||
std::vector<std::string> lines;
|
||||
std::ifstream ifs{LogInstance().m_file_path};
|
||||
for (std::string line; std::getline(ifs, line);) {
|
||||
lines.push_back(std::move(line));
|
||||
}
|
||||
return lines;
|
||||
}
|
||||
|
||||
struct LogSetup : public BasicTestingSetup {
|
||||
fs::path prev_log_path;
|
||||
fs::path tmp_log_path;
|
||||
@@ -38,6 +57,7 @@ struct LogSetup : public BasicTestingSetup {
|
||||
bool prev_log_sourcelocations;
|
||||
std::unordered_map<BCLog::LogFlags, BCLog::Level> prev_category_levels;
|
||||
BCLog::Level prev_log_level;
|
||||
BCLog::CategoryMask prev_category_mask;
|
||||
|
||||
LogSetup() : prev_log_path{LogInstance().m_file_path},
|
||||
tmp_log_path{m_args.GetDataDirBase() / "tmp_debug.log"},
|
||||
@@ -47,7 +67,8 @@ struct LogSetup : public BasicTestingSetup {
|
||||
prev_log_threadnames{LogInstance().m_log_threadnames},
|
||||
prev_log_sourcelocations{LogInstance().m_log_sourcelocations},
|
||||
prev_category_levels{LogInstance().CategoryLevels()},
|
||||
prev_log_level{LogInstance().LogLevel()}
|
||||
prev_log_level{LogInstance().LogLevel()},
|
||||
prev_category_mask{LogInstance().GetCategoryMask()}
|
||||
{
|
||||
LogInstance().m_file_path = tmp_log_path;
|
||||
LogInstance().m_reopen_file = true;
|
||||
@@ -59,7 +80,9 @@ struct LogSetup : public BasicTestingSetup {
|
||||
LogInstance().m_log_sourcelocations = false;
|
||||
|
||||
LogInstance().SetLogLevel(BCLog::Level::Debug);
|
||||
LogInstance().DisableCategory(BCLog::LogFlags::ALL);
|
||||
LogInstance().SetCategoryLogLevel({});
|
||||
LogInstance().SetRateLimiting(nullptr);
|
||||
}
|
||||
|
||||
~LogSetup()
|
||||
@@ -73,6 +96,9 @@ struct LogSetup : public BasicTestingSetup {
|
||||
LogInstance().m_log_sourcelocations = prev_log_sourcelocations;
|
||||
LogInstance().SetLogLevel(prev_log_level);
|
||||
LogInstance().SetCategoryLogLevel(prev_category_levels);
|
||||
LogInstance().SetRateLimiting(nullptr);
|
||||
LogInstance().DisableCategory(BCLog::LogFlags::ALL);
|
||||
LogInstance().EnableCategory(BCLog::LogFlags{prev_category_mask});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -86,41 +112,43 @@ BOOST_AUTO_TEST_CASE(logging_timer)
|
||||
BOOST_FIXTURE_TEST_CASE(logging_LogPrintStr, LogSetup)
|
||||
{
|
||||
LogInstance().m_log_sourcelocations = true;
|
||||
LogInstance().LogPrintStr("foo1: bar1", "fn1", "src1", 1, BCLog::LogFlags::NET, BCLog::Level::Debug);
|
||||
LogInstance().LogPrintStr("foo2: bar2", "fn2", "src2", 2, BCLog::LogFlags::NET, BCLog::Level::Info);
|
||||
LogInstance().LogPrintStr("foo3: bar3", "fn3", "src3", 3, BCLog::LogFlags::ALL, BCLog::Level::Debug);
|
||||
LogInstance().LogPrintStr("foo4: bar4", "fn4", "src4", 4, BCLog::LogFlags::ALL, BCLog::Level::Info);
|
||||
LogInstance().LogPrintStr("foo5: bar5", "fn5", "src5", 5, BCLog::LogFlags::NONE, BCLog::Level::Debug);
|
||||
LogInstance().LogPrintStr("foo6: bar6", "fn6", "src6", 6, BCLog::LogFlags::NONE, BCLog::Level::Info);
|
||||
std::ifstream file{tmp_log_path};
|
||||
std::vector<std::string> log_lines;
|
||||
for (std::string log; std::getline(file, log);) {
|
||||
log_lines.push_back(log);
|
||||
}
|
||||
std::vector<std::string> expected = {
|
||||
"[src1:1] [fn1] [net] foo1: bar1",
|
||||
"[src2:2] [fn2] [net:info] foo2: bar2",
|
||||
"[src3:3] [fn3] [debug] foo3: bar3",
|
||||
"[src4:4] [fn4] foo4: bar4",
|
||||
"[src5:5] [fn5] [debug] foo5: bar5",
|
||||
"[src6:6] [fn6] foo6: bar6",
|
||||
|
||||
struct Case {
|
||||
std::string msg;
|
||||
BCLog::LogFlags category;
|
||||
BCLog::Level level;
|
||||
std::string prefix;
|
||||
std::source_location loc;
|
||||
};
|
||||
|
||||
std::vector<Case> cases = {
|
||||
{"foo1: bar1", BCLog::NET, BCLog::Level::Debug, "[net] ", std::source_location::current()},
|
||||
{"foo2: bar2", BCLog::NET, BCLog::Level::Info, "[net:info] ", std::source_location::current()},
|
||||
{"foo3: bar3", BCLog::ALL, BCLog::Level::Debug, "[debug] ", std::source_location::current()},
|
||||
{"foo4: bar4", BCLog::ALL, BCLog::Level::Info, "", std::source_location::current()},
|
||||
{"foo5: bar5", BCLog::NONE, BCLog::Level::Debug, "[debug] ", std::source_location::current()},
|
||||
{"foo6: bar6", BCLog::NONE, BCLog::Level::Info, "", std::source_location::current()},
|
||||
};
|
||||
|
||||
std::vector<std::string> expected;
|
||||
for (auto& [msg, category, level, prefix, loc] : cases) {
|
||||
expected.push_back(tfm::format("[%s:%s] [%s] %s%s", util::RemovePrefix(loc.file_name(), "./"), loc.line(), loc.function_name(), prefix, msg));
|
||||
LogInstance().LogPrintStr(msg, std::move(loc), category, level, /*should_ratelimit=*/false);
|
||||
}
|
||||
std::vector<std::string> log_lines{ReadDebugLogLines()};
|
||||
BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end());
|
||||
}
|
||||
|
||||
BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacrosDeprecated, LogSetup)
|
||||
{
|
||||
LogInstance().EnableCategory(BCLog::NET);
|
||||
LogPrintf("foo5: %s\n", "bar5");
|
||||
LogPrintLevel(BCLog::NET, BCLog::Level::Trace, "foo4: %s\n", "bar4"); // not logged
|
||||
LogPrintLevel(BCLog::NET, BCLog::Level::Debug, "foo7: %s\n", "bar7");
|
||||
LogPrintLevel(BCLog::NET, BCLog::Level::Info, "foo8: %s\n", "bar8");
|
||||
LogPrintLevel(BCLog::NET, BCLog::Level::Warning, "foo9: %s\n", "bar9");
|
||||
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "foo10: %s\n", "bar10");
|
||||
std::ifstream file{tmp_log_path};
|
||||
std::vector<std::string> log_lines;
|
||||
for (std::string log; std::getline(file, log);) {
|
||||
log_lines.push_back(log);
|
||||
}
|
||||
std::vector<std::string> log_lines{ReadDebugLogLines()};
|
||||
std::vector<std::string> expected = {
|
||||
"foo5: bar5",
|
||||
"[net] foo7: bar7",
|
||||
@@ -133,16 +161,13 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacrosDeprecated, LogSetup)
|
||||
|
||||
BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros, LogSetup)
|
||||
{
|
||||
LogInstance().EnableCategory(BCLog::NET);
|
||||
LogTrace(BCLog::NET, "foo6: %s", "bar6"); // not logged
|
||||
LogDebug(BCLog::NET, "foo7: %s", "bar7");
|
||||
LogInfo("foo8: %s", "bar8");
|
||||
LogWarning("foo9: %s", "bar9");
|
||||
LogError("foo10: %s", "bar10");
|
||||
std::ifstream file{tmp_log_path};
|
||||
std::vector<std::string> log_lines;
|
||||
for (std::string log; std::getline(file, log);) {
|
||||
log_lines.push_back(log);
|
||||
}
|
||||
std::vector<std::string> log_lines{ReadDebugLogLines()};
|
||||
std::vector<std::string> expected = {
|
||||
"[net] foo7: bar7",
|
||||
"foo8: bar8",
|
||||
@@ -174,19 +199,13 @@ BOOST_FIXTURE_TEST_CASE(logging_LogPrintMacros_CategoryName, LogSetup)
|
||||
expected.push_back(expected_log);
|
||||
}
|
||||
|
||||
std::ifstream file{tmp_log_path};
|
||||
std::vector<std::string> log_lines;
|
||||
for (std::string log; std::getline(file, log);) {
|
||||
log_lines.push_back(log);
|
||||
}
|
||||
std::vector<std::string> log_lines{ReadDebugLogLines()};
|
||||
BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end());
|
||||
}
|
||||
|
||||
BOOST_FIXTURE_TEST_CASE(logging_SeverityLevels, LogSetup)
|
||||
{
|
||||
LogInstance().EnableCategory(BCLog::LogFlags::ALL);
|
||||
|
||||
LogInstance().SetLogLevel(BCLog::Level::Debug);
|
||||
LogInstance().SetCategoryLogLevel(/*category_str=*/"net", /*level_str=*/"info");
|
||||
|
||||
// Global log level
|
||||
@@ -207,11 +226,7 @@ BOOST_FIXTURE_TEST_CASE(logging_SeverityLevels, LogSetup)
|
||||
"[net:warning] foo5: bar5",
|
||||
"[net:error] foo7: bar7",
|
||||
};
|
||||
std::ifstream file{tmp_log_path};
|
||||
std::vector<std::string> log_lines;
|
||||
for (std::string log; std::getline(file, log);) {
|
||||
log_lines.push_back(log);
|
||||
}
|
||||
std::vector<std::string> log_lines{ReadDebugLogLines()};
|
||||
BOOST_CHECK_EQUAL_COLLECTIONS(log_lines.begin(), log_lines.end(), expected.begin(), expected.end());
|
||||
}
|
||||
|
||||
@@ -276,4 +291,197 @@ BOOST_FIXTURE_TEST_CASE(logging_Conf, LogSetup)
|
||||
}
|
||||
}
|
||||
|
||||
struct ScopedScheduler {
CScheduler scheduler{};

ScopedScheduler()
{
scheduler.m_service_thread = std::thread([this] { scheduler.serviceQueue(); });
}
~ScopedScheduler()
{
scheduler.stop();
}
void MockForwardAndSync(std::chrono::seconds duration)
{
scheduler.MockForward(duration);
std::promise<void> promise;
scheduler.scheduleFromNow([&promise] { promise.set_value(); }, 0ms);
promise.get_future().wait();
}
std::shared_ptr<BCLog::LogRateLimiter> GetLimiter(size_t max_bytes, std::chrono::seconds window)
{
auto sched_func = [this](auto func, auto w) {
scheduler.scheduleEvery(std::move(func), w);
};
return BCLog::LogRateLimiter::Create(sched_func, max_bytes, window);
}
};

BOOST_AUTO_TEST_CASE(logging_log_rate_limiter)
{
uint64_t max_bytes{1024};
auto reset_window{1min};
ScopedScheduler scheduler{};
auto limiter_{scheduler.GetLimiter(max_bytes, reset_window)};
auto& limiter{*Assert(limiter_)};

using Status = BCLog::LogRateLimiter::Status;
auto source_loc_1{std::source_location::current()};
auto source_loc_2{std::source_location::current()};

// A fresh limiter should not have any suppressions
BOOST_CHECK(!limiter.SuppressionsActive());

// Resetting an unused limiter is fine
limiter.Reset();
BOOST_CHECK(!limiter.SuppressionsActive());

// No suppression should happen until more than max_bytes have been consumed
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_1, std::string(max_bytes - 1, 'a')), Status::UNSUPPRESSED);
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_1, "a"), Status::UNSUPPRESSED);
BOOST_CHECK(!limiter.SuppressionsActive());
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_1, "a"), Status::NEWLY_SUPPRESSED);
BOOST_CHECK(limiter.SuppressionsActive());
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_1, "a"), Status::STILL_SUPPRESSED);
BOOST_CHECK(limiter.SuppressionsActive());

// Location 2 should not be affected by location 1's suppression
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_2, std::string(max_bytes, 'a')), Status::UNSUPPRESSED);
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_2, "a"), Status::NEWLY_SUPPRESSED);
BOOST_CHECK(limiter.SuppressionsActive());

// After reset_window time has passed, all suppressions should be cleared.
scheduler.MockForwardAndSync(reset_window);

BOOST_CHECK(!limiter.SuppressionsActive());
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_1, std::string(max_bytes, 'a')), Status::UNSUPPRESSED);
BOOST_CHECK_EQUAL(limiter.Consume(source_loc_2, std::string(max_bytes, 'a')), Status::UNSUPPRESSED);
}

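The test above exercises a per-source-location byte budget: a location stays unsuppressed until its cumulative bytes exceed the limit, reports NEWLY_SUPPRESSED exactly once, then STILL_SUPPRESSED until the window resets. A minimal standalone sketch of those assumed semantics (not the actual BCLog::LogRateLimiter, and keyed by a plain string instead of std::source_location):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

enum class Status { UNSUPPRESSED, NEWLY_SUPPRESSED, STILL_SUPPRESSED };

class ByteBudgetLimiter
{
    uint64_t m_max_bytes;
    std::map<std::string, uint64_t> m_used; // bytes consumed per source location

public:
    explicit ByteBudgetLimiter(uint64_t max_bytes) : m_max_bytes{max_bytes} {}

    Status Consume(const std::string& location, uint64_t bytes)
    {
        uint64_t& used{m_used[location]};
        const bool was_over{used > m_max_bytes};
        used += bytes;
        if (used <= m_max_bytes) return Status::UNSUPPRESSED;
        return was_over ? Status::STILL_SUPPRESSED : Status::NEWLY_SUPPRESSED;
    }

    void Reset() { m_used.clear(); } // the window elapsing clears all budgets
};

int main()
{
    ByteBudgetLimiter limiter{1024};
    assert(limiter.Consume("loc1", 1024) == Status::UNSUPPRESSED); // exactly at the budget
    assert(limiter.Consume("loc1", 1) == Status::NEWLY_SUPPRESSED);
    assert(limiter.Consume("loc1", 1) == Status::STILL_SUPPRESSED);
    assert(limiter.Consume("loc2", 1) == Status::UNSUPPRESSED);    // budgets are per location
    limiter.Reset();
    assert(limiter.Consume("loc1", 1) == Status::UNSUPPRESSED);
}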
BOOST_AUTO_TEST_CASE(logging_log_limit_stats)
{
BCLog::LogRateLimiter::Stats stats(BCLog::RATELIMIT_MAX_BYTES);

// Check that stats gets initialized correctly.
BOOST_CHECK_EQUAL(stats.m_available_bytes, BCLog::RATELIMIT_MAX_BYTES);
BOOST_CHECK_EQUAL(stats.m_dropped_bytes, uint64_t{0});

const uint64_t MESSAGE_SIZE{BCLog::RATELIMIT_MAX_BYTES / 2};
BOOST_CHECK(stats.Consume(MESSAGE_SIZE));
BOOST_CHECK_EQUAL(stats.m_available_bytes, BCLog::RATELIMIT_MAX_BYTES - MESSAGE_SIZE);
BOOST_CHECK_EQUAL(stats.m_dropped_bytes, uint64_t{0});

BOOST_CHECK(stats.Consume(MESSAGE_SIZE));
BOOST_CHECK_EQUAL(stats.m_available_bytes, BCLog::RATELIMIT_MAX_BYTES - MESSAGE_SIZE * 2);
BOOST_CHECK_EQUAL(stats.m_dropped_bytes, uint64_t{0});

// Consuming more bytes after already having consumed RATELIMIT_MAX_BYTES should fail.
BOOST_CHECK(!stats.Consume(500));
BOOST_CHECK_EQUAL(stats.m_available_bytes, uint64_t{0});
BOOST_CHECK_EQUAL(stats.m_dropped_bytes, uint64_t{500});
}

namespace {

enum class Location {
INFO_1,
INFO_2,
DEBUG_LOG,
INFO_NOLIMIT,
};

void LogFromLocation(Location location, const std::string& message) {
switch (location) {
case Location::INFO_1:
LogInfo("%s\n", message);
return;
case Location::INFO_2:
LogInfo("%s\n", message);
return;
case Location::DEBUG_LOG:
LogDebug(BCLog::LogFlags::HTTP, "%s\n", message);
return;
case Location::INFO_NOLIMIT:
LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/false, "%s\n", message);
return;
} // no default case, so the compiler can warn about missing cases
assert(false);
}

/**
* For a given `location` and `message`, ensure that the on-disk debug log behaviour resembles what
* we'd expect it to be for `status` and `suppressions_active`.
*/
void TestLogFromLocation(Location location, const std::string& message,
BCLog::LogRateLimiter::Status status, bool suppressions_active,
std::source_location source = std::source_location::current())
{
BOOST_TEST_INFO_SCOPE("TestLogFromLocation called from " << source.file_name() << ":" << source.line());
using Status = BCLog::LogRateLimiter::Status;
if (!suppressions_active) assert(status == Status::UNSUPPRESSED); // developer error

std::ofstream ofs(LogInstance().m_file_path, std::ios::out | std::ios::trunc); // clear debug log
LogFromLocation(location, message);
auto log_lines{ReadDebugLogLines()};
BOOST_TEST_INFO_SCOPE(log_lines.size() << " log_lines read: \n" << util::Join(log_lines, "\n"));

if (status == Status::STILL_SUPPRESSED) {
BOOST_CHECK_EQUAL(log_lines.size(), 0);
return;
}

if (status == Status::NEWLY_SUPPRESSED) {
BOOST_REQUIRE_EQUAL(log_lines.size(), 2);
BOOST_CHECK(log_lines[0].starts_with("[*] [warning] Excessive logging detected"));
log_lines.erase(log_lines.begin());
}
BOOST_REQUIRE_EQUAL(log_lines.size(), 1);
auto& payload{log_lines.back()};
BOOST_CHECK_EQUAL(suppressions_active, payload.starts_with("[*]"));
BOOST_CHECK(payload.ends_with(message));
}

} // namespace

BOOST_FIXTURE_TEST_CASE(logging_filesize_rate_limit, LogSetup)
{
using Status = BCLog::LogRateLimiter::Status;
LogInstance().m_log_timestamps = false;
LogInstance().m_log_sourcelocations = false;
LogInstance().m_log_threadnames = false;
LogInstance().EnableCategory(BCLog::LogFlags::HTTP);

constexpr int64_t line_length{1024};
constexpr int64_t num_lines{10};
constexpr int64_t bytes_quota{line_length * num_lines};
constexpr auto time_window{1h};

ScopedScheduler scheduler{};
auto limiter{scheduler.GetLimiter(bytes_quota, time_window)};
LogInstance().SetRateLimiting(limiter);

const std::string log_message(line_length - 1, 'a'); // subtract one for newline

for (int i = 0; i < num_lines; ++i) {
TestLogFromLocation(Location::INFO_1, log_message, Status::UNSUPPRESSED, /*suppressions_active=*/false);
}
TestLogFromLocation(Location::INFO_1, "a", Status::NEWLY_SUPPRESSED, /*suppressions_active=*/true);
TestLogFromLocation(Location::INFO_1, "b", Status::STILL_SUPPRESSED, /*suppressions_active=*/true);
TestLogFromLocation(Location::INFO_2, "c", Status::UNSUPPRESSED, /*suppressions_active=*/true);
{
scheduler.MockForwardAndSync(time_window);
BOOST_CHECK(ReadDebugLogLines().back().starts_with("[warning] Restarting logging"));
}
// Check that logging from previously suppressed location is unsuppressed again.
TestLogFromLocation(Location::INFO_1, log_message, Status::UNSUPPRESSED, /*suppressions_active=*/false);
// Check that conditional logging, and unconditional logging with should_ratelimit=false is
// not being ratelimited.
for (Location location : {Location::DEBUG_LOG, Location::INFO_NOLIMIT}) {
for (int i = 0; i < num_lines + 2; ++i) {
TestLogFromLocation(location, log_message, Status::UNSUPPRESSED, /*suppressions_active=*/false);
}
}
}

BOOST_AUTO_TEST_SUITE_END()

@@ -443,7 +443,7 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
|
||||
tx1.vout.resize(1);
|
||||
tx1.vout[0].scriptPubKey = CScript() << OP_1 << OP_EQUAL;
|
||||
tx1.vout[0].nValue = 10 * COIN;
|
||||
AddToMempool(pool, entry.Fee(10000LL).FromTx(tx1));
|
||||
AddToMempool(pool, entry.Fee(1000LL).FromTx(tx1));
|
||||
|
||||
CMutableTransaction tx2 = CMutableTransaction();
|
||||
tx2.vin.resize(1);
|
||||
@@ -451,7 +451,7 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
|
||||
tx2.vout.resize(1);
|
||||
tx2.vout[0].scriptPubKey = CScript() << OP_2 << OP_EQUAL;
|
||||
tx2.vout[0].nValue = 10 * COIN;
|
||||
AddToMempool(pool, entry.Fee(5000LL).FromTx(tx2));
|
||||
AddToMempool(pool, entry.Fee(500LL).FromTx(tx2));
|
||||
|
||||
pool.TrimToSize(pool.DynamicMemoryUsage()); // should do nothing
|
||||
BOOST_CHECK(pool.exists(GenTxid::Txid(tx1.GetHash())));
|
||||
@@ -469,7 +469,7 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
|
||||
tx3.vout.resize(1);
|
||||
tx3.vout[0].scriptPubKey = CScript() << OP_3 << OP_EQUAL;
|
||||
tx3.vout[0].nValue = 10 * COIN;
|
||||
AddToMempool(pool, entry.Fee(20000LL).FromTx(tx3));
|
||||
AddToMempool(pool, entry.Fee(2000LL).FromTx(tx3));
|
||||
|
||||
pool.TrimToSize(pool.DynamicMemoryUsage() * 3 / 4); // tx3 should pay for tx2 (CPFP)
|
||||
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx1.GetHash())));
|
||||
@@ -481,8 +481,8 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
|
||||
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx2.GetHash())));
|
||||
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx3.GetHash())));
|
||||
|
||||
CFeeRate maxFeeRateRemoved(25000, GetVirtualTransactionSize(CTransaction(tx3)) + GetVirtualTransactionSize(CTransaction(tx2)));
|
||||
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + 1000);
|
||||
CFeeRate maxFeeRateRemoved(2500, GetVirtualTransactionSize(CTransaction(tx3)) + GetVirtualTransactionSize(CTransaction(tx2)));
|
||||
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + DEFAULT_INCREMENTAL_RELAY_FEE);
|
||||
|
||||
CMutableTransaction tx4 = CMutableTransaction();
|
||||
tx4.vin.resize(2);
|
||||
@@ -532,10 +532,10 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
|
||||
tx7.vout[1].scriptPubKey = CScript() << OP_7 << OP_EQUAL;
|
||||
tx7.vout[1].nValue = 10 * COIN;
|
||||
|
||||
AddToMempool(pool, entry.Fee(7000LL).FromTx(tx4));
|
||||
AddToMempool(pool, entry.Fee(1000LL).FromTx(tx5));
|
||||
AddToMempool(pool, entry.Fee(1100LL).FromTx(tx6));
|
AddToMempool(pool, entry.Fee(9000LL).FromTx(tx7));
AddToMempool(pool, entry.Fee(700LL).FromTx(tx4));
AddToMempool(pool, entry.Fee(100LL).FromTx(tx5));
AddToMempool(pool, entry.Fee(110LL).FromTx(tx6));
AddToMempool(pool, entry.Fee(900LL).FromTx(tx7));

// we only require this to remove, at max, 2 txn, because it's not clear what we're really optimizing for aside from that
pool.TrimToSize(pool.DynamicMemoryUsage() - 1);
@@ -544,8 +544,8 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx7.GetHash())));

if (!pool.exists(GenTxid::Txid(tx5.GetHash())))
AddToMempool(pool, entry.Fee(1000LL).FromTx(tx5));
AddToMempool(pool, entry.Fee(9000LL).FromTx(tx7));
AddToMempool(pool, entry.Fee(100LL).FromTx(tx5));
AddToMempool(pool, entry.Fee(900LL).FromTx(tx7));

pool.TrimToSize(pool.DynamicMemoryUsage() / 2); // should maximize mempool size by only removing 5/7
BOOST_CHECK(pool.exists(GenTxid::Txid(tx4.GetHash())));
@@ -553,34 +553,34 @@ BOOST_AUTO_TEST_CASE(MempoolSizeLimitTest)
BOOST_CHECK(pool.exists(GenTxid::Txid(tx6.GetHash())));
BOOST_CHECK(!pool.exists(GenTxid::Txid(tx7.GetHash())));

AddToMempool(pool, entry.Fee(1000LL).FromTx(tx5));
AddToMempool(pool, entry.Fee(9000LL).FromTx(tx7));
AddToMempool(pool, entry.Fee(100LL).FromTx(tx5));
AddToMempool(pool, entry.Fee(900LL).FromTx(tx7));

std::vector<CTransactionRef> vtx;
SetMockTime(42);
SetMockTime(42 + CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + 1000);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), maxFeeRateRemoved.GetFeePerK() + DEFAULT_INCREMENTAL_RELAY_FEE);
// ... we should keep the same min fee until we get a block
pool.removeForBlock(vtx, 1);
SetMockTime(42 + 2*CTxMemPool::ROLLING_FEE_HALFLIFE);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + 1000)/2.0));
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + DEFAULT_INCREMENTAL_RELAY_FEE)/2.0));
// ... then feerate should drop 1/2 each halflife

SetMockTime(42 + 2*CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE/2);
BOOST_CHECK_EQUAL(pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + 1000)/4.0));
BOOST_CHECK_EQUAL(pool.GetMinFee(pool.DynamicMemoryUsage() * 5 / 2).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + DEFAULT_INCREMENTAL_RELAY_FEE)/4.0));
// ... with a 1/2 halflife when mempool is < 1/2 its target size

SetMockTime(42 + 2*CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE/2 + CTxMemPool::ROLLING_FEE_HALFLIFE/4);
BOOST_CHECK_EQUAL(pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + 1000)/8.0));
BOOST_CHECK_EQUAL(pool.GetMinFee(pool.DynamicMemoryUsage() * 9 / 2).GetFeePerK(), llround((maxFeeRateRemoved.GetFeePerK() + DEFAULT_INCREMENTAL_RELAY_FEE)/8.0));
// ... with a 1/4 halflife when mempool is < 1/4 its target size

SetMockTime(42 + 7*CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE/2 + CTxMemPool::ROLLING_FEE_HALFLIFE/4);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), 1000);
// ... but feerate should never drop below 1000
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), DEFAULT_INCREMENTAL_RELAY_FEE);
// ... but feerate should never drop below DEFAULT_INCREMENTAL_RELAY_FEE

SetMockTime(42 + 8*CTxMemPool::ROLLING_FEE_HALFLIFE + CTxMemPool::ROLLING_FEE_HALFLIFE/2 + CTxMemPool::ROLLING_FEE_HALFLIFE/4);
BOOST_CHECK_EQUAL(pool.GetMinFee(1).GetFeePerK(), 0);
// ... unless it has gone all the way to 0 (after getting past 1000/2)
// ... unless it has gone all the way to 0 (after getting past DEFAULT_INCREMENTAL_RELAY_FEE/2)
}
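The decay schedule these assertions trace is easier to see in isolation. Below is a minimal sketch of rolling-minimum-feerate decay under the assumptions the test implies: the halflife shrinks to 1/2 or 1/4 of `ROLLING_FEE_HALFLIFE` when mempool usage is below 1/2 or 1/4 of the size limit passed to `GetMinFee()`, and the rate snaps to zero once it decays past half the incremental relay fee. This is an illustration, not the actual `CTxMemPool` code.

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>

constexpr int64_t ROLLING_FEE_HALFLIFE{60 * 60 * 12}; // seconds
constexpr double INCREMENTAL_RELAY_FEE_PER_K{1000.0}; // illustrative floor, sat/kvB

// Rolling minimum feerate after `seconds_elapsed`, decaying faster when the
// mempool is far below its size limit (sketch of the assumed semantics).
double DecayedMinFeeRate(double last_rate, int64_t seconds_elapsed,
                         size_t usage, size_t size_limit)
{
    int64_t halflife{ROLLING_FEE_HALFLIFE};
    if (usage < size_limit / 4) {
        halflife /= 4; // decay four times as fast when nearly empty
    } else if (usage < size_limit / 2) {
        halflife /= 2; // decay twice as fast when under half full
    }
    const double rate{last_rate / std::pow(2.0, double(seconds_elapsed) / halflife)};
    // Below half the incremental relay fee the rate is treated as zero.
    return rate < INCREMENTAL_RELAY_FEE_PER_K / 2 ? 0.0 : rate;
}
```

Under these assumptions, a rate measured one halflife after the eviction halves at each further halflife, which is exactly the `/2.0`, `/4.0`, and `/8.0` progression the checks above assert.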

inline CTransactionRef make_tx(std::vector<CAmount>&& output_values, std::vector<CTransactionRef>&& inputs=std::vector<CTransactionRef>(), std::vector<uint32_t>&& input_indices=std::vector<uint32_t>())

@@ -12,6 +12,7 @@
#include <node/miner.h>
#include <policy/policy.h>
#include <test/util/random.h>
#include <test/util/transaction_utils.h>
#include <test/util/txmempool.h>
#include <txmempool.h>
#include <uint256.h>
@@ -210,6 +211,9 @@ void MinerTestingSetup::TestPackageSelection(const CScript& scriptPubKey, const
tx.vout.resize(2);
tx.vout[0].nValue = 5000000000LL - 100000000;
tx.vout[1].nValue = 100000000; // 1BTC output
// Increase size to avoid rounding errors: when the feerate is extremely small (i.e. 1sat/kvB), evaluating the fee
// at a smaller transaction size gives us a rounded value of 0.
BulkTransaction(tx, 4000);
Txid hashFreeTx2 = tx.GetHash();
AddToMempool(tx_mempool, entry.Fee(0).SpendsCoinbase(true).FromTx(tx));
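The rounding issue this comment guards against is plain integer arithmetic: at 1 sat/kvB, the fee of a small transaction truncates to zero. A hedged illustration using truncating division (ignoring any minimum-fee adjustment a real `CFeeRate` may apply):

```cpp
#include <cassert>
#include <cstdint>

// Fee for `vsize` vbytes at `sat_per_kvb`, with truncating integer division.
int64_t FeeAt(int64_t sat_per_kvb, int64_t vsize) { return sat_per_kvb * vsize / 1000; }

int main()
{
    assert(FeeAt(1, 300) == 0);  // a small tx at 1 sat/kvB rounds down to zero
    assert(FeeAt(1, 4000) == 4); // bulking the tx to 4000 vB gives a nonzero, exact fee
}
```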

@@ -238,10 +238,10 @@ BOOST_FIXTURE_TEST_CASE(rbf_helper_functions, TestChain100Setup)
BOOST_CHECK(PaysForRBF(high_fee, high_fee - 1, 1, CFeeRate(0), unused_txid).has_value());
BOOST_CHECK(PaysForRBF(high_fee + 1, high_fee, 1, CFeeRate(0), unused_txid).has_value());
// Additional fees must cover the replacement's vsize at incremental relay fee
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 1, 2, incremental_relay_feerate, unused_txid).has_value());
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 2, 2, incremental_relay_feerate, unused_txid) == std::nullopt);
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 2, 2, higher_relay_feerate, unused_txid).has_value());
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 4, 2, higher_relay_feerate, unused_txid) == std::nullopt);
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 1, 11, incremental_relay_feerate, unused_txid).has_value());
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 1, 10, incremental_relay_feerate, unused_txid) == std::nullopt);
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 2, 11, higher_relay_feerate, unused_txid).has_value());
BOOST_CHECK(PaysForRBF(high_fee, high_fee + 4, 20, higher_relay_feerate, unused_txid) == std::nullopt);
BOOST_CHECK(PaysForRBF(low_fee, high_fee, 99999999, incremental_relay_feerate, unused_txid).has_value());
BOOST_CHECK(PaysForRBF(low_fee, high_fee + 99999999, 99999999, incremental_relay_feerate, unused_txid) == std::nullopt);
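These assertions follow the BIP125 rule-4 pattern: a replacement must pay, on top of the fees of what it replaces, at least the incremental relay feerate evaluated over the replacement's own vsize. A minimal sketch of that check (hypothetical names, error details omitted):

```cpp
#include <cstdint>

// True when the replacement pays enough on top of the original fees
// (BIP125 rule-4 style check, simplified from the PaysForRBF assertions above).
bool PaysIncrementalFee(int64_t original_fees, int64_t replacement_fees,
                        int64_t replacement_vsize, int64_t incremental_sat_per_kvb)
{
    const int64_t required_delta{incremental_sat_per_kvb * replacement_vsize / 1000};
    return replacement_fees - original_fees >= required_delta;
}

// E.g. at 1000 sat/kvB (1 sat/vB), a 2-vB replacement needs 2 extra sats:
// high_fee + 1 fails and high_fee + 2 passes, matching the checks above.
```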

@@ -207,4 +207,94 @@ BOOST_AUTO_TEST_CASE(sighash_from_data)
BOOST_CHECK_MESSAGE(sh.GetHex() == sigHashHex, strTest);
}
}

BOOST_AUTO_TEST_CASE(sighash_caching)
{
// Get a script, transaction and parameters as inputs to the sighash function.
CScript scriptcode;
RandomScript(scriptcode);
CScript diff_scriptcode{scriptcode};
diff_scriptcode << OP_1;
CMutableTransaction tx;
RandomTransaction(tx, /*fSingle=*/false);
const auto in_index{static_cast<uint32_t>(m_rng.randrange(tx.vin.size()))};
const auto amount{m_rng.rand<CAmount>()};

// Exercise the sighash function under both legacy and segwit v0.
for (const auto sigversion: {SigVersion::BASE, SigVersion::WITNESS_V0}) {
// For each, run it against all the 6 standard hash types and a few additional random ones.
std::vector<int32_t> hash_types{{SIGHASH_ALL, SIGHASH_SINGLE, SIGHASH_NONE, SIGHASH_ALL | SIGHASH_ANYONECANPAY,
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY, SIGHASH_NONE | SIGHASH_ANYONECANPAY,
SIGHASH_ANYONECANPAY, 0, std::numeric_limits<int32_t>::max()}};
for (int i{0}; i < 10; ++i) {
hash_types.push_back(i % 2 == 0 ? m_rng.rand<int8_t>() : m_rng.rand<int32_t>());
}

// Reuse the same cache across script types. This must not cause any issue as the cached value for one hash type must never
// be confused for another (instantiating the cache within the loop instead would prevent testing this).
SigHashCache cache;
for (const auto hash_type: hash_types) {
const bool expect_one{sigversion == SigVersion::BASE && ((hash_type & 0x1f) == SIGHASH_SINGLE) && in_index >= tx.vout.size()};

// The result of computing the sighash should be the same with or without cache.
const auto sighash_with_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)};
const auto sighash_no_cache{SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)};
BOOST_CHECK_EQUAL(sighash_with_cache, sighash_no_cache);

// Calling the cached version again should return the same value again.
BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache));

// While here we might as well also check that the result for legacy is the same as for the old SignatureHash() function.
if (sigversion == SigVersion::BASE) {
BOOST_CHECK_EQUAL(sighash_with_cache, SignatureHashOld(scriptcode, CTransaction(tx), in_index, hash_type));
}

// Calling with a different scriptcode (for instance in case a CODESEP is encountered) will not return the cache value but
// overwrite it. The sighash will always be different except in case of legacy SIGHASH_SINGLE bug.
const auto sighash_with_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache)};
const auto sighash_no_cache2{SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, nullptr)};
BOOST_CHECK_EQUAL(sighash_with_cache2, sighash_no_cache2);
if (!expect_one) {
BOOST_CHECK_NE(sighash_with_cache, sighash_with_cache2);
} else {
BOOST_CHECK_EQUAL(sighash_with_cache, sighash_with_cache2);
BOOST_CHECK_EQUAL(sighash_with_cache, uint256::ONE);
}

// Calling the cached version again should return the same value again.
BOOST_CHECK_EQUAL(sighash_with_cache2, SignatureHash(diff_scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache));

// And if we store a different value for this scriptcode and hash type it will return that instead.
{
HashWriter h{};
h << 42;
cache.Store(hash_type, scriptcode, h);
const auto stored_hash{h.GetHash()};
BOOST_CHECK(cache.Load(hash_type, scriptcode, h));
const auto loaded_hash{h.GetHash()};
BOOST_CHECK_EQUAL(stored_hash, loaded_hash);
}

// And using this mutated cache with the sighash function will return the new value (except in the legacy SIGHASH_SINGLE bug
// case in which it'll return 1).
if (!expect_one) {
BOOST_CHECK_NE(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), sighash_with_cache);
HashWriter h{};
BOOST_CHECK(cache.Load(hash_type, scriptcode, h));
h << hash_type;
const auto new_hash{h.GetHash()};
BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), new_hash);
} else {
BOOST_CHECK_EQUAL(SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache), uint256::ONE);
}

// Wipe the cache and restore the correct cached value for this scriptcode and hash_type before starting the next iteration.
HashWriter dummy{};
cache.Store(hash_type, diff_scriptcode, dummy);
(void)SignatureHash(scriptcode, tx, in_index, hash_type, amount, sigversion, nullptr, &cache);
BOOST_CHECK(cache.Load(hash_type, scriptcode, dummy) || expect_one);
}
}
}
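The contract the test depends on is that the cache holds the midstate for exactly one (hash type, scriptcode) pair at a time: a lookup with any other key misses, and a store silently overwrites the previous entry. A single-slot cache mirroring those assumed semantics (this is a sketch, not the real `SigHashCache`):

```cpp
#include <cstdint>
#include <optional>
#include <utility>
#include <vector>

// Single-slot cache keyed on (hash_type, scriptcode). `Value` stands in for
// the hasher midstate the real cache would hold.
template <typename Value>
class SingleSlotSigHashCache
{
    int32_t m_hash_type{0};
    std::vector<uint8_t> m_scriptcode;
    std::optional<Value> m_value;

public:
    // Returns the cached value only on an exact key match.
    std::optional<Value> Load(int32_t hash_type, const std::vector<uint8_t>& scriptcode) const
    {
        if (m_value && m_hash_type == hash_type && m_scriptcode == scriptcode) return m_value;
        return std::nullopt;
    }
    // Unconditionally overwrites whatever was cached before.
    void Store(int32_t hash_type, std::vector<uint8_t> scriptcode, Value value)
    {
        m_hash_type = hash_type;
        m_scriptcode = std::move(scriptcode);
        m_value = std::move(value);
    }
};
```

That overwrite-on-mismatch behavior is why computing the sighash for `diff_scriptcode` above evicts the entry for `scriptcode`, and why a value stored under one hash type can never be served for another. The `expect_one` branches cover the legacy SIGHASH_SINGLE corner case, where the input index exceeds the output count and the legacy sighash is defined to be 1.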

BOOST_AUTO_TEST_SUITE_END()

@@ -1144,4 +1144,159 @@ BOOST_AUTO_TEST_CASE(max_standard_legacy_sigops)
BOOST_CHECK(!::AreInputsStandard(CTransaction(tx_max_sigops), coins));
}

/** Sanity check the return value of SpendsNonAnchorWitnessProg for various output types. */
BOOST_AUTO_TEST_CASE(spends_witness_prog)
{
CCoinsView coins_dummy;
CCoinsViewCache coins(&coins_dummy);
CKey key;
key.MakeNewKey(true);
const CPubKey pubkey{key.GetPubKey()};
CMutableTransaction tx_create{}, tx_spend{};
tx_create.vout.emplace_back(0, CScript{});
tx_spend.vin.emplace_back(Txid{}, 0);
std::vector<std::vector<uint8_t>> sol_dummy;

// CNoDestination, PubKeyDestination, PKHash, ScriptHash, WitnessV0ScriptHash, WitnessV0KeyHash,
// WitnessV1Taproot, PayToAnchor, WitnessUnknown.
static_assert(std::variant_size_v<CTxDestination> == 9);

// Go through all defined output types and sanity check SpendsNonAnchorWitnessProg.

// P2PK
tx_create.vout[0].scriptPubKey = GetScriptForDestination(PubKeyDestination{pubkey});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEY);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2PKH
tx_create.vout[0].scriptPubKey = GetScriptForDestination(PKHash{pubkey});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::PUBKEYHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH
auto redeem_script{CScript{} << OP_1 << OP_CHECKSIG};
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash{redeem_script});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << OP_0 << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();

// native P2WSH
const auto witness_script{CScript{} << OP_12 << OP_HASH160 << OP_DUP << OP_EQUAL};
tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash{witness_script});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH-wrapped P2WSH
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// native P2WPKH
tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV0KeyHash{pubkey});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V0_KEYHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH-wrapped P2WPKH
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2TR
tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessV1Taproot{XOnlyPubKey{pubkey}});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_V1_TAPROOT);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH-wrapped P2TR (undefined, non-standard)
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2A
tx_create.vout[0].scriptPubKey = GetScriptForDestination(PayToAnchor{});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::ANCHOR);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH-wrapped P2A (undefined, non-standard)
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();

// Undefined version 1 witness program
tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{1, {0x42, 0x42}});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// P2SH-wrapped undefined version 1 witness program
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// Various undefined version >1 32-byte witness programs.
const auto program{ToByteVector(XOnlyPubKey{pubkey})};
for (int i{2}; i <= 16; ++i) {
tx_create.vout[0].scriptPubKey = GetScriptForDestination(WitnessUnknown{i, program});
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::WITNESS_UNKNOWN);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));

// It's also detected within P2SH.
redeem_script = tx_create.vout[0].scriptPubKey;
tx_create.vout[0].scriptPubKey = GetScriptForDestination(ScriptHash(redeem_script));
BOOST_CHECK_EQUAL(Solver(tx_create.vout[0].scriptPubKey, sol_dummy), TxoutType::SCRIPTHASH);
tx_spend.vin[0].prevout.hash = tx_create.GetHash();
tx_spend.vin[0].scriptSig = CScript{} << ToByteVector(redeem_script);
AddCoins(coins, CTransaction{tx_create}, 0, false);
BOOST_CHECK(::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
tx_spend.vin[0].scriptSig.clear();
BOOST_CHECK(!::SpendsNonAnchorWitnessProg(CTransaction{tx_spend}, coins));
}
}
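The classification the test walks through can be restated over raw scriptPubKey bytes: a witness program is a version opcode (OP_0, or OP_1 through OP_16) followed by a single direct push of 2 to 40 bytes, and the anchor output is assumed here to be the specific version-1 program OP_1 &lt;0x4e73&gt;. A self-contained sketch under those assumptions:

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

// True if `spk` is a segwit output: version byte (OP_0 = 0x00 or OP_1..OP_16 =
// 0x51..0x60) followed by one direct push of 2..40 bytes spanning the rest.
bool IsWitnessProgram(const std::vector<uint8_t>& spk)
{
    if (spk.size() < 4 || spk.size() > 42) return false;
    if (spk[0] != 0x00 && (spk[0] < 0x51 || spk[0] > 0x60)) return false;
    return spk[1] >= 2 && spk[1] <= 40 && spk.size() == spk[1] + 2u;
}

// True for the pay-to-anchor output: OP_1 followed by the 2-byte program 0x4e73.
bool IsAnchor(const std::vector<uint8_t>& spk)
{
    return spk == std::vector<uint8_t>{0x51, 0x02, 0x4e, 0x73};
}

// The predicate exercised above, per prevout: any witness program except the anchor.
bool IsNonAnchorWitnessProg(const std::vector<uint8_t>& spk)
{
    return IsWitnessProgram(spk) && !IsAnchor(spk);
}
```

In the P2SH-wrapped cases the test detects the program through the scriptSig: the same predicate would be applied to the redeem script pushed there.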

BOOST_AUTO_TEST_SUITE_END()

@@ -33,7 +33,7 @@ class DebugLogHelper

public:
explicit DebugLogHelper(std::string message, MatchFn match = [](const std::string*){ return true; });
~DebugLogHelper() { check_found(); }
~DebugLogHelper() noexcept(false) { check_found(); }
};

#define ASSERT_DEBUG_LOG(message) DebugLogHelper UNIQUE_NAME(debugloghelper)(message)
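The `noexcept(false)` in this diff matters because destructors are implicitly `noexcept` since C++11: if `check_found()` throws from a plain destructor, `std::terminate` is called before the test framework can report anything. A standalone illustration:

```cpp
#include <iostream>
#include <stdexcept>

struct Checker {
    bool found{false};
    // Without noexcept(false), a throwing destructor would call std::terminate.
    ~Checker() noexcept(false)
    {
        if (!found) throw std::runtime_error("expected log message not found");
    }
};

int main()
{
    try {
        Checker c; // destroyed at end of scope, throws
        (void)c;
    } catch (const std::exception& e) {
        std::cout << "caught: " << e.what() << '\n'; // reachable only with noexcept(false)
    }
}
```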

@@ -39,6 +39,7 @@
#include <streams.h>
#include <test/util/net.h>
#include <test/util/random.h>
#include <test/util/transaction_utils.h>
#include <test/util/txmempool.h>
#include <txdb.h>
#include <txmempool.h>
@@ -571,6 +572,9 @@ void TestChain100Setup::MockMempoolMinFee(const CFeeRate& target_feerate)
CMutableTransaction mtx = CMutableTransaction();
mtx.vin.emplace_back(COutPoint{Txid::FromUint256(m_rng.rand256()), 0});
mtx.vout.emplace_back(1 * COIN, GetScriptForDestination(WitnessV0ScriptHash(CScript() << OP_TRUE)));
// Set a large size so that the fee evaluated at target_feerate (which is usually in sats/kvB) is an integer.
// Otherwise, GetMinFee() may end up slightly different from target_feerate.
BulkTransaction(mtx, 4000);
const auto tx{MakeTransactionRef(mtx)};
LockPoints lp;
// The new mempool min feerate is equal to the removed package's feerate + incremental feerate.

@@ -9,6 +9,13 @@

#ifdef ENABLE_TRACING

// Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=103395
// systemtap 4.6 on 32-bit ARM triggers internal compiler error
// (this workaround is included in systemtap 4.7+)
#if defined(__arm__)
# define STAP_SDT_ARG_CONSTRAINT g
#endif

// Setting SDT_USE_VARIADIC lets systemtap (sys/sdt.h) know that we want to use
// the optional variadic macros to define tracepoints.
#define SDT_USE_VARIADIC 1

@@ -1025,26 +1025,28 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
// Even though just checking direct mempool parents for inheritance would be sufficient, we
// check using the full ancestor set here because it's more convenient to use what we have
// already calculated.
if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
// Single transaction contexts only.
if (args.m_allow_sibling_eviction && err->second != nullptr) {
// We should only be considering where replacement is considered valid as well.
Assume(args.m_allow_replacement);
if (!args.m_bypass_limits) {
if (const auto err{SingleTRUCChecks(ws.m_ptx, ws.m_ancestors, ws.m_conflicts, ws.m_vsize)}) {
// Single transaction contexts only.
if (args.m_allow_sibling_eviction && err->second != nullptr) {
// We should only be considering where replacement is considered valid as well.
Assume(args.m_allow_replacement);

// Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
// included in RBF checks.
ws.m_conflicts.insert(err->second->GetHash());
// Adding the sibling to m_iters_conflicting here means that it doesn't count towards
// RBF Carve Out above. This is correct, since removing to-be-replaced transactions from
// the descendant count is done separately in SingleTRUCChecks for TRUC transactions.
ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value());
ws.m_sibling_eviction = true;
// The sibling will be treated as part of the to-be-replaced set in ReplacementChecks.
// Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC
// (which is normally done in PreChecks). However, the only way a TRUC transaction can
// have a non-TRUC and non-BIP125 descendant is due to a reorg.
} else {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first);
// Potential sibling eviction. Add the sibling to our list of mempool conflicts to be
// included in RBF checks.
ws.m_conflicts.insert(err->second->GetHash());
// Adding the sibling to m_iters_conflicting here means that it doesn't count towards
// RBF Carve Out above. This is correct, since removing to-be-replaced transactions from
// the descendant count is done separately in SingleTRUCChecks for TRUC transactions.
ws.m_iters_conflicting.insert(m_pool.GetIter(err->second->GetHash()).value());
ws.m_sibling_eviction = true;
// The sibling will be treated as part of the to-be-replaced set in ReplacementChecks.
// Note that we are not checking whether it opts in to replaceability via BIP125 or TRUC
// (which is normally done in PreChecks). However, the only way a TRUC transaction can
// have a non-TRUC and non-BIP125 descendant is due to a reorg.
} else {
return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "TRUC-violation", err->first);
}
}
}
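Stripped of the mempool plumbing, the branch above is a small decision table: TRUC checks only run when limits are not bypassed, and a violation is downgraded to sibling eviction (the sibling joins the to-be-replaced set for RBF) only when both sibling eviction and replacement are allowed and a concrete sibling was reported. A hypothetical distillation:

```cpp
#include <optional>
#include <string>

enum class TrucOutcome { Accept, EvictSibling, Reject };

struct TrucError {
    std::string reason;
    const void* sibling{nullptr}; // stand-in for the conflicting sibling tx, if any
};

// Hypothetical distillation of the control flow above; `err` is the result of
// the single-transaction TRUC checks.
TrucOutcome ClassifyTrucResult(bool bypass_limits, bool allow_sibling_eviction,
                               const std::optional<TrucError>& err)
{
    if (bypass_limits || !err) return TrucOutcome::Accept;
    if (allow_sibling_eviction && err->sibling != nullptr) {
        // The sibling is added to the conflict set and handled by the RBF checks.
        return TrucOutcome::EvictSibling;
    }
    return TrucOutcome::Reject; // "TRUC-violation"
}
```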

@@ -1236,13 +1238,8 @@ bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
// Check input scripts and signatures.
// This is done last to help prevent CPU exhaustion denial-of-service attacks.
if (!CheckInputScripts(tx, state, m_view, scriptVerifyFlags, true, false, ws.m_precomputed_txdata, GetValidationCache())) {
// SCRIPT_VERIFY_CLEANSTACK requires SCRIPT_VERIFY_WITNESS, so we
// need to turn both off, and compare against just turning off CLEANSTACK
// to see if the failure is specifically due to witness validation.
TxValidationState state_dummy; // Want reported failures to be from first CheckInputScripts
if (!tx.HasWitness() && CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~(SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_CLEANSTACK), true, false, ws.m_precomputed_txdata, GetValidationCache()) &&
!CheckInputScripts(tx, state_dummy, m_view, scriptVerifyFlags & ~SCRIPT_VERIFY_CLEANSTACK, true, false, ws.m_precomputed_txdata, GetValidationCache())) {
// Only the witness is missing, so the transaction itself may be fine.
// Detect a failure due to a missing witness so that p2p code can handle rejection caching appropriately.
if (!tx.HasWitness() && SpendsNonAnchorWitnessProg(tx, m_view)) {
state.Invalid(TxValidationResult::TX_WITNESS_STRIPPED,
state.GetRejectReason(), state.GetDebugMessage());
}
@@ -2212,34 +2209,17 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
if (pvChecks) {
pvChecks->emplace_back(std::move(check));
} else if (auto result = check(); result.has_value()) {
// Tx failures never trigger disconnections/bans.
// This is so that network splits aren't triggered
// either due to non-consensus relay policies (such as
// non-standard DER encodings or non-null dummy
// arguments) or due to new consensus rules introduced in
// soft forks.
if (flags & STANDARD_NOT_MANDATORY_VERIFY_FLAGS) {
// Check whether the failure was caused by a
// non-mandatory script verification check, such as
// non-standard DER encodings or non-null dummy
// arguments; if so, ensure we return NOT_STANDARD
// instead of CONSENSUS to avoid downstream users
// splitting the network between upgraded and
// non-upgraded nodes by banning CONSENSUS-failing
// data providers.
CScriptCheck check2(txdata.m_spent_outputs[i], tx, validation_cache.m_signature_cache, i,
flags & ~STANDARD_NOT_MANDATORY_VERIFY_FLAGS, cacheSigStore, &txdata);
auto mandatory_result = check2();
if (!mandatory_result.has_value()) {
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("non-mandatory-script-verify-flag (%s)", ScriptErrorString(result->first)), result->second);
} else {
// If the second check failed, it failed due to a mandatory script verification
// flag, but the first check might have failed on a non-mandatory script
// verification flag.
//
// Avoid reporting a mandatory script check failure with a non-mandatory error
// string by reporting the error from the second check.
result = mandatory_result;
}
return state.Invalid(TxValidationResult::TX_NOT_STANDARD, strprintf("mempool-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
} else {
return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
}

// MANDATORY flag failures correspond to
// TxValidationResult::TX_CONSENSUS.
return state.Invalid(TxValidationResult::TX_CONSENSUS, strprintf("mandatory-script-verify-flag-failed (%s)", ScriptErrorString(result->first)), result->second);
}
}
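The structure being simplified in this hunk is a two-pass check: verify the input scripts once under the full policy flag set; on failure, verify again with the policy-only flags masked out. If the mandatory-only pass succeeds, the failure was policy-only and is reported as not-standard; if it also fails, the transaction is consensus-invalid. A sketch of that classification with hypothetical names:

```cpp
#include <cstdint>
#include <functional>

enum class TxResult { Valid, NotStandard, ConsensusInvalid };

// `run_checks(flags)` returns true when all input scripts verify under `flags`.
// STANDARD_ONLY_FLAGS stands in for STANDARD_NOT_MANDATORY_VERIFY_FLAGS.
TxResult ClassifyScriptFailure(const std::function<bool(uint32_t)>& run_checks,
                               uint32_t policy_flags, uint32_t STANDARD_ONLY_FLAGS)
{
    if (run_checks(policy_flags)) return TxResult::Valid;
    // Second pass with the policy-only flags removed: mandatory (consensus) flags only.
    if (run_checks(policy_flags & ~STANDARD_ONLY_FLAGS)) {
        return TxResult::NotStandard; // failed only a non-mandatory, policy-only check
    }
    return TxResult::ConsensusInvalid; // failed even the mandatory flags
}
```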

@@ -2995,15 +2975,17 @@ static void UpdateTipLog(
{

AssertLockHeld(::cs_main);
LogPrintf("%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
prefix, func_name,
tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
log(tip->nChainWork.getdouble()) / log(2.0), tip->m_chain_tx_count,
FormatISO8601DateTime(tip->GetBlockTime()),
chainman.GuessVerificationProgress(tip),
coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
coins_tip.GetCacheSize(),
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");

// Disable rate limiting in LogPrintLevel_ so this source location may log during IBD.
LogPrintLevel_(BCLog::LogFlags::ALL, BCLog::Level::Info, /*should_ratelimit=*/false, "%s%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n",
prefix, func_name,
tip->GetBlockHash().ToString(), tip->nHeight, tip->nVersion,
log(tip->nChainWork.getdouble()) / log(2.0), tip->m_chain_tx_count,
FormatISO8601DateTime(tip->GetBlockTime()),
chainman.GuessVerificationProgress(tip),
coins_tip.DynamicMemoryUsage() * (1.0 / (1 << 20)),
coins_tip.GetCacheSize(),
!warning_messages.empty() ? strprintf(" warning='%s'", warning_messages) : "");
}
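The `/*should_ratelimit=*/false` argument exists so that tip updates, which fire for every connected block during IBD, can keep logging from this one call site. The sketch below is only a generic illustration of a per-call-site limiter with such a bypass; it is not Bitcoin Core's logging implementation.

```cpp
#include <chrono>
#include <cstdint>

// Allows at most `max_per_window` messages per window for one call site,
// unless the caller opts out of rate limiting entirely.
class LogRateLimiter
{
    using Clock = std::chrono::steady_clock;
    Clock::time_point m_window_start{Clock::now()};
    uint32_t m_count{0};

public:
    bool ShouldLog(bool should_ratelimit,
                   uint32_t max_per_window = 100,
                   std::chrono::seconds window = std::chrono::seconds{3600})
    {
        if (!should_ratelimit) return true; // e.g. tip updates during IBD
        const auto now{Clock::now()};
        if (now - m_window_start >= window) {
            m_window_start = now; // start a fresh window
            m_count = 0;
        }
        return ++m_count <= max_per_window;
    }
};
```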

void Chainstate::UpdateTip(const CBlockIndex* pindexNew)

@@ -132,6 +132,21 @@ public:
/** Return path to main database filename */
std::string Filename() override { return fs::PathToString(env->Directory() / m_filename); }

std::vector<fs::path> Files() override
{
std::vector<fs::path> files;
files.emplace_back(env->Directory() / m_filename);
if (env->m_databases.size() == 1) {
files.emplace_back(env->Directory() / "db.log");
files.emplace_back(env->Directory() / ".walletlock");
files.emplace_back(env->Directory() / "database" / "log.0000000001");
files.emplace_back(env->Directory() / "database");
// Note that this list is not exhaustive as BDB may create more log files, and possibly other ones too
// However it should be good enough for the only calls to Files()
}
return files;
}

std::string Format() override { return "bdb"; }
/**
* Pointer to shared database environment.

@@ -589,15 +589,15 @@ util::Result<SelectionResult> SelectCoinsSRD(const std::vector<OutputGroup>& utx

/** Find a subset of the OutputGroups that is at least as large as, but as close as possible to, the
* target amount; solve subset sum.
* param@[in] groups OutputGroups to choose from, sorted by value in descending order.
* param@[in] nTotalLower Total (effective) value of the UTXOs in groups.
* param@[in] nTargetValue Subset sum target, not including change.
* param@[out] vfBest Boolean vector representing the subset chosen that is closest to
* @param[in] groups OutputGroups to choose from, sorted by value in descending order.
* @param[in] nTotalLower Total (effective) value of the UTXOs in groups.
* @param[in] nTargetValue Subset sum target, not including change.
* @param[out] vfBest Boolean vector representing the subset chosen that is closest to
* nTargetValue, with indices corresponding to groups. If the ith
* entry is true, that means the ith group in groups was selected.
* param@[out] nBest Total amount of subset chosen that is closest to nTargetValue.
* paramp[in] max_selection_weight The maximum allowed weight for a selection result to be valid.
* param@[in] iterations Maximum number of tries.
* @param[out] nBest Total amount of subset chosen that is closest to nTargetValue.
* @param[in] max_selection_weight The maximum allowed weight for a selection result to be valid.
* @param[in] iterations Maximum number of tries.
*/
static void ApproximateBestSubset(FastRandomContext& insecure_rand, const std::vector<OutputGroup>& groups,
const CAmount& nTotalLower, const CAmount& nTargetValue,

@@ -170,6 +170,9 @@ public:
/** Return path to main database file for logs and error messages. */
virtual std::string Filename() = 0;

/** Return paths to all database created files */
virtual std::vector<fs::path> Files() = 0;

virtual std::string Format() = 0;

std::atomic<unsigned int> nUpdateCounter;

@@ -288,11 +288,17 @@ bool CreateFromDump(const ArgsManager& args, const std::string& name, const fs::

dump_file.close();
}
// On failure, gather the paths to remove
std::vector<fs::path> paths_to_remove = wallet->GetDatabase().Files();
if (!name.empty()) paths_to_remove.push_back(wallet_path);

wallet.reset(); // The pointer deleter will close the wallet for us.

// Remove the wallet dir if we have a failure
if (!ret) {
fs::remove_all(wallet_path);
for (const auto& p : paths_to_remove) {
fs::remove(p);
}
}

return ret;

@@ -65,6 +65,7 @@ public:

/** Return path to main database file for logs and error messages. */
std::string Filename() override { return fs::PathToString(m_filepath); }
std::vector<fs::path> Files() override { return {m_filepath}; }

std::string Format() override { return "bdb_ro"; }

@@ -1486,7 +1486,6 @@ RPCHelpMan sendall()
CoinFilterParams coins_params;
coins_params.min_amount = 0;
for (const COutput& output : AvailableCoins(*pwallet, &coin_control, fee_rate, coins_params).All()) {
CHECK_NONFATAL(output.input_bytes > 0);
if (send_max && fee_rate.GetFee(output.input_bytes) > output.txout.nValue) {
continue;
}
@@ -1505,6 +1504,9 @@ RPCHelpMan sendall()

// estimate final size of tx
const TxSize tx_size{CalculateMaximumSignedTxSize(CTransaction(rawTx), pwallet.get())};
if (tx_size.vsize == -1) {
throw JSONRPCError(RPC_WALLET_ERROR, "Unable to determine the size of the transaction, the wallet contains unsolvable descriptors");
}
const CAmount fee_from_size{fee_rate.GetFee(tx_size.vsize)};
const std::optional<CAmount> total_bump_fees{pwallet->chain().calculateCombinedBumpFee(outpoints_spent, fee_rate)};
CAmount effective_value = total_input_value - fee_from_size - total_bump_fees.value_or(0);

@@ -63,6 +63,7 @@ public:
void IncrementUpdateCounter() override { ++nUpdateCounter; }
void ReloadDbEnv() override {}
std::string Filename() override { return "dummy"; }
std::vector<fs::path> Files() override { return {}; }
std::string Format() override { return "dummy"; }
std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override { return std::make_unique<DummyBatch>(); }
};

@@ -123,14 +123,14 @@ FilteredOutputGroups GroupOutputs(const CWallet& wallet,
* the solution (according to the waste metric) will be chosen. If a valid input cannot be found from any
* single OutputType, fallback to running `ChooseSelectionResult()` over all available coins.
*
* param@[in] chain The chain interface to get information on unconfirmed UTXOs bump fees
* param@[in] nTargetValue The target value
* param@[in] groups The grouped outputs mapped by coin eligibility filters
* param@[in] coin_selection_params Parameters for the coin selection
* param@[in] allow_mixed_output_types Relax restriction that SelectionResults must be of the same OutputType
* @param[in] chain The chain interface to get information on bump fees for unconfirmed UTXOs
* @param[in] nTargetValue The target value
* @param[in] groups The grouped outputs mapped by coin eligibility filters
* @param[in] coin_selection_params Parameters for the coin selection
* @param[in] allow_mixed_output_types Relax restriction that SelectionResults must be of the same OutputType
* returns If successful, a SelectionResult containing the input set
* If failed, returns (1) an empty error message if the target was not reached (general "Insufficient funds")
* or (2) an specific error message if there was something particularly wrong (e.g. a selection
* or (2) a specific error message if there was something particularly wrong (e.g. a selection
* result that surpassed the tx max weight size).
*/
util::Result<SelectionResult> AttemptSelection(interfaces::Chain& chain, const CAmount& nTargetValue, OutputGroupTypeMap& groups,
@@ -141,13 +141,13 @@ util::Result<SelectionResult> AttemptSelection(interfaces::Chain& chain, const C
* Multiple coin selection algorithms will be run and the input set that produces the least waste
* (according to the waste metric) will be chosen.
*
* param@[in] chain The chain interface to get information on unconfirmed UTXOs bump fees
* param@[in] nTargetValue The target value
* param@[in] groups The struct containing the outputs grouped by script and divided by (1) positive only outputs and (2) all outputs (positive + negative).
* param@[in] coin_selection_params Parameters for the coin selection
* @param[in] chain The chain interface to get information on bump fees for unconfirmed UTXOs
* @param[in] nTargetValue The target value
* @param[in] groups The struct containing the outputs grouped by script and divided by (1) positive only outputs and (2) all outputs (positive + negative).
* @param[in] coin_selection_params Parameters for the coin selection
* returns If successful, a SelectionResult containing the input set
* If failed, returns (1) an empty error message if the target was not reached (general "Insufficient funds")
* or (2) an specific error message if there was something particularly wrong (e.g. a selection
* or (2) a specific error message if there was something particularly wrong (e.g. a selection
* result that surpassed the tx max weight size).
*/
util::Result<SelectionResult> ChooseSelectionResult(interfaces::Chain& chain, const CAmount& nTargetValue, Groups& groups, const CoinSelectionParams& coin_selection_params);
@@ -181,10 +181,10 @@ util::Result<PreSelectedInputs> FetchSelectedInputs(const CWallet& wallet, const

/**
* Select a set of coins such that nTargetValue is met; never select unconfirmed coins if they are not ours
* param@[in] wallet The wallet which provides data necessary to spend the selected coins
* param@[in] available_coins The struct of coins, organized by OutputType, available for selection prior to filtering
* param@[in] nTargetValue The target value
* param@[in] coin_selection_params Parameters for this coin selection such as feerates, whether to avoid partial spends,
* @param[in] wallet The wallet which provides data necessary to spend the selected coins
* @param[in] available_coins The struct of coins, organized by OutputType, available for selection prior to filtering
* @param[in] nTargetValue The target value
* @param[in] coin_selection_params Parameters for this coin selection such as feerates, whether to avoid partial spends,
* and whether to subtract the fee from the outputs.
* returns If successful, a SelectionResult containing the selected coins
* If failed, returns (1) an empty error message if the target was not reached (general "Insufficient funds")

@@ -112,12 +112,12 @@ Mutex SQLiteDatabase::g_sqlite_mutex;
int SQLiteDatabase::g_sqlite_count = 0;

SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, const DatabaseOptions& options, bool mock)
: WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync)
: WalletDatabase(), m_mock(mock), m_dir_path(dir_path), m_file_path(fs::PathToString(file_path)), m_write_semaphore(1), m_use_unsafe_sync(options.use_unsafe_sync)
{
{
LOCK(g_sqlite_mutex);
LogPrintf("Using SQLite Version %s\n", SQLiteDatabaseVersion());
LogPrintf("Using wallet %s\n", m_dir_path);
LogPrintf("Using wallet %s\n", fs::PathToString(m_dir_path));

if (++g_sqlite_count == 1) {
// Setup logging
@@ -253,7 +253,7 @@ void SQLiteDatabase::Open()

if (m_db == nullptr) {
if (!m_mock) {
TryCreateDirectories(fs::PathFromString(m_dir_path));
TryCreateDirectories(m_dir_path);
}
int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr);
if (ret != SQLITE_OK) {

@@ -105,7 +105,7 @@ class SQLiteDatabase : public WalletDatabase
private:
const bool m_mock{false};

const std::string m_dir_path;
const fs::path m_dir_path;

const std::string m_file_path;

@@ -166,6 +166,14 @@ public:
void IncrementUpdateCounter() override { ++nUpdateCounter; }

std::string Filename() override { return m_file_path; }
/** Return paths to all database created files */
std::vector<fs::path> Files() override
{
std::vector<fs::path> files;
files.emplace_back(m_dir_path / fs::PathFromString(m_file_path));
files.emplace_back(m_dir_path / fs::PathFromString(m_file_path + "-journal"));
return files;
}
std::string Format() override { return "sqlite"; }

/** Make a SQLiteBatch connected to this database */

@@ -123,6 +123,7 @@ public:
void ReloadDbEnv() override {}

std::string Filename() override { return "mockable"; }
std::vector<fs::path> Files() override { return {}; }
std::string Format() override { return "mock"; }
std::unique_ptr<DatabaseBatch> MakeBatch(bool flush_on_close = true) override { return std::make_unique<MockableBatch>(m_records, m_pass); }
};

@@ -501,6 +501,8 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b
const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name));
auto wallet_file = wallet_path / "wallet.dat";
std::shared_ptr<CWallet> wallet;
bool wallet_file_copied = false;
bool created_parent_dir = false;

try {
if (!fs::exists(backup_file)) {
@@ -509,13 +511,34 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b
return nullptr;
}

if (fs::exists(wallet_path) || !TryCreateDirectories(wallet_path)) {
error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(wallet_path)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
// Wallet directories are allowed to exist, but must not contain a .dat file.
// Any existing wallet database is treated as a hard failure to prevent overwriting.
if (fs::exists(wallet_path)) {
// If this is a file, it is the db and we don't want to overwrite it.
if (!fs::is_directory(wallet_path)) {
error = Untranslated(strprintf("Failed to restore wallet. Database file exists '%s'.", fs::PathToString(wallet_path)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
}

// Check we are not going to overwrite an existing db file
if (fs::exists(wallet_file)) {
error = Untranslated(strprintf("Failed to restore wallet. Database file exists in '%s'.", fs::PathToString(wallet_file)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
}
} else {
// The directory doesn't exist, create it
if (!TryCreateDirectories(wallet_path)) {
error = Untranslated(strprintf("Failed to restore database path '%s'.", fs::PathToString(wallet_path)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
}
created_parent_dir = true;
}

fs::copy_file(backup_file, wallet_file, fs::copy_options::none);
wallet_file_copied = true;

if (load_after_restore) {
wallet = LoadWallet(context, wallet_name, load_on_start, options, status, error, warnings);
@@ -528,7 +551,13 @@ std::shared_ptr<CWallet> RestoreWallet(WalletContext& context, const fs::path& b

// Remove created wallet path only when loading fails
if (load_after_restore && !wallet) {
fs::remove_all(wallet_path);
if (wallet_file_copied) fs::remove(wallet_file);
// Clean up the parent directory if we created it during restoration.
// As we have created it, it must be empty after deleting the wallet file.
if (created_parent_dir) {
Assume(fs::is_empty(wallet_path));
fs::remove(wallet_path);
}
}

return wallet;
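The guards above are pure filesystem logic: a regular file at the wallet path, or an existing `wallet.dat` inside it, aborts the restore, while a missing directory is created and remembered so a failed restore can remove it again. A simplified sketch of just those guards using `std::filesystem` (hypothetical helper, coarse error handling):

```cpp
#include <filesystem>
#include <optional>
#include <string>

namespace fs = std::filesystem;

// Returns whether the wallet directory was created by us (so a failed restore
// may remove it again), or std::nullopt when the restore must abort.
std::optional<bool> PrepareRestoreDir(const fs::path& wallet_path, std::string& error)
{
    const fs::path wallet_file{wallet_path / "wallet.dat"};
    if (fs::exists(wallet_path)) {
        if (!fs::is_directory(wallet_path)) {
            error = "database file exists at " + wallet_path.string();
            return std::nullopt; // the path itself is a database file
        }
        if (fs::exists(wallet_file)) {
            error = "database file exists at " + wallet_file.string();
            return std::nullopt; // never overwrite an existing wallet.dat
        }
        return false; // directory pre-existed; do not remove it on failure
    }
    fs::create_directories(wallet_path);
    return true; // we created it; remove it again if the restore fails
}
```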
@@ -1673,7 +1702,13 @@ isminetype CWallet::IsMine(const COutPoint& outpoint) const

bool CWallet::IsFromMe(const CTransaction& tx) const
{
return (GetDebit(tx, ISMINE_ALL) > 0);
LOCK(cs_wallet);
for (const CTxIn& txin : tx.vin) {
if (IsMine(txin.prevout)) {
return true;
}
}
return false;
}

CAmount CWallet::GetDebit(const CTransaction& tx, const isminefilter& filter) const
@@ -4297,6 +4332,15 @@ bool CWallet::CanGrindR() const
return !IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER);
}

// Returns wallet prefix for migration.
// Used to name the backup file and newly created wallets.
// E.g. a watch-only wallet is named "<prefix>_watchonly".
static std::string MigrationPrefixName(CWallet& wallet)
{
const std::string& name{wallet.GetName()};
return name.empty() ? "default_wallet" : name;
}

bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error, MigrationResult& res) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet)
{
AssertLockHeld(wallet.cs_wallet);
@@ -4328,7 +4372,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,

DatabaseStatus status;
std::vector<bilingual_str> warnings;
std::string wallet_name = wallet.GetName() + "_watchonly";
std::string wallet_name = MigrationPrefixName(wallet) + "_watchonly";
std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
if (!database) {
error = strprintf(_("Wallet file creation failed: %s"), error);
@@ -4365,7 +4409,7 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,

DatabaseStatus status;
std::vector<bilingual_str> warnings;
std::string wallet_name = wallet.GetName() + "_solvables";
std::string wallet_name = MigrationPrefixName(wallet) + "_solvables";
std::unique_ptr<WalletDatabase> database = MakeWalletDatabase(wallet_name, options, status, error);
if (!database) {
error = strprintf(_("Wallet file creation failed: %s"), error);
@@ -4486,7 +4530,12 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
std::string name = to_reload->GetName();
to_reload.reset();
to_reload = LoadWallet(context, name, /*load_on_start=*/std::nullopt, options, status, error, warnings);
return to_reload != nullptr;
if (!to_reload) {
LogError("Failed to load wallet '%s' after migration. Rolling back migration to preserve consistency. "
"Error cause: %s\n", wallet_name, error.original);
return false;
}
return true;
};

// Before anything else, check if there is something to migrate.
@@ -4499,7 +4548,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>

// Make a backup of the DB
fs::path this_wallet_dir = fs::absolute(fs::PathFromString(local_wallet->GetDatabase().Filename())).parent_path();
fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", (wallet_name.empty() ? "default_wallet" : wallet_name), GetTime()));
fs::path backup_filename = fs::PathFromString(strprintf("%s_%d.legacy.bak", MigrationPrefixName(*local_wallet), GetTime()));
fs::path backup_path = this_wallet_dir / backup_filename;
if (!local_wallet->BackupWallet(fs::PathToString(backup_path))) {
if (was_loaded) {
@@ -4542,26 +4591,44 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
}
}

// In case of reloading failure, we need to remember the wallet dirs to remove
// Set is used as it may be populated with the same wallet directory paths multiple times,
// both before and after reloading. This ensures the set is complete even if one of the wallets
// fails to reload.
std::set<fs::path> wallet_dirs;
// In case of loading failure, we need to remember the wallet files we have created to remove.
// A `set` is used as it may be populated with the same wallet directory paths multiple times,
// both before and after loading. This ensures the set is complete even if one of the wallets
// fails to load.
std::set<fs::path> wallet_files_to_remove;
std::set<fs::path> wallet_empty_dirs_to_remove;

// Helper to track wallet files and directories for cleanup on failure.
// Only directories of wallets created during migration (not the main wallet) are tracked.
auto track_for_cleanup = [&](const CWallet& wallet) {
const auto files = wallet.GetDatabase().Files();
wallet_files_to_remove.insert(files.begin(), files.end());
if (wallet.GetName() != wallet_name) {
// If this isn’t the main wallet, mark its directory for removal.
// This applies to the watch-only and solvable wallets.
// Wallets stored directly as files in the top-level directory
// (e.g. default unnamed wallets) don’t have a removable parent directory.
wallet_empty_dirs_to_remove.insert(fs::PathFromString(wallet.GetDatabase().Filename()).parent_path());
}
};

if (success) {
// Migration successful, unload all wallets locally, then reload them.
// Reload the main wallet
wallet_dirs.insert(fs::PathFromString(local_wallet->GetDatabase().Filename()).parent_path());
LogInfo("Loading new wallets after migration...\n");
track_for_cleanup(*local_wallet);
success = reload_wallet(local_wallet);
res.wallet = local_wallet;
res.wallet_name = wallet_name;
if (success && res.watchonly_wallet) {
// Reload watchonly
wallet_dirs.insert(fs::PathFromString(res.watchonly_wallet->GetDatabase().Filename()).parent_path());
track_for_cleanup(*res.watchonly_wallet);
success = reload_wallet(res.watchonly_wallet);
}
if (success && res.solvables_wallet) {
// Reload solvables
wallet_dirs.insert(fs::PathFromString(res.solvables_wallet->GetDatabase().Filename()).parent_path());
track_for_cleanup(*res.solvables_wallet);
success = reload_wallet(res.solvables_wallet);
}
}
@@ -4569,7 +4636,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
// Migration failed, cleanup
// Before deleting the wallet's directory, copy the backup file to the top-level wallets dir
fs::path temp_backup_location = fsbridge::AbsPathJoin(GetWalletDir(), backup_filename);
fs::copy_file(backup_path, temp_backup_location, fs::copy_options::none);
fs::rename(backup_path, temp_backup_location);

// Make list of wallets to cleanup
std::vector<std::shared_ptr<CWallet>> created_wallets;
@@ -4578,8 +4645,8 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
if (res.solvables_wallet) created_wallets.push_back(std::move(res.solvables_wallet));

// Get the directories to remove after unloading
for (std::shared_ptr<CWallet>& w : created_wallets) {
wallet_dirs.emplace(fs::PathFromString(w->GetDatabase().Filename()).parent_path());
for (std::shared_ptr<CWallet>& wallet : created_wallets) {
track_for_cleanup(*wallet);
}

// Unload the wallets
@@ -4598,9 +4665,15 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
}
}

// Delete the wallet directories
for (const fs::path& dir : wallet_dirs) {
fs::remove_all(dir);
// First, delete the db files we have created throughout this process and nothing else
for (const fs::path& file : wallet_files_to_remove) {
fs::remove(file);
}

// Second, delete the created wallet directories and nothing else. They must be empty at this point.
for (const fs::path& dir : wallet_empty_dirs_to_remove) {
Assume(fs::is_empty(dir));
fs::remove(dir);
}

// Restore the backup
@@ -4614,8 +4687,7 @@ util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>
}

// The wallet directory has been restored, but just in case, copy the previously created backup to the wallet dir
fs::copy_file(temp_backup_location, backup_path, fs::copy_options::none);
fs::remove(temp_backup_location);
fs::rename(temp_backup_location, backup_path);

// Verify that there is no dangling wallet: when the wallet wasn't loaded before, expect null.
// This check is performed after restoration to avoid an early error before saving the backup.

@@ -11,9 +11,10 @@ The alternate mainnet chain was generated as follows:
- restart node with a faketime 2 minutes later

```sh
for i in {1..2015}
for i in {1..2016}
do
faketime "`date -d @"$(( 1231006505 + $i * 120 ))" +'%Y-%m-%d %H:%M:%S'`" \
t=$(( 1231006505 + $i * 120 ))
faketime "`date -d @$t +'%Y-%m-%d %H:%M:%S'`" \
bitcoind -connect=0 -nocheckpoints -stopatheight=$i
done
```
@@ -21,7 +22,9 @@ done
The CPU miner is kept running as follows:

```sh
./minerd --coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r --no-stratum --algo sha256d --no-longpoll --scantime 3 --retry-pause 1
./minerd -u ... -p ... -o http://127.0.0.1:8332 --no-stratum \
--coinbase-addr 1NQpH6Nf8QtR2HphLRcvuVqfhXBXsiWn8r \
--algo sha256d --no-longpoll --scantime 3 --retry-pause 1
```

The payout address is derived from the first BIP32 test vector master key:
@@ -40,3 +43,8 @@ The timestamp was not kept constant because at difficulty 1 it's not sufficient
to only grind the nonce. Grinding the extra_nonce or version field instead
would have required additional (stratum) software. It would also make it more
complicated to reconstruct the blocks in this test.

The `getblocktemplate` RPC code needs to be patched to ignore not being connected
to any peers, and to ignore the IBD status check.

On macOS use `faketime "@$t"` instead.
|
||||
|
||||
@@ -69,9 +69,6 @@ class BadTxTemplate:
     # Only specified if it differs from mempool acceptance error.
     block_reject_reason = ""

-    # Do we expect to be disconnected after submitting this tx?
-    expect_disconnect = False
-
     # Is this tx considered valid when included in a block, but not for acceptance into
     # the mempool (i.e. does it violate policy but not consensus)?
     valid_in_block = False
@@ -89,7 +86,6 @@ class BadTxTemplate:

 class OutputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vout-empty"
-    expect_disconnect = True

     def get_tx(self):
         tx = CTransaction()
@@ -100,7 +96,6 @@ class OutputMissing(BadTxTemplate):

 class InputMissing(BadTxTemplate):
     reject_reason = "bad-txns-vin-empty"
-    expect_disconnect = True

     # We use a blank transaction here to make sure
     # it is interpreted as a non-witness transaction.
@@ -117,7 +112,6 @@ class InputMissing(BadTxTemplate):
 # tree depth commitment (CVE-2017-12842)
 class SizeTooSmall(BadTxTemplate):
     reject_reason = "tx-size-small"
-    expect_disconnect = False
     valid_in_block = True

     def get_tx(self):
@@ -134,7 +128,6 @@ class BadInputOutpointIndex(BadTxTemplate):
     # Won't be rejected - nonexistent outpoint index is treated as an orphan since the coins
     # database can't distinguish between spent outpoints and outpoints which never existed.
     reject_reason = None
-    expect_disconnect = False

     def get_tx(self):
         num_indices = len(self.spend_tx.vin)
@@ -149,7 +142,6 @@ class BadInputOutpointIndex(BadTxTemplate):

 class DuplicateInput(BadTxTemplate):
     reject_reason = 'bad-txns-inputs-duplicate'
-    expect_disconnect = True

     def get_tx(self):
         tx = CTransaction()
@@ -162,7 +154,6 @@ class DuplicateInput(BadTxTemplate):

 class PrevoutNullInput(BadTxTemplate):
     reject_reason = 'bad-txns-prevout-null'
-    expect_disconnect = True

     def get_tx(self):
         tx = CTransaction()
@@ -175,7 +166,6 @@ class PrevoutNullInput(BadTxTemplate):

 class NonexistentInput(BadTxTemplate):
     reject_reason = None  # Added as an orphan tx.
-    expect_disconnect = False

     def get_tx(self):
         tx = CTransaction()
@@ -188,7 +178,6 @@ class NonexistentInput(BadTxTemplate):

 class SpendTooMuch(BadTxTemplate):
     reject_reason = 'bad-txns-in-belowout'
-    expect_disconnect = True

     def get_tx(self):
         return create_tx_with_script(
@@ -197,7 +186,6 @@ class SpendTooMuch(BadTxTemplate):

 class CreateNegative(BadTxTemplate):
     reject_reason = 'bad-txns-vout-negative'
-    expect_disconnect = True

     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=-1)
@@ -205,7 +193,6 @@ class CreateNegative(BadTxTemplate):

 class CreateTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-vout-toolarge'
-    expect_disconnect = True

     def get_tx(self):
         return create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY + 1)
@@ -213,7 +200,6 @@ class CreateTooLarge(BadTxTemplate):

 class CreateSumTooLarge(BadTxTemplate):
     reject_reason = 'bad-txns-txouttotal-toolarge'
-    expect_disconnect = True

     def get_tx(self):
         tx = create_tx_with_script(self.spend_tx, 0, amount=MAX_MONEY)
@@ -223,8 +209,7 @@ class CreateSumTooLarge(BadTxTemplate):


 class InvalidOPIFConstruction(BadTxTemplate):
-    reject_reason = "mandatory-script-verify-flag-failed (Invalid OP_IF construction)"
-    expect_disconnect = True
+    reject_reason = "mempool-script-verify-flag-failed (Invalid OP_IF construction)"
     valid_in_block = True

     def get_tx(self):
@@ -236,7 +221,6 @@ class InvalidOPIFConstruction(BadTxTemplate):
 class TooManySigops(BadTxTemplate):
     reject_reason = "bad-txns-too-many-sigops"
     block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount"
-    expect_disconnect = False

     def get_tx(self):
         lotsa_checksigs = CScript([OP_CHECKSIG] * (MAX_BLOCK_SIGOPS))
@@ -258,15 +242,14 @@ def getDisabledOpcodeTemplate(opcode):

     return type('DisabledOpcode_' + str(opcode), (BadTxTemplate,), {
         'reject_reason': "disabled opcode",
-        'expect_disconnect': True,
         'get_tx': get_tx,
         'valid_in_block' : True
     })

 class NonStandardAndInvalid(BadTxTemplate):
-    """A non-standard transaction which is also consensus-invalid should return the consensus error."""
-    reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)"
-    expect_disconnect = True
+    """A non-standard transaction which is also consensus-invalid should return the first error."""
+    reject_reason = "mempool-script-verify-flag-failed (Using OP_CODESEPARATOR in non-witness script)"
+    block_reject_reason = "mandatory-script-verify-flag-failed (OP_RETURN was encountered)"
     valid_in_block = False

     def get_tx(self):
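Across these templates the `expect_disconnect` flag disappears, and script-level rejections now carry the `mempool-script-verify-flag-failed` prefix for mempool acceptance, with `block_reject_reason` reserved for cases where block validation reports something different. A minimal sketch of how a harness can pick the expected error from such a template, assuming only the fields visible above (illustrative, not the actual test driver):

```python
class BadTxTemplate:
    """Illustrative base class mirroring the fields shown in the diff."""
    reject_reason = None      # expected mempool rejection, if any
    block_reject_reason = ""  # set only when the block-level error differs
    valid_in_block = False    # policy-only violations still work in blocks

class TooManySigops(BadTxTemplate):
    reject_reason = "bad-txns-too-many-sigops"
    block_reject_reason = "bad-blk-sigops, out-of-bounds SigOpCount"

def expected_block_error(template):
    # Fall back to the mempool reason when no block-specific one is set,
    # the same `or` fallback used in the feature_block hunk further down.
    return template.block_reject_reason or template.reject_reason

assert expected_block_error(TooManySigops) == "bad-blk-sigops, out-of-bounds SigOpCount"
```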
@@ -2014,7 +2014,8 @@
         1231247971,
         1231248071,
         1231248198,
-        1231248322
+        1231248322,
+        1231248621
     ],
     "nonces": [
         2345621585,
@@ -4031,6 +4032,7 @@
         3658502865,
         2519048297,
         1915965760,
-        1183846025
+        1183846025,
+        2713372123
     ]
 }
@@ -164,9 +164,12 @@ class FullBlockTest(BitcoinTestFramework):
         self.sign_tx(badtx, attempt_spend_tx)
         badtx.rehash()
         badblock = self.update_block(blockname, [badtx])
+        reject_reason = (template.block_reject_reason or template.reject_reason)
+        if reject_reason and reject_reason.startswith("mempool-script-verify-flag-failed"):
+            reject_reason = "mandatory-script-verify-flag-failed" + reject_reason[33:]
         self.send_blocks(
             [badblock], success=False,
-            reject_reason=(template.block_reject_reason or template.reject_reason),
+            reject_reason=reject_reason,
             reconnect=True, timeout=2)

         self.move_tip(2)
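The magic number 33 in the slice is simply `len("mempool-script-verify-flag-failed")`, so the rewrite swaps the mempool prefix for the block-level `mandatory-...` one while preserving the parenthesized script error. A standalone sketch of that mapping:

```python
MEMPOOL_PREFIX = "mempool-script-verify-flag-failed"
BLOCK_PREFIX = "mandatory-script-verify-flag-failed"
assert len(MEMPOOL_PREFIX) == 33  # the slice offset used above

def to_block_reject_reason(reason):
    # Block validation reports the mandatory-flag error, while mempool
    # acceptance reports the mempool-flag one; swap only the prefix.
    if reason and reason.startswith(MEMPOOL_PREFIX):
        return BLOCK_PREFIX + reason[len(MEMPOOL_PREFIX):]
    return reason

print(to_block_reject_reason(
    "mempool-script-verify-flag-failed (Invalid OP_IF construction)"))
# mandatory-script-verify-flag-failed (Invalid OP_IF construction)
```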
@@ -154,12 +154,14 @@ class BIP65Test(BitcoinTestFramework):
             coin_vout = coin.prevout.n
             cltv_invalidate(spendtx, i)

+            blk_rej = "mandatory-script-verify-flag-failed"
+            tx_rej = "mempool-script-verify-flag-failed"
             expected_cltv_reject_reason = [
-                "mandatory-script-verify-flag-failed (Operation not valid with the current stack size)",
-                "mandatory-script-verify-flag-failed (Negative locktime)",
-                "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
-                "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
-                "mandatory-script-verify-flag-failed (Locktime requirement not satisfied)",
+                " (Operation not valid with the current stack size)",
+                " (Negative locktime)",
+                " (Locktime requirement not satisfied)",
+                " (Locktime requirement not satisfied)",
+                " (Locktime requirement not satisfied)",
             ][i]
             # First we show that this tx is valid except for CLTV by getting it
             # rejected from the mempool for exactly that reason.
@@ -170,8 +172,8 @@ class BIP65Test(BitcoinTestFramework):
                 'txid': spendtx_txid,
                 'wtxid': spendtx_wtxid,
                 'allowed': False,
-                'reject-reason': expected_cltv_reject_reason,
-                'reject-details': expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}"
+                'reject-reason': tx_rej + expected_cltv_reject_reason,
+                'reject-details': tx_rej + expected_cltv_reject_reason + f", input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:{coin_vout}"
             }],
             self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
         )
@@ -181,7 +183,7 @@ class BIP65Test(BitcoinTestFramework):
         block.hashMerkleRoot = block.calc_merkle_root()
         block.solve()

-        with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {expected_cltv_reject_reason}']):
+        with self.nodes[0].assert_debug_log(expected_msgs=[f'Block validation error: {blk_rej + expected_cltv_reject_reason}']):
             peer.send_and_ping(msg_block(block))
             assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
         peer.sync_with_ping()
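Factoring the prefixes into `tx_rej` and `blk_rej` lets the same parenthesized suffix list serve both contexts: mempool expectations prepend the mempool prefix, while the block-validation log check prepends the mandatory one. A tiny illustration:

```python
blk_rej = "mandatory-script-verify-flag-failed"
tx_rej = "mempool-script-verify-flag-failed"
suffix = " (Negative locktime)"

print(tx_rej + suffix)   # expected testmempoolaccept 'reject-reason'
print(blk_rej + suffix)  # expected 'Block validation error: ...' log text
```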
@@ -322,6 +322,21 @@ class CoinStatsIndexTest(BitcoinTestFramework):
         res1 = index_node.gettxoutsetinfo(hash_type='muhash', hash_or_height=None, use_index=True)
         assert_equal(res["muhash"], res1["muhash"])

+        self.log.info("Test index with an unclean restart after a reorg")
+        self.restart_node(1, extra_args=self.extra_args[1])
+        committed_height = index_node.getblockcount()
+        self.generate(index_node, 2, sync_fun=self.no_op)
+        self.sync_index_node()
+        block2 = index_node.getbestblockhash()
+        index_node.invalidateblock(block2)
+        self.generatetoaddress(index_node, 1, getnewdestination()[2], sync_fun=self.no_op)
+        self.sync_index_node()
+        index_node.kill_process()
+        self.start_node(1, extra_args=self.extra_args[1])
+        self.sync_index_node()
+        # Because of the unclean shutdown above, indexes reset to the point we last committed them to disk.
+        assert_equal(index_node.getindexinfo()['coinstatsindex']['best_block_height'], committed_height)
+

 if __name__ == '__main__':
     CoinStatsIndexTest(__file__).main()
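The final assertion encodes the index's crash-recovery contract: anything indexed after the last flush to disk is lost on an unclean shutdown, and the index resumes from the height it last committed. A toy model of that contract (not the real index code):

```python
class ToyIndex:
    """Toy model: the best height survives a crash only if committed."""
    def __init__(self):
        self.height = 0            # in-memory best indexed height
        self.committed_height = 0  # last height flushed to disk

    def advance(self, blocks):
        self.height += blocks      # index new blocks in memory

    def commit(self):
        self.committed_height = self.height  # flush, as on a clean restart

    def crash_and_restart(self):
        # An unclean shutdown discards everything since the last commit.
        self.height = self.committed_height

idx = ToyIndex()
idx.advance(322)
idx.commit()             # like the clean restart_node() in the test
idx.advance(2)           # blocks mined (and reorged) after the commit
idx.crash_and_restart()  # like kill_process() followed by start_node()
assert idx.height == 322  # back at committed_height, as asserted above
```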
@@ -84,7 +84,7 @@ class ConfArgsTest(BitcoinTestFramework):

         self.log.debug('Verifying that disabling of the config file means garbage inside of it does ' \
                        'not prevent the node from starting, and message about existing config file is logged')
-        ignored_file_message = [f'[InitConfig] Data directory "{self.nodes[0].datadir_path}" contains a "bitcoin.conf" file which is explicitly ignored using -noconf.']
+        ignored_file_message = [f'Data directory "{self.nodes[0].datadir_path}" contains a "bitcoin.conf" file which is explicitly ignored using -noconf.']
         with self.nodes[0].assert_debug_log(timeout=60, expected_msgs=ignored_file_message):
             self.start_node(0, extra_args=settings + ['-noconf'])
         self.stop_node(0)
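The updated expectation works because `assert_debug_log` matches substrings of the node's `debug.log`, so the test only needs the invariant part of the message once the `[InitConfig]` prefix is gone. A simplified sketch of that kind of matching (not the framework implementation):

```python
def log_contains(log_text, expected_msgs):
    # Each expected message must appear somewhere in the captured log.
    return all(msg in log_text for msg in expected_msgs)

log_text = ('2025-01-01T00:00:00Z Data directory "/tmp/node0" contains a '
            '"bitcoin.conf" file which is explicitly ignored using -noconf.')
assert log_contains(log_text, ['explicitly ignored using -noconf'])
```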
@@ -123,8 +123,8 @@ class BIP66Test(BitcoinTestFramework):
                 'txid': spendtx_txid,
                 'wtxid': spendtx_wtxid,
                 'allowed': False,
-                'reject-reason': 'mandatory-script-verify-flag-failed (Non-canonical DER signature)',
-                'reject-details': 'mandatory-script-verify-flag-failed (Non-canonical DER signature), ' +
+                'reject-reason': 'mempool-script-verify-flag-failed (Non-canonical DER signature)',
+                'reject-details': 'mempool-script-verify-flag-failed (Non-canonical DER signature), ' +
                     f"input 0 of {spendtx_txid} (wtxid {spendtx_wtxid}), spending {coin_txid}:0"
             }],
             self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
Some files were not shown because too many files have changed in this diff.