Do base type propagation in miniscript_stable fuzzer

Keep track of which base type (B, K, V, or W) is desired in the miniscript_stable
fuzzer's ConsumeNodeStable function. This allows aborting early when the node being
constructed cannot have the requested type.

Note that this does not change the fuzzer format; the meaning of inputs to
ConsumeNodeStable is unmodified. The only behavioral change is that the fuzzer will
now often abort early.

The direct motivation is preventing recursive chains of v: wrappers, the only
fragment type that does not otherwise increase the overall minimum possible script
size. A later commit will exploit this to prevent overly large scripts from being
constructed.
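
To make the early-abort idea concrete, here is a minimal, self-contained sketch of the gating pattern the diff below adds. SimpleType, BaseType, allow_base() and consume_node_sketch() are invented stand-ins for the real miniscript Type class, its ""_mst literals, and the << subtype check; only the shape of the check is meant to match.

// Hedged illustration only; not the actual miniscript.h types.
#include <cstdint>
#include <optional>
#include <string>

enum class BaseType : uint8_t { B = 1 << 0, K = 1 << 1, V = 1 << 2, W = 1 << 3 };

struct SimpleType {
    uint8_t flags = 0;                 // 0 plays the role of ""_mst: no constraint
    bool empty() const { return flags == 0; }
    bool has(BaseType b) const { return flags & static_cast<uint8_t>(b); }
};

// Mirrors the allow_B/K/V/W booleans: a fragment of base type b is acceptable
// if nothing was requested, or the requested type includes b.
bool allow_base(SimpleType type_needed, BaseType b)
{
    return type_needed.empty() || type_needed.has(b);
}

// Shape of the early abort inside the node-construction switch: returning
// std::nullopt rejects the fuzz input instead of building a wrongly-typed node.
std::optional<std::string> consume_node_sketch(uint8_t selector, SimpleType type_needed)
{
    switch (selector) {
        case 0:   // a fragment that is always "B", like JUST_0
            if (!allow_base(type_needed, BaseType::B)) return std::nullopt;
            return "0";
        case 2:   // a fragment that is always "K", like pk_k()
            if (!allow_base(type_needed, BaseType::K)) return std::nullopt;
            return "pk_k(<key>)";
        default:
            return std::nullopt;
    }
}

In the real ConsumeNodeStable the check is slightly more general (type_needed << "B"_mst, etc.), but the control flow is the same: the byte-to-fragment mapping is untouched and only an early return is added.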
Author: Pieter Wuille
Date:   2023-02-24 16:45:01 -05:00
Parent: 519ec2650e
Commit: 5abb0f5ac3

@@ -329,27 +329,51 @@ std::optional<uint32_t> ConsumeTimeLock(FuzzedDataProvider& provider) {
  * bytes as the number of keys define the index of each key in the test data.
  * - For thresh(), the next byte defines the threshold value and the following one the number of subs.
  */
-std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider) {
+std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider, Type type_needed) {
+    bool allow_B = (type_needed == ""_mst) || (type_needed << "B"_mst);
+    bool allow_K = (type_needed == ""_mst) || (type_needed << "K"_mst);
+    bool allow_V = (type_needed == ""_mst) || (type_needed << "V"_mst);
+    bool allow_W = (type_needed == ""_mst) || (type_needed << "W"_mst);
+
     switch (provider.ConsumeIntegral<uint8_t>()) {
-        case 0: return {{Fragment::JUST_0}};
-        case 1: return {{Fragment::JUST_1}};
-        case 2: return {{Fragment::PK_K, ConsumePubKey(provider)}};
-        case 3: return {{Fragment::PK_H, ConsumePubKey(provider)}};
+        case 0:
+            if (!allow_B) return {};
+            return {{Fragment::JUST_0}};
+        case 1:
+            if (!allow_B) return {};
+            return {{Fragment::JUST_1}};
+        case 2:
+            if (!allow_K) return {};
+            return {{Fragment::PK_K, ConsumePubKey(provider)}};
+        case 3:
+            if (!allow_K) return {};
+            return {{Fragment::PK_H, ConsumePubKey(provider)}};
         case 4: {
+            if (!allow_B) return {};
             const auto k = ConsumeTimeLock(provider);
             if (!k) return {};
             return {{Fragment::OLDER, *k}};
         }
         case 5: {
+            if (!allow_B) return {};
             const auto k = ConsumeTimeLock(provider);
             if (!k) return {};
             return {{Fragment::AFTER, *k}};
         }
-        case 6: return {{Fragment::SHA256, ConsumeSha256(provider)}};
-        case 7: return {{Fragment::HASH256, ConsumeHash256(provider)}};
-        case 8: return {{Fragment::RIPEMD160, ConsumeRipemd160(provider)}};
-        case 9: return {{Fragment::HASH160, ConsumeHash160(provider)}};
+        case 6:
+            if (!allow_B) return {};
+            return {{Fragment::SHA256, ConsumeSha256(provider)}};
+        case 7:
+            if (!allow_B) return {};
+            return {{Fragment::HASH256, ConsumeHash256(provider)}};
+        case 8:
+            if (!allow_B) return {};
+            return {{Fragment::RIPEMD160, ConsumeRipemd160(provider)}};
+        case 9:
+            if (!allow_B) return {};
+            return {{Fragment::HASH160, ConsumeHash160(provider)}};
         case 10: {
+            if (!allow_B) return {};
             const auto k = provider.ConsumeIntegral<uint8_t>();
             const auto n_keys = provider.ConsumeIntegral<uint8_t>();
             if (n_keys > 20 || k == 0 || k > n_keys) return {};
@@ -357,26 +381,59 @@ std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider) {
             for (auto& key: keys) key = ConsumePubKey(provider);
             return {{Fragment::MULTI, k, std::move(keys)}};
         }
-        case 11: return {{3, Fragment::ANDOR}};
-        case 12: return {{2, Fragment::AND_V}};
-        case 13: return {{2, Fragment::AND_B}};
-        case 15: return {{2, Fragment::OR_B}};
-        case 16: return {{2, Fragment::OR_C}};
-        case 17: return {{2, Fragment::OR_D}};
-        case 18: return {{2, Fragment::OR_I}};
+        case 11:
+            if (!(allow_B || allow_K || allow_V)) return {};
+            return {{{"B"_mst, type_needed, type_needed}, Fragment::ANDOR}};
+        case 12:
+            if (!(allow_B || allow_K || allow_V)) return {};
+            return {{{"V"_mst, type_needed}, Fragment::AND_V}};
+        case 13:
+            if (!allow_B) return {};
+            return {{{"B"_mst, "W"_mst}, Fragment::AND_B}};
+        case 15:
+            if (!allow_B) return {};
+            return {{{"B"_mst, "W"_mst}, Fragment::OR_B}};
+        case 16:
+            if (!allow_V) return {};
+            return {{{"B"_mst, "V"_mst}, Fragment::OR_C}};
+        case 17:
+            if (!allow_B) return {};
+            return {{{"B"_mst, "B"_mst}, Fragment::OR_D}};
+        case 18:
+            if (!(allow_B || allow_K || allow_V)) return {};
+            return {{{type_needed, type_needed}, Fragment::OR_I}};
         case 19: {
+            if (!allow_B) return {};
             auto k = provider.ConsumeIntegral<uint8_t>();
             auto n_subs = provider.ConsumeIntegral<uint8_t>();
             if (k == 0 || k > n_subs) return {};
-            return {{n_subs, Fragment::THRESH, k}};
+            std::vector<Type> subtypes;
+            subtypes.reserve(n_subs);
+            subtypes.emplace_back("B"_mst);
+            for (size_t i = 1; i < n_subs; ++i) subtypes.emplace_back("W"_mst);
+            return {{std::move(subtypes), Fragment::THRESH, k}};
         }
-        case 20: return {{1, Fragment::WRAP_A}};
-        case 21: return {{1, Fragment::WRAP_S}};
-        case 22: return {{1, Fragment::WRAP_C}};
-        case 23: return {{1, Fragment::WRAP_D}};
-        case 24: return {{1, Fragment::WRAP_V}};
-        case 25: return {{1, Fragment::WRAP_J}};
-        case 26: return {{1, Fragment::WRAP_N}};
+        case 20:
+            if (!allow_W) return {};
+            return {{{"B"_mst}, Fragment::WRAP_A}};
+        case 21:
+            if (!allow_W) return {};
+            return {{{"B"_mst}, Fragment::WRAP_S}};
+        case 22:
+            if (!allow_B) return {};
+            return {{{"K"_mst}, Fragment::WRAP_C}};
+        case 23:
+            if (!allow_B) return {};
+            return {{{"V"_mst}, Fragment::WRAP_D}};
+        case 24:
+            if (!allow_V) return {};
+            return {{{"B"_mst}, Fragment::WRAP_V}};
+        case 25:
+            if (!allow_B) return {};
+            return {{{"B"_mst}, Fragment::WRAP_J}};
+        case 26:
+            if (!allow_B) return {};
+            return {{{"B"_mst}, Fragment::WRAP_N}};
         default:
             break;
     }
@@ -709,7 +766,7 @@ std::optional<NodeInfo> ConsumeNodeSmart(FuzzedDataProvider& provider, Type type
  * a NodeRef whose Type() matches the type fed to ConsumeNode.
  */
 template<typename F>
-NodeRef GenNode(F ConsumeNode, Type root_type = ""_mst, bool strict_valid = false) {
+NodeRef GenNode(F ConsumeNode, Type root_type, bool strict_valid = false) {
     /** A stack of miniscript Nodes being built up. */
     std::vector<NodeRef> stack;
     /** The queue of instructions. */
@@ -921,9 +978,9 @@ void FuzzInitSmart()
 FUZZ_TARGET_INIT(miniscript_stable, FuzzInit)
 {
     FuzzedDataProvider provider(buffer.data(), buffer.size());
-    TestNode(GenNode([&](Type) {
-        return ConsumeNodeStable(provider);
-    }), provider);
+    TestNode(GenNode([&](Type needed_type) {
+        return ConsumeNodeStable(provider, needed_type);
+    }, ""_mst), provider);
 }
 
 /** Fuzz target that runs TestNode on nodes generated using ConsumeNodeSmart. */