bitcoin/test/mp/test/test.cpp
Ryan Ofsky 2478a15ef9 Squashed 'src/ipc/libmultiprocess/' changes from 1868a84451f..70f632bda8f
70f632bda8f Merge bitcoin-core/libmultiprocess#265: ci: set LC_ALL in shell scripts
8e8e564259a Merge bitcoin-core/libmultiprocess#249: fixes for race conditions on disconnects
05d34cc2ec3 ci: set LC_ALL in shell scripts
e606fd84a8c Merge bitcoin-core/libmultiprocess#264: ci: reduce nproc multipliers
ff0eed1bf18 refactor: Use loop variable in type-context.h
ff1d8ba172a refactor: Move type-context.h getParams() call closer to use
1dbc59a4aa3 race fix: m_on_cancel called after request finishes
1643d05ba07 test: m_on_cancel called after request finishes
f5509a31fcc race fix: getParams() called after request cancel
4a60c39f24a test: getParams() called after request cancel
f11ec29ed20 race fix: worker thread destroyed before it is initialized
a1d643348f4 test: worker thread destroyed before it is initialized
336023382c4 ci: reduce nproc multipliers
b090beb9651 Merge bitcoin-core/libmultiprocess#256: ci: cache gnu32 nix store
be8622816da ci: cache gnu32 nix store
975270b619c Merge bitcoin-core/libmultiprocess#263: ci: bump timeout factor to 40
09f10e5a598 ci: bump timeout factor to 40
db8f76ad290 Merge bitcoin-core/libmultiprocess#253: ci: run some Bitcoin Core CI jobs
55a9b557b19 ci: set Bitcoin Core CI test repetition
fb0fc84d556 ci: add TSan job with instrumented libc++
0f29c38725b ci: add Bitcoin Core IPC tests (ASan + macOS)
3f64320315d Merge bitcoin-core/libmultiprocess#262: ci: enable clang-tidy in macOS job, use nullptr
cd9f8bdc9f0 Merge bitcoin-core/libmultiprocess#258: log: add socket connected info message and demote destroy logs to debug
b5d6258a42f Merge bitcoin-core/libmultiprocess#255: fix: use unsigned char cast and sizeof in LogEscape escape sequence
d94688e2c32 Merge bitcoin-core/libmultiprocess#251: Improved CustomBuildField for std::optional in IPC/libmultiprocess
a9499fad755 mp: use nullptr with pthread_threadid_np
f499e37850f ci: enable clang-tidy in macOS job
98f1352159d log: add socket connected info message and demote destroy logs to debug
554a481ea73 fix: use unsigned char cast and sizeof in LogEscape escape sequence
1977b9f3f65 Use std::forward in CustomBuildField for std::optional to allow move semantics, resolves FIXME
22bec918c97 Merge bitcoin-core/libmultiprocess#247: type-map: Work around LLVM 22 "out of bounds index" error
8a5e3ae6ed2 Merge bitcoin-core/libmultiprocess#242: proxy-types: add CustomHasField hook to map Cap'n Proto values to null C++ values
e8d35246918 Merge bitcoin-core/libmultiprocess#246: doc: Bump version 8 > 9
97d877053b6 proxy-types: add CustomHasField hook for nullable decode paths
8c2f10252c9 refactor: add missing includes to mp/type-data.h
b1638aceb40 doc: Bump version 8 > 9
f61af487217 type-map: Work around LLVM 22 "out of bounds index" error

git-subtree-dir: src/ipc/libmultiprocess
git-subtree-split: 70f632bda8f80449b6240f98da768206a535a04e

// Copyright (c) The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <mp/test/foo.capnp.h>
#include <mp/test/foo.capnp.proxy.h>
#include <atomic>
#include <capnp/capability.h>
#include <capnp/rpc.h>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <cstring>
#include <functional>
#include <future>
#include <kj/async.h>
#include <kj/async-io.h>
#include <kj/common.h>
#include <kj/debug.h>
#include <kj/memory.h>
#include <kj/test.h>
#include <memory>
#include <mp/proxy.h>
#include <mp/proxy.capnp.h>
#include <mp/proxy-io.h>
#include <mp/util.h>
#include <mp/version.h>
#include <optional>
#include <set>
#include <stdexcept>
#include <string>
#include <string_view>
#include <thread>
#include <type_traits>
#include <utility>
#include <vector>

namespace mp {
namespace test {

/** Check version.h header values */
constexpr auto kMP_MAJOR_VERSION{MP_MAJOR_VERSION};
constexpr auto kMP_MINOR_VERSION{MP_MINOR_VERSION};
static_assert(std::is_integral_v<decltype(kMP_MAJOR_VERSION)>, "MP_MAJOR_VERSION must be an integral constant");
static_assert(std::is_integral_v<decltype(kMP_MINOR_VERSION)>, "MP_MINOR_VERSION must be an integral constant");

/**
* Test setup class creating a two way connection between a
* ProxyServer<FooInterface> object and a ProxyClient<FooInterface>.
*
* Provides disconnection lambdas that can be used to trigger
* disconnects and test handling of broken and closed connections.
*
* Accepts a client_owns_connection option to test different ProxyClient
* destroy_connection values and control whether destroying the ProxyClient
* object destroys the client Connection object. Normally it makes sense for
* this to be true to simplify shutdown and avoid needing to call
* client_disconnect manually, but false allows testing more ProxyClient
* behavior and the "IPC client method called after disconnect" code path.
*/
class TestSetup
{
public:
    std::function<void()> server_disconnect;
    std::function<void()> server_disconnect_later;
    std::function<void()> client_disconnect;
    std::promise<std::unique_ptr<ProxyClient<messages::FooInterface>>> client_promise;
    std::unique_ptr<ProxyClient<messages::FooInterface>> client;
    ProxyServer<messages::FooInterface>* server{nullptr};

    //! Thread variable should be after other struct members so the thread does
    //! not start until the other members are initialized.
    std::thread thread;

    TestSetup(bool client_owns_connection = true)
        : thread{[&] {
            EventLoop loop("mptest", [](mp::LogMessage log) {
                // Info logs are not printed by default, but will be shown with `mptest --verbose`
                KJ_LOG(INFO, log.level, log.message);
                if (log.level == mp::Log::Raise) throw std::runtime_error(log.message);
            });
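
            // Create an in-process two-way pipe so the client and server ends
            // can talk over a real stream without touching the filesystem or
            // network.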
            auto pipe = loop.m_io_context.provider->newTwoWayPipe();

            auto server_connection =
                std::make_unique<Connection>(loop, kj::mv(pipe.ends[0]), [&](Connection& connection) {
                    auto server_proxy = kj::heap<ProxyServer<messages::FooInterface>>(
                        std::make_shared<FooImplementation>(), connection);
                    server = server_proxy;
                    return capnp::Capability::Client(kj::mv(server_proxy));
                });
            server_disconnect = [&] { loop.sync([&] { server_connection.reset(); }); };
            server_disconnect_later = [&] {
                assert(std::this_thread::get_id() == loop.m_thread_id);
                loop.m_task_set->add(kj::evalLater([&] { server_connection.reset(); }));
            };
            // Set handler to destroy the server when the client disconnects. This
            // is ignored if server_disconnect() is called instead.
            server_connection->onDisconnect([&] { server_connection.reset(); });
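
            // Create the client end of the connection and a ProxyClient
            // bootstrapped from the server vat's FooInterface capability.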
            auto client_connection = std::make_unique<Connection>(loop, kj::mv(pipe.ends[1]));
            auto client_proxy = std::make_unique<ProxyClient<messages::FooInterface>>(
                client_connection->m_rpc_system->bootstrap(ServerVatId().vat_id).castAs<messages::FooInterface>(),
                client_connection.get(), /* destroy_connection= */ client_owns_connection);
            if (client_owns_connection) {
                (void)client_connection.release();
            } else {
                client_disconnect = [&] { loop.sync([&] { client_connection.reset(); }); };
            }

            client_promise.set_value(std::move(client_proxy));
            loop.loop();
        }}
    {
        client = client_promise.get_future().get();
    }

    ~TestSetup()
    {
        // Test that client cleanup_fns are executed.
        bool destroyed = false;
        client->m_context.cleanup_fns.emplace_front([&destroyed] { destroyed = true; });
        client.reset();
        KJ_EXPECT(destroyed);
        thread.join();
    }
};

KJ_TEST("Call FooInterface methods")
{
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
KJ_EXPECT(foo->add(1, 2) == 3);
int ret;
foo->addOut(3, 4, ret);
KJ_EXPECT(ret == 7);
foo->addInOut(3, ret);
KJ_EXPECT(ret == 10);
FooStruct in;
in.name = "name";
in.setint.insert(2);
in.setint.insert(1);
in.vbool.push_back(false);
in.vbool.push_back(true);
in.vbool.push_back(false);
FooStruct out = foo->pass(in);
KJ_EXPECT(in.name == out.name);
KJ_EXPECT(in.setint.size() == out.setint.size());
for (auto init{in.setint.begin()}, outit{out.setint.begin()}; init != in.setint.end() && outit != out.setint.end(); ++init, ++outit) {
KJ_EXPECT(*init == *outit);
}
KJ_EXPECT(in.vbool.size() == out.vbool.size());
for (size_t i = 0; i < in.vbool.size(); ++i) {
KJ_EXPECT(in.vbool[i] == out.vbool[i]);
}
FooStruct err;
try {
foo->raise(in);
} catch (const FooStruct& e) {
err = e;
}
KJ_EXPECT(in.name == err.name);
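
    // Pass a client-side callback object to the server and check that the
    // server can invoke it with the expected argument and relay its result.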
    class Callback : public ExtendedCallback
    {
    public:
        Callback(int expect, int ret) : m_expect(expect), m_ret(ret) {}
        int call(int arg) override
        {
            KJ_EXPECT(arg == m_expect);
            return m_ret;
        }
        int callExtended(int arg) override
        {
            KJ_EXPECT(arg == m_expect + 10);
            return m_ret + 10;
        }
        int m_expect, m_ret;
    };
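
    // Exchange thread map capabilities first so the calls below, including
    // callbacks from the server back into this process, can be routed to
    // dedicated threads.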
    foo->initThreadMap();
    Callback callback(1, 2);
    KJ_EXPECT(foo->callback(callback, 1) == 2);
    KJ_EXPECT(foo->callbackUnique(std::make_unique<Callback>(3, 4), 3) == 4);
    KJ_EXPECT(foo->callbackShared(std::make_shared<Callback>(5, 6), 5) == 6);
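
    // saveCallback() keeps a reference to the shared callback on the server
    // side, so the use count should rise to 2 and drop back to 1 after the
    // saved callback is cleared.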
    auto saved = std::make_shared<Callback>(7, 8);
    KJ_EXPECT(saved.use_count() == 1);
    foo->saveCallback(saved);
    KJ_EXPECT(saved.use_count() == 2);
    foo->callbackSaved(7);
    KJ_EXPECT(foo->callbackSaved(7) == 8);
    foo->saveCallback(nullptr);
    KJ_EXPECT(saved.use_count() == 1);

    KJ_EXPECT(foo->callbackExtended(callback, 11) == 12);
    FooCustom custom_in;
    custom_in.v1 = "v1";
    custom_in.v2 = 5;
    FooCustom custom_out = foo->passCustom(custom_in);
    KJ_EXPECT(custom_in.v1 == custom_out.v1);
    KJ_EXPECT(custom_in.v2 == custom_out.v2);

    foo->passEmpty(FooEmpty{});
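
    // FooMessage and FooMutable use custom serialization hooks that append a
    // token at each build/read/pass step, so the expected strings below record
    // the exact path each object took through the proxy layer.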
    FooMessage message1;
    message1.message = "init";
    FooMessage message2{foo->passMessage(message1)};
    KJ_EXPECT(message2.message == "init build read call build read");

    FooMutable mut;
    mut.message = "init";
    foo->passMutable(mut);
    KJ_EXPECT(mut.message == "init build pass call return read");

    KJ_EXPECT(foo->passFn([]{ return 10; }) == 10);
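
    // Null entries in a vector of pointers should round-trip as null values
    // rather than as empty objects.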
    std::vector<FooDataRef> data_in;
    data_in.push_back(std::make_shared<FooData>(FooData{'H', 'i'}));
    data_in.push_back(nullptr);
    std::vector<FooDataRef> data_out{foo->passDataPointers(data_in)};
    KJ_EXPECT(data_out.size() == 2);
    KJ_REQUIRE(data_out[0] != nullptr);
    KJ_EXPECT(*data_out[0] == *data_in[0]);
    KJ_EXPECT(!data_out[1]);
}

KJ_TEST("Call IPC method after client connection is closed")
{
TestSetup setup{/*client_owns_connection=*/false};
ProxyClient<messages::FooInterface>* foo = setup.client.get();
KJ_EXPECT(foo->add(1, 2) == 3);
setup.client_disconnect();
bool disconnected{false};
try {
foo->add(1, 2);
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method called after disconnect.");
disconnected = true;
}
KJ_EXPECT(disconnected);
}
KJ_TEST("Calling IPC method after server connection is closed")
{
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
KJ_EXPECT(foo->add(1, 2) == 3);
setup.server_disconnect();
bool disconnected{false};
try {
foo->add(1, 2);
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
disconnected = true;
}
KJ_EXPECT(disconnected);
}
KJ_TEST("Calling IPC method and disconnecting during the call")
{
TestSetup setup{/*client_owns_connection=*/false};
ProxyClient<messages::FooInterface>* foo = setup.client.get();
KJ_EXPECT(foo->add(1, 2) == 3);
// Set m_fn to initiate client disconnect when server is in the middle of
// handling the callFn call to make sure this case is handled cleanly.
setup.server->m_impl->m_fn = setup.client_disconnect;
bool disconnected{false};
try {
foo->callFn();
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
disconnected = true;
}
KJ_EXPECT(disconnected);
}
KJ_TEST("Calling IPC method, disconnecting and blocking during the call")
{
// This test is similar to last test, except that instead of letting the IPC
// call return immediately after triggering a disconnect, make it disconnect
// & wait so server is forced to deal with having a disconnection and call
// in flight at the same time.
//
// Test uses callFnAsync() instead of callFn() to implement this. Both of
// these methods have the same implementation, but the callFnAsync() capnp
// method declaration takes an mp.Context argument so the method executes on
// an asynchronous thread instead of executing in the event loop thread, so
// it is able to block without deadlocking the event lock thread.
//
// This test adds important coverage because it causes the server Connection
// object to be destroyed before ProxyServer object, which is not a
// condition that usually happens because the m_rpc_system.reset() call in
// the ~Connection destructor usually would immediately free all remaining
// ProxyServer objects associated with the connection. Having an in-progress
// RPC call requires keeping the ProxyServer longer.
std::promise<void> signal;
TestSetup setup{/*client_owns_connection=*/false};
ProxyClient<messages::FooInterface>* foo = setup.client.get();
KJ_EXPECT(foo->add(1, 2) == 3);
foo->initThreadMap();
setup.server->m_impl->m_fn = [&] {
EventLoopRef loop{*setup.server->m_context.loop};
setup.client_disconnect();
signal.get_future().get();
};
bool disconnected{false};
try {
foo->callFnAsync();
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
disconnected = true;
}
KJ_EXPECT(disconnected);
// Now that the disconnect has been detected, set signal allowing the
// callFnAsync() IPC call to return. Since signalling may not wake up the
// thread right away, it is important for the signal variable to be declared
// *before* the TestSetup variable so is not destroyed while
// signal.get_future().get() is called.
signal.set_value();
}
KJ_TEST("Worker thread destroyed before it is initialized")
{
// Regression test for bitcoin/bitcoin#34711, bitcoin/bitcoin#34756 where a
// worker thread is destroyed before it starts waiting for work.
//
// The test uses the `makethread` hook to trigger a disconnect as soon as
// ProxyServer<ThreadMap>::makeThread is called, so without the bugfix,
// ProxyServer<Thread>::~ProxyServer would run and destroy the waiter before
// the worker thread started waiting, causing a SIGSEGV when it did start.
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
foo->initThreadMap();
setup.server->m_impl->m_fn = [] {};
EventLoop& loop = *setup.server->m_context.connection->m_loop;
loop.testing_hook_makethread = [&] {
// Use disconnect_later to queue the disconnect, because the makethread
// hook is called on the event loop thread. The disconnect should happen
// as soon as the event loop is idle.
setup.server_disconnect_later();
};
loop.testing_hook_makethread_created = [&] {
// Sleep to allow event loop to run and process the queued disconnect
// before the worker thread starts waiting.
std::this_thread::sleep_for(std::chrono::milliseconds(10));
};
bool disconnected{false};
try {
foo->callFnAsync();
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
disconnected = true;
}
KJ_EXPECT(disconnected);
}
KJ_TEST("Calling async IPC method, with server disconnect racing the call")
{
// Regression test for bitcoin/bitcoin#34777 heap-use-after-free where
// an async request is canceled before it starts to execute.
//
// Use testing_hook_async_request_start to trigger a disconnect from the
// worker thread as soon as it begins to execute an async request. Without
// the bugfix, the worker thread would trigger a SIGSEGV after this by
// calling call_context.getParams().
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
foo->initThreadMap();
setup.server->m_impl->m_fn = [] {};
EventLoop& loop = *setup.server->m_context.connection->m_loop;
loop.testing_hook_async_request_start = [&] {
setup.server_disconnect();
// Sleep is necessary to let the event loop fully clean up after the
// disconnect and trigger the SIGSEGV.
std::this_thread::sleep_for(std::chrono::milliseconds(10));
};
try {
foo->callFnAsync();
KJ_EXPECT(false);
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
}
}
KJ_TEST("Calling async IPC method, with server disconnect after cleanup")
{
// Regression test for bitcoin/bitcoin#34782 stack-use-after-return where
// an async request is canceled after it finishes executing but before the
// response is sent.
//
// Use testing_hook_async_request_done to trigger a disconnect from the
// worker thread after it executes an async request but before it returns.
// Without the bugfix, the m_on_cancel callback would be called at this
// point, accessing the cancel_mutex stack variable that had gone out of
// scope.
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
foo->initThreadMap();
setup.server->m_impl->m_fn = [] {};
EventLoop& loop = *setup.server->m_context.connection->m_loop;
loop.testing_hook_async_request_done = [&] {
setup.server_disconnect();
};
try {
foo->callFnAsync();
KJ_EXPECT(false);
} catch (const std::runtime_error& e) {
KJ_EXPECT(std::string_view{e.what()} == "IPC client method call interrupted by disconnect.");
}
}
KJ_TEST("Make simultaneous IPC calls on single remote thread")
{
TestSetup setup;
ProxyClient<messages::FooInterface>* foo = setup.client.get();
std::promise<void> signal;
foo->initThreadMap();
// Use callFnAsync() to get the client to set up the request_thread
// that will be used for the test.
setup.server->m_impl->m_fn = [&] {};
foo->callFnAsync();
ThreadContext& tc{g_thread_context};
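
    // Look up the callback and request Thread capabilities this client
    // registered for the connection, so the raw requests below can all be
    // pinned to the same remote thread.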
    Thread::Client *callback_thread, *request_thread;
    foo->m_context.loop->sync([&] {
        Lock lock(tc.waiter->m_mutex);
        callback_thread = &tc.callback_threads.at(foo->m_context.connection)->m_client;
        request_thread = &tc.request_threads.at(foo->m_context.connection)->m_client;
    });

    // Call callIntFnAsync 3 times with n=100, 200, 300
    std::atomic<int> expected = 100;
    setup.server->m_impl->m_int_fn = [&](int n) {
        assert(n == expected);
        expected += 100;
        return n;
    };
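
    // Build and send three callIntFnAsync requests directly through the
    // underlying capnp client without waiting for responses, setting the same
    // thread context on each so all three are queued on one remote thread,
    // where m_int_fn asserts that they execute in order.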
    auto client{foo->m_client};
    std::atomic<size_t> running{3};
    foo->m_context.loop->sync([&] {
        for (size_t i = 0; i < running; i++) {
            auto request{client.callIntFnAsyncRequest()};
            auto context{request.initContext()};
            context.setCallbackThread(*callback_thread);
            context.setThread(*request_thread);
            request.setArg(100 * (i + 1));
            foo->m_context.loop->m_task_set->add(request.send().then([&running, &tc, i](auto&& results) {
                assert(results.getResult() == static_cast<int32_t>(100 * (i + 1)));
                running -= 1;
                tc.waiter->m_cv.notify_all();
            }));
        }
    });

    {
        Lock lock(tc.waiter->m_mutex);
        tc.waiter->wait(lock, [&running] { return running == 0; });
    }
    KJ_EXPECT(expected == 400);
}

} // namespace test
} // namespace mp