// Copyright (c) The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef MP_PROXY_TYPE_CONTEXT_H
#define MP_PROXY_TYPE_CONTEXT_H

#include <mp/proxy-io.h>
#include <mp/util.h>

namespace mp {
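//! CustomBuildField overload for mp.Context arguments. Fills in the context's
//! callbackThread and thread fields so the server can run the call on a
//! dedicated worker thread and make callbacks into the calling client thread.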
template <typename Output>
void CustomBuildField(TypeList<>,
                      Priority<1>,
                      ClientInvokeContext& invoke_context,
                      Output&& output,
                      typename std::enable_if<std::is_same<decltype(output.get()), Context::Builder>::value>::type* enable = nullptr)
{
    auto& connection = invoke_context.connection;
    auto& thread_context = invoke_context.thread_context;

    // Create local Thread::Server object corresponding to the current thread
    // and pass a Thread::Client reference to it in the Context.callbackThread
    // field so the function being called can make callbacks to this thread.
    // Also store the Thread::Client reference in the callback_threads map so
    // future calls over this connection can reuse it.
    auto [callback_thread, _]{SetThread(
        thread_context.callback_threads, thread_context.waiter->m_mutex, &connection,
        [&] { return connection.m_threads.add(kj::heap<ProxyServer<Thread>>(thread_context, std::thread{})); })};

    // Call remote ThreadMap.makeThread function so server will create a
    // dedicated worker thread to run function calls from this thread. Store the
    // Thread::Client reference it returns in the request_threads map.
    auto make_request_thread{[&]{
        // This code will only run if an IPC client call is being made for the
        // first time on this thread. After the first call, subsequent calls
        // will use the existing request thread. This code will also never run at
        // all if the current thread is a request thread created for a different
        // IPC client, because in that case PassField code (below) will have set
        // request_thread to point to the calling thread.
        auto request = connection.m_thread_map.makeThreadRequest();
        request.setName(thread_context.thread_name);
        return request.send().getResult(); // Nonblocking due to capnp request pipelining.
    }};
    auto [request_thread, _1]{SetThread(
        thread_context.request_threads, thread_context.waiter->m_mutex,
        &connection, make_request_thread)};

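    // Fill in the context fields, telling the server which worker thread should
    // execute the call and which client thread it can call back into.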
    auto context = output.init();
    context.setThread(request_thread->second.m_client);
    context.setCallbackThread(callback_thread->second.m_client);
}

//! PassField override for mp.Context arguments. Return asynchronously and call
//! function on other thread found in context.
template <typename Accessor, typename ServerContext, typename Fn, typename... Args>
auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn& fn, Args&&... args) ->
    typename std::enable_if<
        std::is_same<decltype(Accessor::get(server_context.call_context.getParams())), Context::Reader>::value,
        kj::Promise<typename ServerContext::CallContext>>::type
{
    const auto& params = server_context.call_context.getParams();
    Context::Reader context_arg = Accessor::get(params);
    auto future = kj::newPromiseAndFulfiller<typename ServerContext::CallContext>();
    auto& server = server_context.proxy_server;
    int req = server_context.req;
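    // Bundle the actual method invocation into a lambda. It is posted to the
    // thread requested by the client below, and fulfills (or rejects) the
    // promise from the event loop thread once the call completes.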
    auto invoke = [fulfiller = kj::mv(future.fulfiller),
                   call_context = kj::mv(server_context.call_context), &server, req, fn, args...]() mutable {
        const auto& params = call_context.getParams();
        Context::Reader context_arg = Accessor::get(params);
        ServerContext server_context{server, call_context, req};
        {
            // Before invoking the function, store a reference to the
            // callbackThread provided by the client in the
            // thread_local.request_threads map. This way, if this
            // server thread needs to execute any RPCs that call back to
            // the client, they will happen on the same client thread
            // that is waiting for this function, just like what would
            // happen if this were a normal function call made on the
            // local stack.
            //
            // If the request_threads map already has an entry for this
            // connection, it will be left unchanged, and it indicates
            // that the current thread is an RPC client thread which is
            // in the middle of an RPC call, and the current RPC call is
            // a nested call from the remote thread handling that RPC
            // call. In this case, the callbackThread value should point
            // to the same thread already in the map, so there is no
            // need to update the map.
            auto& thread_context = g_thread_context;
            auto& request_threads = thread_context.request_threads;
            auto [request_thread, inserted]{SetThread(
                request_threads, thread_context.waiter->m_mutex,
                server.m_context.connection,
                [&] { return context_arg.getCallbackThread(); })};

            // If an entry was inserted into the request_threads map,
            // remove it after calling fn.invoke. If an entry was not
            // inserted, one already existed, meaning this must be a
            // recursive call (IPC call calling back to the caller which
            // makes another IPC call), so avoid modifying the map.
            const bool erase_thread{inserted};
            KJ_DEFER(if (erase_thread) {
                std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
                // Call erase here with a Connection* argument instead
                // of an iterator argument, because the `request_thread`
                // iterator may be invalid if the connection is closed
                // during this function call. More specifically, the
                // iterator may be invalid because SetThread adds a
                // cleanup callback to the Connection destructor that
                // erases the thread from the map, and also because the
                // ProxyServer<Thread> destructor calls
                // request_threads.clear().
                request_threads.erase(server.m_context.connection);
            });
            fn.invoke(server_context, args...);
        }
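        // Once the call has returned, fulfill the promise from the event loop
        // thread so the response can be sent; if fulfilling throws, reject the
        // promise with the caught exception instead.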
        KJ_IF_MAYBE(exception, kj::runCatchingExceptions([&]() {
            server.m_context.loop->sync([&] {
                auto fulfiller_dispose = kj::mv(fulfiller);
                fulfiller_dispose->fulfill(kj::mv(call_context));
            });
        }))
        {
            server.m_context.loop->sync([&]() {
                auto fulfiller_dispose = kj::mv(fulfiller);
                fulfiller_dispose->reject(kj::mv(*exception));
            });
        }
    };

    // Look up the Thread object specified by the client. The specified thread
    // should be a local Thread::Server object, but it needs to be looked up
    // asynchronously with getLocalServer().
    auto thread_client = context_arg.getThread();
    return server.m_context.connection->m_threads.getLocalServer(thread_client)
        .then([&server, invoke = kj::mv(invoke), req](const kj::Maybe<Thread::Server&>& perhaps) mutable {
            // Assuming the thread object is found, pass it the `invoke` lambda
            // above, which will invoke the function on that thread.
            KJ_IF_MAYBE (thread_server, perhaps) {
                const auto& thread = static_cast<ProxyServer<Thread>&>(*thread_server);
                server.m_context.loop->log()
                    << "IPC server post request #" << req << " {" << thread.m_thread_context.thread_name << "}";
                thread.m_thread_context.waiter->post(std::move(invoke));
            } else {
                server.m_context.loop->log()
                    << "IPC server error request #" << req << ", missing thread to execute request";
                throw std::runtime_error("invalid thread handle");
            }
        })
        // Wait for the invocation to finish before returning to the caller.
        .then([invoke_wait = kj::mv(future.promise)]() mutable { return kj::mv(invoke_wait); });
}
} // namespace mp

#endif // MP_PROXY_TYPE_CONTEXT_H