Squashed 'src/ipc/libmultiprocess/' changes from 13424cf2ecc1..a4f929696490
a4f929696490 Merge bitcoin-core/libmultiprocess#224: doc: fix typos
f4344ae87da0 Merge bitcoin-core/libmultiprocess#222: test, ci: Fix threadsanitizer errors in mptest
1434642b3804 doc: fix typos
73d22ba2e930 test: Fix tsan race in thread busy test
b74e1bba014d ci: Use tsan-instrumented cap'n proto in sanitizers job
c332774409ad test: Fix failing exception check in new thread busy test
ca3c05d56709 test: Use KJ_LOG instead of std::cout for logging
7eb1da120ab6 ci: Use tsan-instrumented libcxx in sanitizers job
ec86e4336e98 Merge bitcoin-core/libmultiprocess#220: Add log levels and advertise them to users via logging callback
515ce93ad349 Logging: Pass LogData struct to logging callback
213574ccc43d Logging: reclassify remaining log messages
e4de0412b430 Logging: Break out expensive log messages and classify them as Trace
408874a78fdc Logging: Use new logging macros
67b092d835cd Logging: Disable logging if messsage level is less than the requested level
d0a1ba7ebf21 Logging: add log levels to mirror Core's
463a8296d188 Logging: Disable moving or copying Logger
83a2e10c0b03 Logging: Add an EventLoop constructor to allow for user-specified log options
58cf47a7fc8c Merge bitcoin-core/libmultiprocess#221: test default PassField impl handles output parameters
db03a663f514 Merge bitcoin-core/libmultiprocess#214: Fix crash on simultaneous IPC calls using the same thread
afcc40b0f1e8 Merge bitcoin-core/libmultiprocess#213: util+doc: Clearer errors when attempting to run examples + polished docs
6db669628387 test In|Out parameter
29cf2ada75ea test default PassField impl handles output parameters
1238170f68e8 test: simultaneous IPC calls using same thread
eb069ab75d83 Fix crash on simultaneous IPC calls using the same thread
ec03a9639ab5 doc: Precision and typos
2b4348193551 doc: Where possible, remove links to ryanofsky/bitcoin/
286fe469c9c9 util: Add helpful error message when failing to execute file
47d79db8a552 Merge bitcoin-core/libmultiprocess#201: bug: fix mptest hang, ProxyClient<Thread> deadlock in disconnect handler
f15ae9c9b9fb Merge bitcoin-core/libmultiprocess#211: Add .gitignore
4a269b21b8c8 bug: fix ProxyClient<Thread> deadlock if disconnected as IPC call is returning
85df96482c49 Use try_emplace in SetThread instead of threads.find
ca9b380ea91a Use std::optional in ConnThreads to allow shortening locks
9b0799113557 doc: describe ThreadContext struct and synchronization requirements
d60db601ed9b proxy-io.h: add Waiter::m_mutex thread safety annotations
4e365b019a9f ci: Use -Wthread-safety not -Wthread-safety-analysis
15d7bafbb001 Add .gitignore
fe1cd8c76131 Merge bitcoin-core/libmultiprocess#208: ci: Test minimum cmake version in olddeps job
b713a0b7bfbc Merge bitcoin-core/libmultiprocess#207: ci: output CMake version in CI script
0f580397c913 ci: Test minimum cmake version in olddeps job
d603dcc0eef0 ci: output CMake version in CI script

git-subtree-dir: src/ipc/libmultiprocess
git-subtree-split: a4f92969649018ca70f949a09148bccfeaecd99a

@@ -66,8 +66,6 @@ struct ProxyClient<Thread> : public ProxyClientBase<Thread, ::capnp::Void>
    ProxyClient(const ProxyClient&) = delete;
    ~ProxyClient();

    void setDisconnectCallback(const std::function<void()>& fn);

    //! Reference to callback function that is run if there is a sudden
    //! disconnect and the Connection object is destroyed before this
    //! ProxyClient<Thread> object. The callback will destroy this object and
@@ -100,36 +98,29 @@ public:
    EventLoop& m_loop;
};

using LogFn = std::function<void(bool raise, std::string message)>;

class Logger
{
public:
    Logger(bool raise, LogFn& fn) : m_raise(raise), m_fn(fn) {}
    Logger(Logger&& logger) : m_raise(logger.m_raise), m_fn(logger.m_fn), m_buffer(std::move(logger.m_buffer)) {}
    ~Logger() noexcept(false)
    {
        if (m_fn) m_fn(m_raise, m_buffer.str());
    }

    template <typename T>
    friend Logger& operator<<(Logger& logger, T&& value)
    {
        if (logger.m_fn) logger.m_buffer << std::forward<T>(value);
        return logger;
    }

    template <typename T>
    friend Logger& operator<<(Logger&& logger, T&& value)
    {
        return logger << std::forward<T>(value);
    }

    bool m_raise;
    LogFn& m_fn;
    std::ostringstream m_buffer;
//! Log flags. Update stringify function if changed!
enum class Log {
    Trace = 0,
    Debug,
    Info,
    Warning,
    Error,
    Raise,
};

kj::StringPtr KJ_STRINGIFY(Log flags);

struct LogMessage {
    //! Message to be logged
    std::string message;

    //! The severity level of this message
    Log level;
};

using LogFn = std::function<void(LogMessage)>;

struct LogOptions {
    //! External logging callback.
@@ -138,8 +129,60 @@ struct LogOptions {
    //! Maximum number of characters to use when representing
    //! request and response structs as strings.
    size_t max_chars{200};

    //! Messages with a severity level less than log_level will not be
    //! reported.
    Log log_level{Log::Trace};
};

class Logger
{
public:
    Logger(const LogOptions& options, Log log_level) : m_options(options), m_log_level(log_level) {}

    Logger(Logger&&) = delete;
    Logger& operator=(Logger&&) = delete;
    Logger(const Logger&) = delete;
    Logger& operator=(const Logger&) = delete;

    ~Logger() noexcept(false)
    {
        if (enabled()) m_options.log_fn({std::move(m_buffer).str(), m_log_level});
    }

    template <typename T>
    friend Logger& operator<<(Logger& logger, T&& value)
    {
        if (logger.enabled()) logger.m_buffer << std::forward<T>(value);
        return logger;
    }

    template <typename T>
    friend Logger& operator<<(Logger&& logger, T&& value)
    {
        return logger << std::forward<T>(value);
    }

    explicit operator bool() const
    {
        return enabled();
    }

private:
    bool enabled() const
    {
        return m_options.log_fn && m_log_level >= m_options.log_level;
    }

    const LogOptions& m_options;
    Log m_log_level;
    std::ostringstream m_buffer;
};

#define MP_LOGPLAIN(loop, ...) if (mp::Logger logger{(loop).m_log_opts, __VA_ARGS__}; logger) logger

#define MP_LOG(loop, ...) MP_LOGPLAIN(loop, __VA_ARGS__) << "{" << LongThreadName((loop).m_exe_name) << "} "

std::string LongThreadName(const char* exe_name);

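For orientation, here is a minimal sketch of how a host application might wire these pieces together. It assumes the mp namespace and the declarations above (LogMessage, LogFn, LogOptions, MP_LOG); the header path, callback body, and chosen threshold are illustrative, not part of this change:

    #include <mp/proxy-io.h> // assumed location of the declarations above
    #include <iostream>

    mp::LogOptions MakeLogOptions()
    {
        mp::LogOptions opts;
        opts.log_fn = [](mp::LogMessage msg) {
            // Hand severity and text to the host logger; stderr is a stand-in here.
            std::cerr << static_cast<int>(msg.level) << ": " << msg.message << "\n";
        };
        opts.log_level = mp::Log::Debug; // drop messages below Debug (i.e. Trace)
        return opts;
    }

    // At a call site that has an EventLoop `loop`, the macro only builds the
    // message when a callback is set and the level passes the threshold:
    //     MP_LOG(loop, mp::Log::Info) << "connection established";
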
//! Event loop implementation.
@@ -170,8 +213,19 @@ std::string LongThreadName(const char* exe_name);
class EventLoop
{
public:
    //! Construct event loop object.
    EventLoop(const char* exe_name, LogFn log_fn, void* context = nullptr);
    //! Construct event loop object with default logging options.
    EventLoop(const char* exe_name, LogFn log_fn, void* context = nullptr)
        : EventLoop(exe_name, LogOptions{std::move(log_fn)}, context){}

    //! Construct event loop object with specified logging options.
    EventLoop(const char* exe_name, LogOptions log_opts, void* context = nullptr);

    //! Backwards-compatible constructor for previous (deprecated) logging callback signature
    EventLoop(const char* exe_name, std::function<void(bool, std::string)> old_callback, void* context = nullptr)
        : EventLoop(exe_name,
                    LogFn{[old_callback = std::move(old_callback)](LogMessage log_data) {old_callback(log_data.level == Log::Raise, std::move(log_data.message));}},
                    context){}

    ~EventLoop();

    //! Run event loop. Does not return until shutdown. This should only be
@@ -212,15 +266,6 @@ public:
    //! Check if loop should exit.
    bool done() const MP_REQUIRES(m_mutex);

    Logger log()
    {
        Logger logger(false, m_log_opts.log_fn);
        logger << "{" << LongThreadName(m_exe_name) << "} ";
        return logger;
    }
    Logger logPlain() { return {false, m_log_opts.log_fn}; }
    Logger raise() { return {true, m_log_opts.log_fn}; }

    //! Process name included in thread names so combined debug output from
    //! multiple processes is easier to understand.
    const char* m_exe_name;
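The constructors above replace the single LogFn constructor, and the removed EventLoop::log()/logPlain()/raise() helpers are replaced by the macros. A sketch of the resulting call sites, assuming the mp namespace and the includes from the earlier sketch; "bitcoin-node" and the callback bodies are placeholders:

    // New style: pass LogOptions (callback + level threshold) explicitly.
    mp::LogOptions opts;
    opts.log_fn = [](mp::LogMessage msg) { /* forward msg.message at msg.level */ };
    opts.log_level = mp::Log::Info;
    mp::EventLoop loop{"bitcoin-node", std::move(opts)};

    // The deprecated two-argument callback signature still compiles through the
    // backwards-compatible overload:
    //     mp::EventLoop legacy{"bitcoin-node",
    //         [](bool raise, std::string message) { /* ... */ }};

    // Logging call sites move from the removed helpers to the macros, now with an
    // explicit level, e.g.
    //     loop.log() << "text";     becomes  MP_LOG(loop, mp::Log::Info) << "text";
    //     loop.raise() << "error";  becomes  MP_LOGPLAIN(loop, mp::Log::Raise) << "error";
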
@@ -283,18 +328,19 @@ struct Waiter
    Waiter() = default;

    template <typename Fn>
    void post(Fn&& fn)
    bool post(Fn&& fn)
    {
        const std::unique_lock<std::mutex> lock(m_mutex);
        assert(!m_fn);
        const Lock lock(m_mutex);
        if (m_fn) return false;
        m_fn = std::forward<Fn>(fn);
        m_cv.notify_all();
        return true;
    }

    template <class Predicate>
    void wait(std::unique_lock<std::mutex>& lock, Predicate pred)
    void wait(Lock& lock, Predicate pred)
    {
        m_cv.wait(lock, [&] {
        m_cv.wait(lock.m_lock, [&]() MP_REQUIRES(m_mutex) {
            // Important for this to be "while (m_fn)", not "if (m_fn)" to avoid
            // a lost-wakeup bug. A new m_fn and m_cv notification might be sent
            // after the fn() call and before the lock.lock() call in this loop
@@ -317,9 +363,9 @@ struct Waiter
    //! mutexes than necessary. This mutex can be held at the same time as
    //! EventLoop::m_mutex as long as Waiter::mutex is locked first and
    //! EventLoop::m_mutex is locked second.
    std::mutex m_mutex;
    Mutex m_mutex;
    std::condition_variable m_cv;
    std::optional<kj::Function<void()>> m_fn;
    std::optional<kj::Function<void()>> m_fn MP_GUARDED_BY(m_mutex);
};

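A rough sketch of the handshake these members support, based only on the post()/wait() bodies above; the surrounding code and variable names are illustrative. The event loop thread hands work to the thread that owns the Waiter, and post() now reports failure instead of asserting when a previously posted callback has not been picked up yet:

    Waiter waiter;

    // Event loop side: try to hand a callback to the owning thread.
    if (!waiter.post([] { /* execute an IPC request on the owning thread */ })) {
        // m_fn is still occupied; the server code later in this diff surfaces
        // this as a "thread busy" error.
    }

    // Owning thread side: block until some condition holds, running any
    // callbacks posted in the meantime (wait() loops on m_fn internally).
    bool done = false;
    Lock lock(waiter.m_mutex);
    waiter.wait(lock, [&] { return done; });
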
//! Object holding network & rpc state associated with either an incoming server
@@ -544,29 +590,73 @@ void ProxyServerBase<Interface, Impl>::invokeDestroy()
    CleanupRun(m_context.cleanup_fns);
}

using ConnThreads = std::map<Connection*, ProxyClient<Thread>>;
//! Map from Connection to local or remote thread handle which will be used over
//! that connection. This map will typically only contain one entry, but can
//! contain multiple if a single thread makes IPC calls over multiple
//! connections. A std::optional value type is used to avoid the map needing to
//! be locked while ProxyClient<Thread> objects are constructed, see
//! ThreadContext "Synchronization note" below.
using ConnThreads = std::map<Connection*, std::optional<ProxyClient<Thread>>>;
using ConnThread = ConnThreads::iterator;

// Retrieve ProxyClient<Thread> object associated with this connection from a
// map, or create a new one and insert it into the map. Return map iterator and
// inserted bool.
std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex, Connection* connection, const std::function<Thread::Client()>& make_thread);
std::tuple<ConnThread, bool> SetThread(GuardedRef<ConnThreads> threads, Connection* connection, const std::function<Thread::Client()>& make_thread);

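For reference, a sketch of the new SetThread call shape, mirroring the CustomBuildField call sites later in this diff; `thread_context` and `connection` stand for a ThreadContext and Connection already in scope, and GuardedRef is the small helper added to util.h at the end of this diff:

    auto [it, inserted] = SetThread(
        GuardedRef{thread_context.waiter->m_mutex, thread_context.callback_threads},
        &connection,
        [&] { return connection.m_threads.add(kj::heap<ProxyServer<Thread>>(thread_context, std::thread{})); });

    // Map values are std::optional<ProxyClient<Thread>> now, so dereference
    // before touching the client handle:
    Thread::Client client{it->second->m_client};
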
//! The thread_local ThreadContext g_thread_context struct provides information
//! about individual threads and a way of communicating between them. Because
//! it's a thread local struct, each ThreadContext instance is initialized by
//! the thread that owns it.
//!
//! ThreadContext is used for any client threads created externally which make
//! IPC calls, and for server threads created by
//! ProxyServer<ThreadMap>::makeThread() which execute IPC calls for clients.
//!
//! In both cases, the struct holds information like the thread name, and a
//! Waiter object where the EventLoop can post incoming IPC requests to execute
//! on the thread. The struct also holds ConnThread maps associating the thread
//! with local and remote ProxyClient<Thread> objects.
struct ThreadContext
{
    //! Identifying string for debug.
    std::string thread_name;

    //! Waiter object used to allow client threads blocked waiting for a server
    //! response to execute callbacks made from the client's corresponding
    //! server thread.
    //! Waiter object used to allow remote clients to execute code on this
    //! thread. For server threads created by
    //! ProxyServer<ThreadMap>::makeThread(), this is initialized in that
    //! function. Otherwise, for client threads created externally, this is
    //! initialized the first time the thread tries to make an IPC call. Having
    //! a waiter is necessary for threads making IPC calls in case a server they
    //! are calling expects them to execute a callback during the call, before
    //! it sends a response.
    //!
    //! For IPC client threads, the Waiter pointer is never cleared and the Waiter
    //! just gets destroyed when the thread does. For server threads created by
    //! makeThread(), this pointer is set to null in the ~ProxyServer<Thread> as
    //! a signal for the thread to exit and destroy itself. In both cases, the
    //! same Waiter object is used across different calls and only created and
    //! destroyed once for the lifetime of the thread.
    std::unique_ptr<Waiter> waiter = nullptr;

    //! When client is making a request to a server, this is the
    //! `callbackThread` argument it passes in the request, used by the server
    //! in case it needs to make callbacks into the client that need to execute
    //! while the client is waiting. This will be set to a local thread object.
    ConnThreads callback_threads;
    //!
    //! Synchronization note: The callback_thread and request_thread maps are
    //! only ever accessed internally by this thread's destructor and externally
    //! by Cap'n Proto event loop threads. Since it's possible for IPC client
    //! threads to make calls over different connections that could have
    //! different event loops, these maps are guarded by Waiter::m_mutex in case
    //! different event loop threads add or remove map entries simultaneously.
    //! However, individual ProxyClient<Thread> objects in the maps will only be
    //! associated with one event loop and guarded by EventLoop::m_mutex. So
    //! Waiter::m_mutex does not need to be held while accessing individual
    //! ProxyClient<Thread> instances, and may even need to be released to
    //! respect lock order and avoid locking Waiter::m_mutex before
    //! EventLoop::m_mutex.
    ConnThreads callback_threads MP_GUARDED_BY(waiter->m_mutex);

    //! When client is making a request to a server, this is the `thread`
    //! argument it passes in the request, used to control which thread on
@@ -575,7 +665,9 @@ struct ThreadContext
    //! by makeThread. If a client call is being made from a thread currently
    //! handling a server request, this will be set to the `callbackThread`
    //! request thread argument passed in that request.
    ConnThreads request_threads;
    //!
    //! Synchronization note: \ref callback_threads note applies here as well.
    ConnThreads request_threads MP_GUARDED_BY(waiter->m_mutex);

    //! Whether this thread is a capnp event loop thread. Not really used except
    //! to assert false if there's an attempt to execute a blocking operation
@@ -598,7 +690,7 @@ std::unique_ptr<ProxyClient<InitInterface>> ConnectStream(EventLoop& loop, int f
        init_client = connection->m_rpc_system->bootstrap(ServerVatId().vat_id).castAs<InitInterface>();
        Connection* connection_ptr = connection.get();
        connection->onDisconnect([&loop, connection_ptr] {
            loop.log() << "IPC client: unexpected network disconnect.";
            MP_LOG(loop, Log::Warning) << "IPC client: unexpected network disconnect.";
            delete connection_ptr;
        });
    });
@@ -621,7 +713,7 @@ void _Serve(EventLoop& loop, kj::Own<kj::AsyncIoStream>&& stream, InitImpl& init
    });
    auto it = loop.m_incoming_connections.begin();
    it->onDisconnect([&loop, it] {
        loop.log() << "IPC server: socket disconnected.";
        MP_LOG(loop, Log::Info) << "IPC server: socket disconnected.";
        loop.m_incoming_connections.erase(it);
    });
}

@@ -568,7 +568,7 @@ template <typename Client>
void clientDestroy(Client& client)
{
    if (client.m_context.connection) {
        client.m_context.loop->log() << "IPC client destroy " << typeid(client).name();
        MP_LOG(*client.m_context.loop, Log::Info) << "IPC client destroy " << typeid(client).name();
    } else {
        KJ_LOG(INFO, "IPC interrupted client destroy", typeid(client).name());
    }
@@ -577,7 +577,7 @@ void clientDestroy(Client& client)
template <typename Server>
void serverDestroy(Server& server)
{
    server.m_context.loop->log() << "IPC server destroy " << typeid(server).name();
    MP_LOG(*server.m_context.loop, Log::Info) << "IPC server destroy " << typeid(server).name();
}

//! Entry point called by generated client code that looks like:
@@ -605,7 +605,7 @@ void clientInvoke(ProxyClient& proxy_client, const GetRequest& get_request, Fiel
        // declaration so the server method runs in a dedicated thread.
        assert(!g_thread_context.loop_thread);
        g_thread_context.waiter = std::make_unique<Waiter>();
        proxy_client.m_context.loop->logPlain()
        MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Info)
            << "{" << g_thread_context.thread_name
            << "} IPC client first request from current thread, constructing waiter";
    }
@@ -617,7 +617,7 @@ void clientInvoke(ProxyClient& proxy_client, const GetRequest& get_request, Fiel
    const char* disconnected = nullptr;
    proxy_client.m_context.loop->sync([&]() {
        if (!proxy_client.m_context.connection) {
            const std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
            const Lock lock(thread_context.waiter->m_mutex);
            done = true;
            disconnected = "IPC client method called after disconnect.";
            thread_context.waiter->m_cv.notify_all();
@@ -629,22 +629,26 @@ void clientInvoke(ProxyClient& proxy_client, const GetRequest& get_request, Fiel
        using FieldList = typename ProxyClientMethodTraits<typename Request::Params>::Fields;
        invoke_context.emplace(*proxy_client.m_context.connection, thread_context);
        IterateFields().handleChain(*invoke_context, request, FieldList(), typename FieldObjs::BuildParams{&fields}...);
        proxy_client.m_context.loop->logPlain()
        MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Debug)
            << "{" << thread_context.thread_name << "} IPC client send "
            << TypeName<typename Request::Params>() << " " << LogEscape(request.toString(), proxy_client.m_context.loop->m_log_opts.max_chars);
            << TypeName<typename Request::Params>();
        MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Trace)
            << "send data: " << LogEscape(request.toString(), proxy_client.m_context.loop->m_log_opts.max_chars);

        proxy_client.m_context.loop->m_task_set->add(request.send().then(
            [&](::capnp::Response<typename Request::Results>&& response) {
                proxy_client.m_context.loop->logPlain()
                MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Debug)
                    << "{" << thread_context.thread_name << "} IPC client recv "
                    << TypeName<typename Request::Results>() << " " << LogEscape(response.toString(), proxy_client.m_context.loop->m_log_opts.max_chars);
                    << TypeName<typename Request::Results>();
                MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Trace)
                    << "recv data: " << LogEscape(response.toString(), proxy_client.m_context.loop->m_log_opts.max_chars);
                try {
                    IterateFields().handleChain(
                        *invoke_context, response, FieldList(), typename FieldObjs::ReadResults{&fields}...);
                } catch (...) {
                    exception = std::current_exception();
                }
                const std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
                const Lock lock(thread_context.waiter->m_mutex);
                done = true;
                thread_context.waiter->m_cv.notify_all();
            },
@@ -653,20 +657,20 @@ void clientInvoke(ProxyClient& proxy_client, const GetRequest& get_request, Fiel
                disconnected = "IPC client method call interrupted by disconnect.";
            } else {
                kj_exception = kj::str("kj::Exception: ", e).cStr();
                proxy_client.m_context.loop->logPlain()
                MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Info)
                    << "{" << thread_context.thread_name << "} IPC client exception " << kj_exception;
            }
            const std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
            const Lock lock(thread_context.waiter->m_mutex);
            done = true;
            thread_context.waiter->m_cv.notify_all();
        }));
    });

    std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
    Lock lock(thread_context.waiter->m_mutex);
    thread_context.waiter->wait(lock, [&done]() { return done; });
    if (exception) std::rethrow_exception(exception);
    if (!kj_exception.empty()) proxy_client.m_context.loop->raise() << kj_exception;
    if (disconnected) proxy_client.m_context.loop->raise() << disconnected;
    if (!kj_exception.empty()) MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Raise) << kj_exception;
    if (disconnected) MP_LOGPLAIN(*proxy_client.m_context.loop, Log::Raise) << disconnected;
}

//! Invoke callable `fn()` that may return void. If it does return void, replace
@@ -700,8 +704,10 @@ kj::Promise<void> serverInvoke(Server& server, CallContext& call_context, Fn fn)
    using Results = typename decltype(call_context.getResults())::Builds;

    int req = ++server_reqs;
    server.m_context.loop->log() << "IPC server recv request #" << req << " "
                                 << TypeName<typename Params::Reads>() << " " << LogEscape(params.toString(), server.m_context.loop->m_log_opts.max_chars);
    MP_LOG(*server.m_context.loop, Log::Debug) << "IPC server recv request #" << req << " "
                                 << TypeName<typename Params::Reads>();
    MP_LOG(*server.m_context.loop, Log::Trace) << "request data: "
                                 << LogEscape(params.toString(), server.m_context.loop->m_log_opts.max_chars);

    try {
        using ServerContext = ServerInvokeContext<Server, CallContext>;
@@ -717,14 +723,15 @@ kj::Promise<void> serverInvoke(Server& server, CallContext& call_context, Fn fn)
        return ReplaceVoid([&]() { return fn.invoke(server_context, ArgList()); },
                           [&]() { return kj::Promise<CallContext>(kj::mv(call_context)); })
            .then([&server, req](CallContext call_context) {
                server.m_context.loop->log() << "IPC server send response #" << req << " " << TypeName<Results>()
                                             << " " << LogEscape(call_context.getResults().toString(), server.m_context.loop->m_log_opts.max_chars);
                MP_LOG(*server.m_context.loop, Log::Debug) << "IPC server send response #" << req << " " << TypeName<Results>();
                MP_LOG(*server.m_context.loop, Log::Trace) << "response data: "
                                             << LogEscape(call_context.getResults().toString(), server.m_context.loop->m_log_opts.max_chars);
            });
    } catch (const std::exception& e) {
        server.m_context.loop->log() << "IPC server unhandled exception: " << e.what();
        MP_LOG(*server.m_context.loop, Log::Error) << "IPC server unhandled exception: " << e.what();
        throw;
    } catch (...) {
        server.m_context.loop->log() << "IPC server unhandled exception";
        MP_LOG(*server.m_context.loop, Log::Error) << "IPC server unhandled exception";
        throw;
    }
}

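One point worth spelling out about the Debug/Trace split above: MP_LOG expands to an if statement over Logger's operator bool, so when a message is filtered out the streamed arguments are never evaluated and the relatively expensive toString()/LogEscape calls are skipped. Roughly (a simplified expansion, omitting the thread-name prefix MP_LOG adds):

    if (mp::Logger logger{server.m_context.loop->m_log_opts, mp::Log::Trace}; logger)
        logger << "request data: "
               << LogEscape(params.toString(), server.m_context.loop->m_log_opts.max_chars);
    // With log_level set above Trace, enabled() is false, the branch is not
    // taken, and params.toString() is never called.
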
@@ -25,7 +25,7 @@ void CustomBuildField(TypeList<>,
    // Also store the Thread::Client reference in the callback_threads map so
    // future calls over this connection can reuse it.
    auto [callback_thread, _]{SetThread(
        thread_context.callback_threads, thread_context.waiter->m_mutex, &connection,
        GuardedRef{thread_context.waiter->m_mutex, thread_context.callback_threads}, &connection,
        [&] { return connection.m_threads.add(kj::heap<ProxyServer<Thread>>(thread_context, std::thread{})); })};

    // Call remote ThreadMap.makeThread function so server will create a
@@ -43,12 +43,12 @@ void CustomBuildField(TypeList<>,
        return request.send().getResult(); // Nonblocking due to capnp request pipelining.
    }};
    auto [request_thread, _1]{SetThread(
        thread_context.request_threads, thread_context.waiter->m_mutex,
        GuardedRef{thread_context.waiter->m_mutex, thread_context.request_threads},
        &connection, make_request_thread)};

    auto context = output.init();
    context.setThread(request_thread->second.m_client);
    context.setCallbackThread(callback_thread->second.m_client);
    context.setThread(request_thread->second->m_client);
    context.setCallbackThread(callback_thread->second->m_client);
}

//! PassField override for mp.Context arguments. Return asynchronously and call
@@ -89,29 +89,39 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn&
            // need to update the map.
            auto& thread_context = g_thread_context;
            auto& request_threads = thread_context.request_threads;
            auto [request_thread, inserted]{SetThread(
                request_threads, thread_context.waiter->m_mutex,
                server.m_context.connection,
                [&] { return context_arg.getCallbackThread(); })};
            ConnThread request_thread;
            bool inserted;
            server.m_context.loop->sync([&] {
                std::tie(request_thread, inserted) = SetThread(
                    GuardedRef{thread_context.waiter->m_mutex, request_threads}, server.m_context.connection,
                    [&] { return context_arg.getCallbackThread(); });
            });

            // If an entry was inserted into the requests_threads map,
            // If an entry was inserted into the request_threads map,
            // remove it after calling fn.invoke. If an entry was not
            // inserted, one already existed, meaning this must be a
            // recursive call (IPC call calling back to the caller which
            // makes another IPC call), so avoid modifying the map.
            const bool erase_thread{inserted};
            KJ_DEFER(if (erase_thread) {
                std::unique_lock<std::mutex> lock(thread_context.waiter->m_mutex);
                // Call erase here with a Connection* argument instead
                // of an iterator argument, because the `request_thread`
                // iterator may be invalid if the connection is closed
                // during this function call. More specifically, the
                // iterator may be invalid because SetThread adds a
                // cleanup callback to the Connection destructor that
                // erases the thread from the map, and also because the
                // ProxyServer<Thread> destructor calls
                // request_threads.clear().
                request_threads.erase(server.m_context.connection);
                // Erase the request_threads entry on the event loop
                // thread with loop->sync(), so if the connection is
                // broken there is not a race between this thread and
                // the disconnect handler trying to destroy the thread
                // client object.
                server.m_context.loop->sync([&] {
                    // Look up the thread again without using existing
                    // iterator since entry may no longer be there after
                    // a disconnect. Destroy node after releasing
                    // Waiter::m_mutex, so the ProxyClient<Thread>
                    // destructor is able to use EventLoop::mutex
                    // without violating lock order.
                    ConnThreads::node_type removed;
                    {
                        Lock lock(thread_context.waiter->m_mutex);
                        removed = request_threads.extract(server.m_context.connection);
                    }
                });
            });
            fn.invoke(server_context, args...);
        }
@@ -140,11 +150,16 @@ auto PassField(Priority<1>, TypeList<>, ServerContext& server_context, const Fn&
        // thread.
        KJ_IF_MAYBE (thread_server, perhaps) {
            const auto& thread = static_cast<ProxyServer<Thread>&>(*thread_server);
            server.m_context.loop->log()
            MP_LOG(*server.m_context.loop, Log::Debug)
                << "IPC server post request #" << req << " {" << thread.m_thread_context.thread_name << "}";
            thread.m_thread_context.waiter->post(std::move(invoke));
            if (!thread.m_thread_context.waiter->post(std::move(invoke))) {
                MP_LOG(*server.m_context.loop, Log::Error)
                    << "IPC server error request #" << req
                    << " {" << thread.m_thread_context.thread_name << "}" << ", thread busy";
                throw std::runtime_error("thread busy");
            }
        } else {
            server.m_context.loop->log()
            MP_LOG(*server.m_context.loop, Log::Error)
                << "IPC server error request #" << req << ", missing thread to execute request";
            throw std::runtime_error("invalid thread handle");
        }

@@ -182,6 +182,17 @@ public:
    std::unique_lock<std::mutex> m_lock;
};

template<typename T>
struct GuardedRef
{
    Mutex& mutex;
    T& ref MP_GUARDED_BY(mutex);
};

// CTAD for Clang 16: GuardedRef{mutex, x} -> GuardedRef<decltype(x)>
template <class U>
GuardedRef(Mutex&, U&) -> GuardedRef<U>;

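GuardedRef bundles a reference with the Mutex guarding it, so a callee such as SetThread can take a single annotated argument and Clang's thread-safety analysis can keep tracking the guarded data across the call. A minimal sketch, assuming the Mutex/Lock wrappers declared above; the map, function, and values are illustrative:

    Mutex g_mutex;
    std::map<int, std::string> g_names MP_GUARDED_BY(g_mutex);

    void AddName(GuardedRef<std::map<int, std::string>> names, int id, std::string name)
    {
        Lock lock(names.mutex);
        names.ref.emplace(id, std::move(name));
    }

    // The deduction guide above (needed explicitly for Clang 16) lets callers write:
    //     AddName(GuardedRef{g_mutex, g_names}, 7, "worker");
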
//! Analog to std::lock_guard that unlocks instead of locks.
template <typename Lock>
struct UnlockGuard