Squashed 'src/ipc/libmultiprocess/' changes from 13424cf2ecc1..47d79db8a552
47d79db8a552 Merge bitcoin-core/libmultiprocess#201: bug: fix mptest hang, ProxyClient<Thread> deadlock in disconnect handler
f15ae9c9b9fb Merge bitcoin-core/libmultiprocess#211: Add .gitignore
4a269b21b8c8 bug: fix ProxyClient<Thread> deadlock if disconnected as IPC call is returning
85df96482c49 Use try_emplace in SetThread instead of threads.find
ca9b380ea91a Use std::optional in ConnThreads to allow shortening locks
9b0799113557 doc: describe ThreadContext struct and synchronization requirements
d60db601ed9b proxy-io.h: add Waiter::m_mutex thread safety annotations
4e365b019a9f ci: Use -Wthread-safety not -Wthread-safety-analysis
15d7bafbb001 Add .gitignore
fe1cd8c76131 Merge bitcoin-core/libmultiprocess#208: ci: Test minimum cmake version in olddeps job
b713a0b7bfbc Merge bitcoin-core/libmultiprocess#207: ci: output CMake version in CI script
0f580397c913 ci: Test minimum cmake version in olddeps job
d603dcc0eef0 ci: output CMake version in CI script

git-subtree-dir: src/ipc/libmultiprocess
git-subtree-split: 47d79db8a5528097b408e18f7b0bae11a6702d26
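
Several of the commits above (the Waiter::m_mutex thread safety annotations and the switch to -Wthread-safety in CI) lean on Clang's thread safety analysis, which is also why the diff below replaces std::unique_lock<std::mutex> with the library's annotated Lock type. As a rough illustration of the mechanism, not the actual proxy-io.h definitions, an annotated mutex/lock pair looks like this:

    // Minimal sketch of Clang -Wthread-safety annotations; the real Mutex and
    // Lock types in proxy-io.h are more elaborate.
    #include <mutex>

    class __attribute__((capability("mutex"))) MutexSketch
    {
    public:
        void lock() __attribute__((acquire_capability())) { m_mutex.lock(); }
        void unlock() __attribute__((release_capability())) { m_mutex.unlock(); }
    private:
        std::mutex m_mutex;
    };

    class __attribute__((scoped_lockable)) LockSketch
    {
    public:
        explicit LockSketch(MutexSketch& mutex) __attribute__((acquire_capability(mutex)))
            : m_mutex(mutex) { m_mutex.lock(); }
        ~LockSketch() __attribute__((release_capability())) { m_mutex.unlock(); }
    private:
        MutexSketch& m_mutex;
    };

    struct Example
    {
        MutexSketch m_mutex;
        int m_value __attribute__((guarded_by(m_mutex))) = 0; // unlocked access warns
    };

With -Wthread-safety enabled, Clang warns whenever m_value is touched without holding m_mutex, which is what makes replacing the unannotated std::unique_lock worthwhile.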
@@ -12,6 +12,7 @@
 
+#include <atomic>
 #include <capnp/capability.h>
 #include <capnp/common.h> // IWYU pragma: keep
 #include <capnp/rpc.h>
 #include <condition_variable>
 #include <functional>
@@ -25,7 +26,6 @@
 #include <kj/memory.h>
 #include <map>
 #include <memory>
-#include <mutex>
 #include <optional>
 #include <stdexcept>
 #include <string>
@@ -81,6 +81,11 @@ ProxyContext::ProxyContext(Connection* connection) : connection(connection), loo
 
 Connection::~Connection()
 {
+    // Connection destructor is always called on the event loop thread. If this
+    // is a local disconnect, it will trigger I/O, so this needs to run on the
+    // event loop thread, and if there was a remote disconnect, this is called
+    // by an onDisconnect callback directly from the event loop thread.
+    assert(std::this_thread::get_id() == m_loop->m_thread_id);
     // Shut down RPC system first, since this will garbage collect any
     // ProxyServer objects that were not freed before the connection was closed.
     // Typically all ProxyServer objects associated with this connection will be
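
The added assertion follows a pattern used throughout these changes: the event loop records its own std::thread::id, and code that must run on the loop thread asserts against it. A self-contained sketch of that pattern, with names chosen to mirror the diff rather than taken from the real headers:

    #include <cassert>
    #include <thread>

    // The loop object records which thread runs it.
    struct EventLoopSketch
    {
        std::thread::id m_thread_id{std::this_thread::get_id()};
    };

    struct ConnectionSketch
    {
        EventLoopSketch* m_loop;

        ~ConnectionSketch()
        {
            // A local disconnect triggers I/O, and a remote disconnect already
            // arrives via an onDisconnect callback, so both destruction paths
            // must be on the event loop thread, as the hunk above asserts.
            assert(std::this_thread::get_id() == m_loop->m_thread_id);
        }
    };

The assertion compiles out in NDEBUG builds but turns a subtle cross-thread misuse into an immediate failure in debug builds.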
@@ -156,6 +161,9 @@ CleanupIt Connection::addSyncCleanup(std::function<void()> fn)
 
 void Connection::removeSyncCleanup(CleanupIt it)
 {
+    // Require cleanup functions to be removed on the event loop thread to avoid
+    // needing to deal with them being removed in the middle of a disconnect.
+    assert(std::this_thread::get_id() == m_loop->m_thread_id);
     const Lock lock(m_loop->m_mutex);
     m_sync_cleanup_fns.erase(it);
 }
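
removeSyncCleanup erases the CleanupIt handle that addSyncCleanup returned. One plausible shape for this, assuming m_sync_cleanup_fns is a std::list (whose iterators stay valid until their own element is erased, which is what makes them safe removal handles):

    #include <functional>
    #include <list>

    using CleanupList = std::list<std::function<void()>>;
    using CleanupItSketch = CleanupList::iterator;

    struct CleanupRegistrySketch
    {
        CleanupList m_fns;

        // Hand back the iterator as a removal handle; std::list iterators
        // remain valid until their own element is erased.
        CleanupItSketch add(std::function<void()> fn)
        {
            return m_fns.emplace(m_fns.begin(), std::move(fn));
        }

        void remove(CleanupItSketch it) { m_fns.erase(it); }
    };

Requiring removal on the event loop thread means a handle can never be erased concurrently with the disconnect path that walks this list.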
@@ -305,29 +313,34 @@ bool EventLoop::done() const
     return m_num_clients == 0 && m_async_fns->empty();
 }
 
-std::tuple<ConnThread, bool> SetThread(ConnThreads& threads, std::mutex& mutex, Connection* connection, const std::function<Thread::Client()>& make_thread)
+std::tuple<ConnThread, bool> SetThread(GuardedRef<ConnThreads> threads, Connection* connection, const std::function<Thread::Client()>& make_thread)
 {
-    const std::unique_lock<std::mutex> lock(mutex);
-    auto thread = threads.find(connection);
-    if (thread != threads.end()) return {thread, false};
-    thread = threads.emplace(
-        std::piecewise_construct, std::forward_as_tuple(connection),
-        std::forward_as_tuple(make_thread(), connection, /* destroy_connection= */ false)).first;
-    thread->second.setDisconnectCallback([&threads, &mutex, thread] {
-        // Note: it is safe to use the `thread` iterator in this cleanup
-        // function, because the iterator would only be invalid if the map entry
-        // was removed, and if the map entry is removed the ProxyClient<Thread>
-        // destructor unregisters the cleanup.
-
-        // Connection is being destroyed before thread client is, so reset
-        // thread client m_disconnect_cb member so thread client destructor does not
-        // try to unregister this callback after connection is destroyed.
-        // Remove connection pointer about to be destroyed from the map
-        const std::unique_lock<std::mutex> lock(mutex);
-        thread->second.m_disconnect_cb.reset();
-        threads.erase(thread);
-    });
-    return {thread, true};
+    assert(std::this_thread::get_id() == connection->m_loop->m_thread_id);
+    ConnThread thread;
+    bool inserted;
+    {
+        const Lock lock(threads.mutex);
+        std::tie(thread, inserted) = threads.ref.try_emplace(connection);
+    }
+    if (inserted) {
+        thread->second.emplace(make_thread(), connection, /* destroy_connection= */ false);
+        thread->second->m_disconnect_cb = connection->addSyncCleanup([threads, thread] {
+            // Note: it is safe to use the `thread` iterator in this cleanup
+            // function, because the iterator would only be invalid if the map entry
+            // was removed, and if the map entry is removed the ProxyClient<Thread>
+            // destructor unregisters the cleanup.
+
+            // Connection is being destroyed before thread client is, so reset
+            // thread client m_disconnect_cb member so thread client destructor does not
+            // try to unregister this callback after connection is destroyed.
+            thread->second->m_disconnect_cb.reset();
+
+            // Remove connection pointer about to be destroyed from the map
+            const Lock lock(threads.mutex);
+            threads.ref.erase(thread);
+        });
+    }
+    return {thread, inserted};
 }
 
 ProxyClient<Thread>::~ProxyClient()
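
GuardedRef is not defined in this diff, but its role is visible from the threads.mutex and threads.ref usage: it bundles the map together with the mutex guarding it, so SetThread can no longer be handed one without the other. A minimal sketch consistent with that usage (the real definition in the headers may differ):

    #include <mutex>

    // Pairs a reference with the mutex that guards it; copying the struct
    // copies the two references, which is what lets the addSyncCleanup lambda
    // above capture `threads` by value.
    template <typename T, typename M = std::mutex>
    struct GuardedRefSketch
    {
        M& mutex; // guards ref
        T& ref;
    };

Note also the shape of the new locking: try_emplace runs under a short lock, while make_thread() and the callback registration run outside it. Making the ConnThreads map value a std::optional is what allows the entry to be created empty under the lock and filled in afterwards, per the "shortening locks" commit above.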
@@ -336,17 +349,18 @@ ProxyClient<Thread>::~ProxyClient()
     // cleanup callback that was registered to handle the connection being
     // destroyed before the thread being destroyed.
     if (m_disconnect_cb) {
-        m_context.connection->removeSyncCleanup(*m_disconnect_cb);
+        // Remove disconnect callback on the event loop thread with
+        // loop->sync(), so if the connection is broken there is not a race
+        // between this thread trying to remove the callback and the disconnect
+        // handler attempting to call it.
+        m_context.loop->sync([&]() {
+            if (m_disconnect_cb) {
+                m_context.connection->removeSyncCleanup(*m_disconnect_cb);
+            }
+        });
     }
 }
 
-void ProxyClient<Thread>::setDisconnectCallback(const std::function<void()>& fn)
-{
-    assert(fn);
-    assert(!m_disconnect_cb);
-    m_disconnect_cb = m_context.connection->addSyncCleanup(fn);
-}
-
 ProxyServer<Thread>::ProxyServer(ThreadContext& thread_context, std::thread&& thread)
     : m_thread_context(thread_context), m_thread(std::move(thread))
 {
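
The destructor can run on any thread, while the disconnect handler registered in SetThread runs on the event loop thread and resets m_disconnect_cb. Routing the removal through loop->sync() serializes the two, and the repeated if (m_disconnect_cb) check inside the lambda handles the case where the disconnect handler won the race. An illustrative reduction of the pattern (EventLoopSketch::sync is hypothetical; the real EventLoop::sync runs the callable on the loop thread and blocks until it finishes):

    #include <optional>
    #include <utility>

    struct EventLoopSketch
    {
        template <typename Fn>
        void sync(Fn&& fn)
        {
            // Stand-in: the real version marshals fn to the event loop
            // thread and waits for it to complete.
            std::forward<Fn>(fn)();
        }
    };

    struct ThreadClientSketch
    {
        EventLoopSketch* m_loop;
        std::optional<int> m_disconnect_cb; // stands in for the cleanup handle

        ~ThreadClientSketch()
        {
            // Between this check and the removal, a disconnect handler could
            // otherwise run on the loop thread and invalidate the handle.
            if (m_disconnect_cb) {
                m_loop->sync([&] {
                    // Re-check under event loop serialization: if the
                    // disconnect handler already ran, it cleared the handle.
                    if (m_disconnect_cb) {
                        m_disconnect_cb.reset(); // removeSyncCleanup(...) in the real code
                    }
                });
            }
        }
    };

Dropping setDisconnectCallback and assigning m_disconnect_cb directly in SetThread goes with this: the callback is now always installed and removed under the same event-loop serialization.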
@@ -364,7 +378,7 @@ ProxyServer<Thread>::~ProxyServer()
     assert(m_thread_context.waiter.get());
     std::unique_ptr<Waiter> waiter;
     {
-        const std::unique_lock<std::mutex> lock(m_thread_context.waiter->m_mutex);
+        const Lock lock(m_thread_context.waiter->m_mutex);
         //! Reset thread context waiter pointer, as shutdown signal for done
         //! lambda passed as waiter->wait() argument in makeThread code below.
         waiter = std::move(m_thread_context.waiter);
@@ -398,7 +412,7 @@ kj::Promise<void> ProxyServer<ThreadMap>::makeThread(MakeThreadContext context)
         g_thread_context.thread_name = ThreadName(m_connection.m_loop->m_exe_name) + " (from " + from + ")";
         g_thread_context.waiter = std::make_unique<Waiter>();
         thread_context.set_value(&g_thread_context);
-        std::unique_lock<std::mutex> lock(g_thread_context.waiter->m_mutex);
+        Lock lock(g_thread_context.waiter->m_mutex);
         // Wait for shutdown signal from ProxyServer<Thread> destructor (signal
         // is just waiter getting set to null.)
        g_thread_context.waiter->wait(lock, [] { return !g_thread_context.waiter; });
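
The shutdown handshake between ~ProxyServer<Thread> and the thread spawned in makeThread uses no separate flag: the worker waits on the Waiter's condition variable with a predicate that becomes true once the owning unique_ptr is nulled. A condensed, runnable sketch of that handshake, with Waiter reduced to a mutex plus condition variable:

    #include <condition_variable>
    #include <future>
    #include <memory>
    #include <mutex>
    #include <thread>

    struct WaiterSketch
    {
        std::mutex m_mutex;
        std::condition_variable m_cv;
    };

    int main()
    {
        std::unique_ptr<WaiterSketch> waiter = std::make_unique<WaiterSketch>();
        std::promise<void> started;
        std::thread worker([&] {
            std::unique_lock<std::mutex> lock(waiter->m_mutex);
            started.set_value(); // like thread_context.set_value() in makeThread
            // Shutdown signal is just `waiter` getting set to null.
            waiter->m_cv.wait(lock, [&] { return !waiter; });
        });
        started.get_future().wait(); // destructor side only runs after the handshake

        // ~ProxyServer side: move the Waiter out under its own lock, then notify.
        std::unique_ptr<WaiterSketch> local;
        {
            const std::unique_lock<std::mutex> lock(waiter->m_mutex);
            local = std::move(waiter); // nulls `waiter`, making the predicate true
        }
        local->m_cv.notify_all();
        worker.join(); // the Waiter outlives the worker's last read of `waiter`
    }

Moving the unique_ptr rather than resetting it keeps the Waiter object, including the mutex the worker still holds through its wait, alive until after the join.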