disentangle things a little more.

having a single loop for everything was too much; goroutines were getting stuck.
fiatjaf 2023-06-23 16:22:57 -03:00
parent f0a35d7ab2
commit ac0c0769fe
No known key found for this signature in database
GPG Key ID: BAD43C4BE5C1A3A1
2 changed files with 107 additions and 96 deletions
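
The failure mode in miniature. This is a made-up sketch, not the library's code (all channel names here are invented): one loop services both the write queue and incoming messages, so a handler that blocks on an unbuffered channel send stalls the writes too, and everything gets stuck.

package main

import "fmt"

func main() {
	incoming := make(chan string)
	writeQueue := make(chan string)
	events := make(chan string) // stands in for a subscription's unbuffered Events channel

	// the old design, condensed: one loop handles messages AND writes
	go func() {
		for {
			select {
			case msg := <-incoming:
				// handled synchronously inside the same loop; with no reader
				// on events this send blocks, and writeQueue is never
				// serviced again
				events <- msg
			case w := <-writeQueue:
				fmt.Println("write:", w)
			}
		}
	}()

	incoming <- "EVENT"
	writeQueue <- "CLOSE" // blocks forever: "fatal error: all goroutines are asleep"
}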

relay.go

@@ -186,15 +186,12 @@ func (r *Relay) Connect(ctx context.Context) error {
     // ping every 29 seconds
     ticker := time.NewTicker(29 * time.Second)
 
-    // queue all messages received from the relay on this
-    messageHandler := make(chan Envelope)
+    // this ensures we don't send an event to the Events channel after closing it
+    eventsChannelCloserMutex := &sync.Mutex{}
 
-    // we'll queue all relay actions (handling received messages etc) in a single queue
-    // such that we can close channels safely without mutex spaghetti
+    // to be used when the connection is closed
     go func() {
-        for {
-            select {
-            case <-r.connectionContext.Done():
+        <-r.connectionContext.Done()
         // close these things when the connection is closed
         if r.challenges != nil {
             close(r.challenges)
@@ -210,6 +207,12 @@ func (r *Relay) Connect(ctx context.Context) error {
             return true
         })
         return
+    }()
+
+    // queue all write operations here so we don't do mutex spaghetti
+    go func() {
+        for {
+            select {
             case <-ticker.C:
                 err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil)
                 if err != nil {
@@ -217,23 +220,27 @@ func (r *Relay) Connect(ctx context.Context) error {
                     r.Close() // this should trigger a context cancelation
                     return
                 }
-            case envelope := <-messageHandler:
-                // this will run synchronously in this goroutine
-                r.HandleRelayMessage(envelope)
             case writeRequest := <-r.writeQueue:
                 // all write requests will go through this to prevent races
                 if err := r.Connection.WriteMessage(writeRequest.msg); err != nil {
                     writeRequest.answer <- err
                 }
                 close(writeRequest.answer)
-            case toClose := <-r.subscriptionChannelCloseQueue:
-                // every time a subscription ends we use this queue to close its Events channel
-                close(toClose.Events)
-                toClose.Events = make(chan *Event)
             }
         }
     }()
+
+    // every time a subscription ends we use this queue to close its .Events channel
+    go func() {
+        for toClose := range r.subscriptionChannelCloseQueue {
+            eventsChannelCloserMutex.Lock()
+            close(toClose.Events)
+            toClose.Events = make(chan *Event)
+            eventsChannelCloserMutex.Unlock()
+        }
+    }()
 
+    // general message reader loop
     go func() {
         for {
             message, err := conn.ReadMessage(r.connectionContext)
@@ -249,15 +256,6 @@ func (r *Relay) Connect(ctx context.Context) error {
                 continue
             }
-            messageHandler <- envelope
-        }
-    }()
-    return nil
-}
-
-// HandleRelayMessage handles a message received from a relay.
-func (r *Relay) HandleRelayMessage(envelope Envelope) {
             switch env := envelope.(type) {
             case *NoticeEnvelope:
                 // see WithNoticeHandler
@@ -300,9 +298,13 @@ func (r *Relay) HandleRelayMessage(envelope Envelope) {
                     }
                 }
+
+                go func() {
+                    eventsChannelCloserMutex.Lock()
                     if subscription.live {
                         subscription.Events <- &env.Event
                     }
+                    eventsChannelCloserMutex.Unlock()
+                }()
             }
         case *EOSEEnvelope:
             if subscription, ok := r.Subscriptions.Load(string(*env)); ok {
@@ -315,6 +317,10 @@ func (r *Relay) HandleRelayMessage(envelope Envelope) {
                     okCallback(env.OK, *env.Reason)
                 }
             }
+            }
+        }
+    }()
+
+    return nil
 }
 
 // Write queues a message to be sent to the relay.
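
The locking scheme above, condensed into a standalone sketch (hypothetical names, not the library's API): every send to a subscription's Events channel and the close of that channel take the same mutex, so a send can never race with, or happen after, the close.

package main

import (
	"fmt"
	"sync"
)

type subscription struct {
	live   bool
	Events chan int
}

var eventsChannelCloserMutex sync.Mutex

// deliver mirrors the EventEnvelope branch: check liveness and send under the lock.
func deliver(s *subscription, ev int) {
	eventsChannelCloserMutex.Lock()
	if s.live {
		s.Events <- ev // a receiver must be draining Events, or this holds the lock
	}
	eventsChannelCloserMutex.Unlock()
}

// retire mirrors the subscriptionChannelCloseQueue consumer: close the old
// channel and swap in a fresh one under the same lock.
func retire(s *subscription) {
	eventsChannelCloserMutex.Lock()
	close(s.Events)
	s.Events = make(chan int)
	eventsChannelCloserMutex.Unlock()
}

func main() {
	s := &subscription{live: true, Events: make(chan int)}
	go func() { fmt.Println("got", <-s.Events) }()
	deliver(s, 1)
	s.live = false // as Unsub does before queueing the close
	retire(s)
	deliver(s, 2) // safe no-op: s.live is false and Events was swapped
}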

subscription.go

@@ -57,20 +57,25 @@ func (sub *Subscription) GetID() string {
 // Unsub closes the subscription, sending "CLOSE" to relay as in NIP-01.
 // Unsub() also closes the channel sub.Events and makes a new one.
 func (sub *Subscription) Unsub() {
-    id := sub.GetID()
+    go sub.Close()
 
+    sub.live = false
+    id := sub.GetID()
+    sub.Relay.Subscriptions.Delete(id)
+    // do this so we don't have the possibility of closing the Events channel and then trying to send to it
+    sub.Relay.subscriptionChannelCloseQueue <- sub
+}
+
+// Close just sends a CLOSE message. You probably want Unsub() instead.
+func (sub *Subscription) Close() {
     if sub.Relay.IsConnected() {
+        id := sub.GetID()
         closeMsg := CloseEnvelope(id)
         closeb, _ := (&closeMsg).MarshalJSON()
         debugLog("{%s} sending %v", sub.Relay.URL, closeb)
         sub.Relay.Write(closeb)
     }
-
-    sub.live = false
-    sub.Relay.Subscriptions.Delete(id)
-    // do this so we don't have the possibility of closing the Events channel and then trying to send to it
-    sub.Relay.subscriptionChannelCloseQueue <- sub
 }
 
 // Sub sets sub.Filters and then calls sub.Fire(ctx).
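
From the caller's side the split means Unsub() returns immediately: the CLOSE message goes out in the background via go sub.Close(). A rough usage sketch; the Subscribe signature and filter fields here are assumptions about this era of the API and may differ between versions:

sub, err := relay.Subscribe(ctx, nostr.Filters{{Kinds: []int{nostr.KindTextNote}, Limit: 1}})
if err != nil {
	log.Fatal(err)
}
for ev := range sub.Events {
	fmt.Println(ev.Content)
	// fires "go sub.Close()" (sends CLOSE) and queues sub.Events for the
	// locked close-and-swap, which ends this range loop
	sub.Unsub()
}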