// go-nostr/relay.go
package nostr

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"sync"
	"sync/atomic"
	"time"

	"github.com/gobwas/ws"
	"github.com/gobwas/ws/wsutil"
	"github.com/puzpuzpuz/xsync"
)
type Status int

const (
	PublishStatusSent      Status = 0
	PublishStatusFailed    Status = -1
	PublishStatusSucceeded Status = 1
)

var subscriptionIdCounter atomic.Int32

func (s Status) String() string {
	switch s {
	case PublishStatusSent:
		return "sent"
	case PublishStatusFailed:
		return "failed"
	case PublishStatusSucceeded:
		return "success"
	}

	return "unknown"
}
// Relay represents a connection to a nostr relay.
type Relay struct {
	URL           string
	RequestHeader http.Header // e.g. for origin header

	Connection      *Connection
	Subscriptions   *xsync.MapOf[string, *Subscription]
	ConnectionError error

	connectionContext       context.Context // will be canceled when the connection closes
	connectionContextCancel context.CancelFunc

	challenges                    chan string // NIP-42 challenges
	notices                       chan string // NIP-01 NOTICEs
	okCallbacks                   *xsync.MapOf[string, func(bool, *string)]
	writeQueue                    chan writeRequest
	subscriptionChannelCloseQueue chan *Subscription

	// custom things that aren't often used
	AssumeValid bool // skip verifying signatures for events received from this relay
}

type writeRequest struct {
	msg    []byte
	answer chan error
}
// NewRelay returns a new relay. The relay connection will be closed when the context is canceled.
func NewRelay(ctx context.Context, url string, opts ...RelayOption) *Relay {
	ctx, cancel := context.WithCancel(ctx)
	r := &Relay{
		URL:                           NormalizeURL(url),
		connectionContext:             ctx,
		connectionContextCancel:       cancel,
		Subscriptions:                 xsync.NewMapOf[*Subscription](),
		okCallbacks:                   xsync.NewMapOf[func(bool, *string)](),
		writeQueue:                    make(chan writeRequest),
		subscriptionChannelCloseQueue: make(chan *Subscription),
	}

	for _, opt := range opts {
		switch o := opt.(type) {
		case WithNoticeHandler:
			r.notices = make(chan string)
			go func() {
				for notice := range r.notices {
					o(notice)
				}
			}()
		case WithAuthHandler:
			r.challenges = make(chan string)
			go func() {
				for challenge := range r.challenges {
					authEvent := Event{
						CreatedAt: Now(),
						Kind:      22242,
						Tags: Tags{
							Tag{"relay", url},
							Tag{"challenge", challenge},
						},
						Content: "",
					}
					if ok := o(r.connectionContext, &authEvent); ok {
						r.Auth(r.connectionContext, authEvent)
					}
				}
			}()
		}
	}

	return r
}
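// A minimal sketch of the NewRelay + Connect flow, for when the caller wants
// the connection's lifetime tied to its own context (the URL is a placeholder):
//
//	connCtx, shutdown := context.WithCancel(context.Background())
//	relay := NewRelay(connCtx, "wss://relay.example.com")
//	if err := relay.Connect(context.Background()); err != nil {
//		log.Fatal(err)
//	}
//	// ... use relay ...
//	shutdown() // canceling connCtx closes the relay connection, per NewRelay's contract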
// RelayConnect returns a relay object connected to url.
// Once successfully connected, cancelling ctx has no effect.
// To close the connection, call r.Close().
func RelayConnect(ctx context.Context, url string, opts ...RelayOption) (*Relay, error) {
	r := NewRelay(context.Background(), url, opts...)
	err := r.Connect(ctx)
	return r, err
}
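// A minimal usage sketch, assuming a reachable relay at a placeholder URL:
//
//	relay, err := RelayConnect(context.Background(), "wss://relay.example.com")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer relay.Close()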
// RelayOption is the type of the optional arguments that may be passed when
// instantiating relay connections.
// Some examples of this are WithNoticeHandler and WithAuthHandler.
type RelayOption interface {
	IsRelayOption()
}

// WithNoticeHandler just takes notices and is expected to do something with them.
// When not given, defaults to logging the notices.
type WithNoticeHandler func(notice string)

func (_ WithNoticeHandler) IsRelayOption() {}

var _ RelayOption = (WithNoticeHandler)(nil)

// WithAuthHandler takes an auth event and expects it to be signed.
// When not given, AUTH messages from relays are ignored.
type WithAuthHandler func(ctx context.Context, authEvent *Event) (ok bool)

func (_ WithAuthHandler) IsRelayOption() {}

var _ RelayOption = (WithAuthHandler)(nil)
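// A minimal sketch of passing options, where signEvent stands in for some
// caller-side signing function (hypothetical name, not part of this package):
//
//	relay, err := RelayConnect(context.Background(), "wss://relay.example.com",
//		WithNoticeHandler(func(notice string) {
//			log.Printf("notice: %s", notice)
//		}),
//		WithAuthHandler(func(ctx context.Context, authEvent *Event) (ok bool) {
//			return signEvent(authEvent) == nil // ok only if we managed to sign the AUTH event
//		}),
//	)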
// String just returns the relay URL.
func (r *Relay) String() string {
	return r.URL
}

// Context retrieves the context that is associated with this relay connection.
func (r *Relay) Context() context.Context { return r.connectionContext }

// IsConnected returns true if the connection to this relay seems to be active.
func (r *Relay) IsConnected() bool { return r.connectionContext.Err() == nil }
// Connect tries to establish a websocket connection to r.URL.
// If the context expires before the connection is complete, an error is returned.
// Once successfully connected, context expiration has no effect: call r.Close
// to close the connection.
//
// The underlying relay connection will use a background context. If you want to
// pass a custom context to the underlying relay connection, use NewRelay() and
// then Relay.Connect().
func (r *Relay) Connect(ctx context.Context) error {
	if r.connectionContext == nil || r.Subscriptions == nil {
		return fmt.Errorf("relay must be initialized with a call to NewRelay()")
	}

	if r.URL == "" {
		return fmt.Errorf("invalid relay URL '%s'", r.URL)
	}

	if _, ok := ctx.Deadline(); !ok {
		// if no timeout is set, force it to 7 seconds
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
		defer cancel()
	}

	conn, err := NewConnection(ctx, r.URL, r.RequestHeader)
	if err != nil {
		return fmt.Errorf("error opening websocket to '%s': %w", r.URL, err)
	}
	r.Connection = conn

	// ping every 29 seconds
	ticker := time.NewTicker(29 * time.Second)

	// to be used when the connection is closed
	go func() {
		<-r.connectionContext.Done()

		// close these things when the connection is closed
		if r.challenges != nil {
			close(r.challenges)
		}
		if r.notices != nil {
			close(r.notices)
		}

		// stop the ticker
		ticker.Stop()

		// close all subscriptions
		r.Subscriptions.Range(func(_ string, sub *Subscription) bool {
			go sub.Unsub()
			return true
		})
	}()

	// queue all write operations here so we don't do mutex spaghetti
	go func() {
		for {
			select {
			case <-ticker.C:
				err := wsutil.WriteClientMessage(r.Connection.conn, ws.OpPing, nil)
				if err != nil {
					InfoLogger.Printf("{%s} error writing ping: %v; closing websocket", r.URL, err)
					r.Close() // this should trigger a context cancelation
					return
				}
			case writeRequest := <-r.writeQueue:
				// all write requests will go through this to prevent races
				if err := r.Connection.WriteMessage(writeRequest.msg); err != nil {
					writeRequest.answer <- err
				}
				close(writeRequest.answer)
			case <-r.connectionContext.Done():
				// stop here
				return
			}
		}
	}()

	// general message reader loop
	go func() {
		for {
			message, err := conn.ReadMessage(r.connectionContext)
			if err != nil {
				r.ConnectionError = err
				r.Close()
				break
			}

			debugLogf("{%s} %v\n", r.URL, message)

			envelope := ParseMessage(message)
			if envelope == nil {
				continue
			}

			switch env := envelope.(type) {
			case *NoticeEnvelope:
				// see WithNoticeHandler
				if r.notices != nil {
					r.notices <- string(*env)
				} else {
					log.Printf("NOTICE from %s: '%s'\n", r.URL, string(*env))
				}
			case *AuthEnvelope:
				if env.Challenge == nil {
					continue
				}
				// see WithAuthHandler
				if r.challenges != nil {
					r.challenges <- *env.Challenge
				}
			case *EventEnvelope:
				if env.SubscriptionID == nil {
					continue
				}

				if subscription, ok := r.Subscriptions.Load(*env.SubscriptionID); !ok {
					// InfoLogger.Printf("{%s} no subscription with id '%s'\n", r.URL, *env.SubscriptionID)
					continue
				} else {
					// check if the event matches the desired filter, ignore otherwise
					if !subscription.Filters.Match(&env.Event) {
						InfoLogger.Printf("{%s} filter does not match: %v ~ %v\n", r.URL, subscription.Filters, env.Event)
						continue
					}

					// check signature, ignore invalid, except from trusted (AssumeValid) relays
					if !r.AssumeValid {
						if ok, err := env.Event.CheckSignature(); !ok {
							errmsg := ""
							if err != nil {
								errmsg = err.Error()
							}
							InfoLogger.Printf("{%s} bad signature: %s\n", r.URL, errmsg)
							continue
						}
					}

					// dispatch this to the internal .events channel of the subscription
					select {
					case subscription.events <- &env.Event:
					case <-subscription.Context.Done():
					}
				}
			case *EOSEEnvelope:
				if subscription, ok := r.Subscriptions.Load(string(*env)); ok {
					// implementation adapted from the naïve/incorrect implementation of sync.Once
					// (which is ok for this use case)
					if subscription.eosed.CompareAndSwap(false, true) {
						go func() {
							time.Sleep(time.Millisecond) // this basically ensures the EndOfStoredEvents call happens after the last EVENT
							close(subscription.EndOfStoredEvents)
						}()
					}
				}
			case *OKEnvelope:
				if okCallback, exist := r.okCallbacks.Load(env.EventID); exist {
					okCallback(env.OK, env.Reason)
				}
			}
		}
	}()

	return nil
}
// Write queues a message to be sent to the relay.
func (r *Relay) Write(msg []byte) <-chan error {
	ch := make(chan error)
	select {
	case r.writeQueue <- writeRequest{msg: msg, answer: ch}:
	case <-r.connectionContext.Done():
		go func() { ch <- fmt.Errorf("connection closed") }()
	}
	return ch
}
// Publish sends an "EVENT" command to the relay r as in NIP-01.
// Status can be: success, failed, or sent (no response from relay before ctx times out).
func (r *Relay) Publish(ctx context.Context, event Event) (Status, error) {
	status := PublishStatusFailed
	var err error

	// data races on status variable without this mutex
	var mu sync.Mutex

	if _, ok := ctx.Deadline(); !ok {
		// if no timeout is set, force it to 7 seconds
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
		defer cancel()
	}

	// make it cancellable so we can stop everything upon receiving an "OK"
	var cancel context.CancelFunc
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// listen for an OK callback
	okCallback := func(ok bool, msg *string) {
		mu.Lock()
		defer mu.Unlock()
		if ok {
			status = PublishStatusSucceeded
		} else {
			status = PublishStatusFailed
			reason := ""
			if msg != nil {
				reason = *msg
			}
			err = fmt.Errorf("msg: %s", reason)
		}
		cancel()
	}
	r.okCallbacks.Store(event.ID, okCallback)
	defer r.okCallbacks.Delete(event.ID)

	// publish event
	envb, _ := EventEnvelope{Event: event}.MarshalJSON()
	debugLogf("{%s} sending %v\n", r.URL, envb)
	status = PublishStatusSent
	if err := <-r.Write(envb); err != nil {
		status = PublishStatusFailed
		return status, err
	}

	for {
		select {
		case <-ctx.Done(): // this will be called when we get an OK
			// proceed to return status as it is
			// e.g. if this happens because of the timeout then status will probably be "failed"
			// but if it happens because okCallback was called then it might be "succeeded"
			// do not return if okCallback is in process
			return status, err
		case <-r.connectionContext.Done():
			// same as above, but when the relay loses connectivity entirely
			return status, err
		}
	}
}
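// A minimal publishing sketch, assuming ev is an Event that has already been
// signed (placeholder variable):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	status, err := relay.Publish(ctx, ev)
//	if err != nil || status != PublishStatusSucceeded {
//		log.Printf("publish to %s did not succeed: %s (%v)", relay, status, err)
//	}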
// Auth sends an "AUTH" command client -> relay as in NIP-42.
// Status can be: success, failed, or sent (no response from relay before ctx times out).
func (r *Relay) Auth(ctx context.Context, event Event) (Status, error) {
	status := PublishStatusFailed
	var err error

	// data races on status variable without this mutex
	var mu sync.Mutex

	if _, ok := ctx.Deadline(); !ok {
		// if no timeout is set, force it to 3 seconds
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, 3*time.Second)
		defer cancel()
	}

	// make it cancellable so we can stop everything upon receiving an "OK"
	var cancel context.CancelFunc
	ctx, cancel = context.WithCancel(ctx)
	defer cancel()

	// listen for an OK callback
	okCallback := func(ok bool, msg *string) {
		mu.Lock()
		if ok {
			status = PublishStatusSucceeded
		} else {
			status = PublishStatusFailed
			reason := ""
			if msg != nil {
				reason = *msg
			}
			err = fmt.Errorf("msg: %s", reason)
		}
		mu.Unlock()
		cancel()
	}
	r.okCallbacks.Store(event.ID, okCallback)
	defer r.okCallbacks.Delete(event.ID)

	// send AUTH
	authResponse, _ := AuthEnvelope{Event: event}.MarshalJSON()
	debugLogf("{%s} sending %v\n", r.URL, authResponse)
	if err := <-r.Write(authResponse); err != nil {
		// status will be "failed"
		return status, err
	}

	// use mu.Lock() just in case the okCallback got called, extremely unlikely
	mu.Lock()
	status = PublishStatusSent
	mu.Unlock()

	// the context either times out, and the status is "sent",
	// or the okCallback is called and the status is set to "succeeded" or "failed"
	// (NIP-42 does not mandate an "OK" reply to an "AUTH" message)
	<-ctx.Done()
	mu.Lock()
	defer mu.Unlock()
	return status, err
}
// Subscribe sends a "REQ" command to the relay r as in NIP-01.
// Events are returned through the channel sub.Events.
// The subscription is closed when context ctx is cancelled ("CLOSE" in NIP-01).
func (r *Relay) Subscribe(ctx context.Context, filters Filters, opts ...SubscriptionOption) (*Subscription, error) {
	sub := r.PrepareSubscription(ctx, filters, opts...)

	if err := sub.Fire(); err != nil {
		return nil, fmt.Errorf("couldn't subscribe to %v at %s: %w", filters, r.URL, err)
	}

	return sub, nil
}
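// A minimal subscription sketch: ask for the ten most recent kind-1 notes and
// read from sub.Events until the context expires (filter values are placeholders):
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	sub, err := relay.Subscribe(ctx, Filters{{Kinds: []int{1}, Limit: 10}})
//	if err != nil {
//		log.Fatal(err)
//	}
//	for evt := range sub.Events {
//		fmt.Println(evt.Content)
//	}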
// PrepareSubscription creates a subscription, but doesn't fire it.
func (r *Relay) PrepareSubscription(ctx context.Context, filters Filters, opts ...SubscriptionOption) *Subscription {
	if r.Connection == nil {
		panic(fmt.Errorf("must call .Connect() first before calling .Subscribe()"))
	}

	current := subscriptionIdCounter.Add(1)
	ctx, cancel := context.WithCancel(ctx)

	sub := &Subscription{
		Relay:              r,
		Context:            ctx,
		cancel:             cancel,
		counter:            int(current),
		Events:             make(chan *Event),
		events:             make(chan *Event),
		EndOfStoredEvents:  make(chan struct{}),
		Filters:            filters,
		closeEventsChannel: make(chan struct{}),
	}

	for _, opt := range opts {
		switch o := opt.(type) {
		case WithLabel:
			sub.label = string(o)
		}
	}

	id := sub.GetID()
	r.Subscriptions.Store(id, sub)

	// start handling events, eose, unsub etc:
	go sub.start()

	return sub
}
// QuerySync subscribes with the given filter, collects events until the relay
// signals EOSE (or the context ends), and returns them as a slice.
func (r *Relay) QuerySync(ctx context.Context, filter Filter, opts ...SubscriptionOption) ([]*Event, error) {
	sub, err := r.Subscribe(ctx, Filters{filter}, opts...)
	if err != nil {
		return nil, err
	}

	defer sub.Unsub()

	if _, ok := ctx.Deadline(); !ok {
		// if no timeout is set, force it to 7 seconds
		var cancel context.CancelFunc
		ctx, cancel = context.WithTimeout(ctx, 7*time.Second)
		defer cancel()
	}

	var events []*Event
	for {
		select {
		case evt := <-sub.Events:
			if evt == nil {
				// channel is closed
				return events, nil
			}
			events = append(events, evt)
		case <-sub.EndOfStoredEvents:
			return events, nil
		case <-ctx.Done():
			return events, nil
		}
	}
}
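// A minimal one-shot query sketch, with a placeholder hex pubkey:
//
//	evs, err := relay.QuerySync(context.Background(),
//		Filter{Kinds: []int{0}, Authors: []string{"<hex pubkey>"}, Limit: 1})
//	if err == nil && len(evs) > 0 {
//		fmt.Println(evs[0].Content)
//	}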
// Close cancels the relay's connection context and closes the underlying websocket.
func (r *Relay) Close() error {
	if r.connectionContextCancel == nil {
		return fmt.Errorf("relay not connected")
	}

	r.connectionContextCancel()
	r.connectionContextCancel = nil
	return r.Connection.Close()
}