go-nostr/pool.go

package nostr

import (
	"context"
	"fmt"
	"sync"
	"time"

	"github.com/puzpuzpuz/xsync/v2"
)

const (
	seenAlreadyDropTick = time.Minute
)

type SimplePool struct {
	Relays  *xsync.MapOf[string, *Relay]
	Context context.Context

	cancel context.CancelFunc
}

type IncomingEvent struct {
	*Event
	Relay *Relay
}

func NewSimplePool(ctx context.Context) *SimplePool {
	ctx, cancel := context.WithCancel(ctx)

	return &SimplePool{
		Relays:  xsync.NewMapOf[*Relay](),
		Context: ctx,
		cancel:  cancel,
	}
}

func (pool *SimplePool) EnsureRelay(url string) (*Relay, error) {
	nm := NormalizeURL(url)

	defer namedLock(url)()

	relay, ok := pool.Relays.Load(nm)
	if ok && relay.IsConnected() {
		// already connected, unlock and return
		return relay, nil
	} else {
		var err error
		// we use this ctx here so when the pool dies everything dies
		ctx, cancel := context.WithTimeout(pool.Context, time.Second*15)
		defer cancel()
		if relay, err = RelayConnect(ctx, nm); err != nil {
			return nil, fmt.Errorf("failed to connect: %w", err)
		}

		pool.Relays.Store(nm, relay)
		return relay, nil
	}
}
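
// exampleEnsureRelayUsage is an illustrative sketch, not part of the original
// pool.go: it shows how a caller might create a SimplePool and let it manage
// a pooled relay connection. The relay URL below is a placeholder assumption.
func exampleEnsureRelayUsage() {
	pool := NewSimplePool(context.Background())

	relay, err := pool.EnsureRelay("wss://relay.example.com") // placeholder URL
	if err != nil {
		fmt.Println("connection failed:", err)
		return
	}
	fmt.Println("connected to", relay.URL)

	// a second call with the same (normalized) URL reuses the live connection
	// instead of dialing again
	_, _ = pool.EnsureRelay("wss://relay.example.com")
}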

// SubMany opens a subscription with the given filters to multiple relays;
// the subscriptions only end when the context is canceled.
func (pool *SimplePool) SubMany(ctx context.Context, urls []string, filters Filters) chan IncomingEvent {
	return pool.subMany(ctx, urls, filters, true)
}

// SubManyNonUnique is like SubMany, but returns duplicate events if they come from different relays.
func (pool *SimplePool) SubManyNonUnique(ctx context.Context, urls []string, filters Filters) chan IncomingEvent {
	return pool.subMany(ctx, urls, filters, false)
}

func (pool *SimplePool) subMany(ctx context.Context, urls []string, filters Filters, unique bool) chan IncomingEvent {
	events := make(chan IncomingEvent)
	seenAlready := xsync.NewMapOf[Timestamp]()
	ticker := time.NewTicker(seenAlreadyDropTick)
	eose := false

	pending := xsync.NewCounter()
	initial := len(urls)
	pending.Add(int64(initial))

	for _, url := range urls {
		go func(nm string) {
			relay, err := pool.EnsureRelay(nm)
			if err != nil {
				return
			}

			sub, _ := relay.Subscribe(ctx, filters)
			if sub == nil {
				return
			}

			for evt := range sub.Events {
				if unique {
					if _, seen := seenAlready.LoadOrStore(evt.ID, evt.CreatedAt); seen {
						continue
					}
				}

				select {
				case <-sub.EndOfStoredEvents:
					eose = true
				case <-ticker.C:
					if eose {
						del := map[string]struct{}{}
						old := Timestamp(time.Now().Add(-seenAlreadyDropTick).Unix())
						seenAlready.Range(func(key string, value Timestamp) bool {
							if value < old {
								del[key] = struct{}{}
							}
							return true
						})
						for k := range del {
							seenAlready.Delete(k)
						}
					}
				case events <- IncomingEvent{Event: evt, Relay: relay}:
				case <-ctx.Done():
					return
				}
			}

			pending.Dec()
			if pending.Value() == 0 {
				close(events)
			}
		}(NormalizeURL(url))
	}

	return events
}
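
// exampleSubManyUsage is an illustrative sketch, not part of the original
// pool.go: it shows SubMany streaming deduplicated events from several relays.
// The relay URLs, the kind-1 filter, and the 30-second timeout are placeholder
// assumptions.
func exampleSubManyUsage() {
	pool := NewSimplePool(context.Background())

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	urls := []string{"wss://relay.example.com", "wss://other.example.net"} // placeholders
	// per the doc comment above, these subscriptions only end when the context
	// is canceled
	for ievt := range pool.SubMany(ctx, urls, Filters{{Kinds: []int{1}, Limit: 5}}) {
		fmt.Println("got event", ievt.ID, "from", ievt.Relay.URL)
	}
}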

// SubManyEose is like SubMany, but it stops subscriptions and closes the channel when it gets an EOSE.
func (pool *SimplePool) SubManyEose(ctx context.Context, urls []string, filters Filters) chan IncomingEvent {
	return pool.subManyEose(ctx, urls, filters, true)
}

// SubManyEoseNonUnique is like SubManyEose, but returns duplicate events if they come from different relays.
func (pool *SimplePool) SubManyEoseNonUnique(ctx context.Context, urls []string, filters Filters) chan IncomingEvent {
	return pool.subManyEose(ctx, urls, filters, false)
}

func (pool *SimplePool) subManyEose(ctx context.Context, urls []string, filters Filters, unique bool) chan IncomingEvent {
	ctx, cancel := context.WithCancel(ctx)

	events := make(chan IncomingEvent)
	seenAlready := xsync.NewMapOf[bool]()
	wg := sync.WaitGroup{}
	wg.Add(len(urls))

	go func() {
		// this will happen when all subscriptions get an eose (or when they die)
		wg.Wait()
		cancel()
		close(events)
	}()

	for _, url := range urls {
		go func(nm string) {
			defer wg.Done()

			relay, err := pool.EnsureRelay(nm)
			if err != nil {
				return
			}

			sub, err := relay.Subscribe(ctx, filters)
			if sub == nil {
				debugLogf("error subscribing to %s with %v: %s", relay, filters, err)
				return
			}

			for {
				select {
				case <-ctx.Done():
					return
				case <-sub.EndOfStoredEvents:
					return
				case evt, more := <-sub.Events:
					if !more {
						return
					}

					if unique {
						if _, seen := seenAlready.LoadOrStore(evt.ID, true); seen {
							continue
						}
					}

					select {
					case events <- IncomingEvent{Event: evt, Relay: relay}:
					case <-ctx.Done():
						return
					}
				}
			}
		}(NormalizeURL(url))
	}

	return events
}
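
// exampleSubManyEoseUsage is an illustrative sketch, not part of the original
// pool.go: it collects stored events from several relays and returns once the
// channel is closed, which subManyEose does after every relay has sent EOSE
// (or its subscription has died). The relay URLs and the kind-0 filter are
// placeholder assumptions.
func exampleSubManyEoseUsage() []*Event {
	pool := NewSimplePool(context.Background())

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	urls := []string{"wss://relay.example.com", "wss://other.example.net"} // placeholders
	stored := make([]*Event, 0)
	for ievt := range pool.SubManyEose(ctx, urls, Filters{{Kinds: []int{0}, Limit: 1}}) {
		stored = append(stored, ievt.Event)
	}
	return stored
}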

// QuerySingle returns the first event returned by the first relay, cancels everything else.
func (pool *SimplePool) QuerySingle(ctx context.Context, urls []string, filter Filter) *IncomingEvent {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	for ievt := range pool.SubManyEose(ctx, urls, Filters{filter}) {
		return &ievt
	}
	return nil
}
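
// exampleQuerySingleUsage is an illustrative sketch, not part of the original
// pool.go: it fetches a single kind-0 (profile metadata) event for one author
// via QuerySingle, which cancels the remaining subscriptions as soon as the
// first event arrives. The relay URLs and the pubkey are placeholder
// assumptions.
func exampleQuerySingleUsage() {
	pool := NewSimplePool(context.Background())

	urls := []string{"wss://relay.example.com", "wss://other.example.net"} // placeholders
	filter := Filter{
		Kinds:   []int{0},
		Authors: []string{"<hex-pubkey-placeholder>"},
	}

	if ievt := pool.QuerySingle(context.Background(), urls, filter); ievt != nil {
		fmt.Println("profile from", ievt.Relay.URL+":", ievt.Content)
	}
}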