2024-01-23 21:21:46 +07:00
"use strict" ;
const { version } = require ( "./package.json" ) ;
2023-10-30 03:14:50 +07:00
const WebSocket = require ( "ws" ) ;
2024-01-01 11:16:53 +07:00
const { verifySignature , validateEvent , nip19 } = require ( "nostr-tools" ) ;
2023-11-18 16:49:20 +07:00
const auth = require ( "./auth.js" ) ;
const nip42 = require ( "./nip42.js" ) ;
2023-11-16 21:53:58 +07:00
2024-01-23 21:12:06 +07:00
let { relays , approved _publishers , log _about _relays , authorized _keys , private _keys , reconnect _time , wait _eose , pause _on _limit , eose _timeout , max _eose _score , cache _relays , max _orphan _sess , broadcast _ratelimit , upstream _ratelimit _expiration , max _client _subs } = require ( "./config" ) ;
2023-11-16 21:53:58 +07:00
2023-10-30 03:14:50 +07:00
const socks = new Set(); // all upstream relay sockets, across every client session
const csess = new Map(); // client sessions; value === null marks an orphan session

log_about_relays = process.env.LOG_ABOUT_RELAYS || log_about_relays;

// Normalize configured npub... keys to hex pubkeys once at startup.
const npubToHex = key => key.startsWith("npub") ? nip19.decode(key).data : key;
authorized_keys = authorized_keys?.map(npubToHex);
approved_publishers = approved_publishers?.map(npubToHex);

// CL MaxEoseScore: Set <max_eose_score> as 0 if configured relays is under of the expected number from <max_eose_score>
if (relays.length < max_eose_score) max_eose_score = 0;

// Ensure every cache relay URL carries a trailing slash (ws URL normalization).
cache_relays = cache_relays?.map(url => url.endsWith("/") ? url : `${url}/`);
2023-10-30 03:14:50 +07:00
// CL - User socket
2024-01-23 22:49:43 +07:00
module . exports = ( ws , req , onClose ) => {
2023-11-16 21:53:58 +07:00
let authKey = null ;
let authorized = true ;
2024-01-03 14:10:01 +07:00
let orphan = getOrphanSess ( ) ; // if available
2024-01-04 22:01:08 +07:00
let lastEvent = Date . now ( ) ;
2024-01-24 16:11:49 +07:00
let ip = req . headers [ "x-forwarded-for" ] ? . split ( "," ) [ 0 ] || req . socket . address ( ) ? . address ;
2023-11-16 21:53:58 +07:00
2024-01-03 14:10:01 +07:00
ws . id = orphan || ( process . pid + Math . floor ( Math . random ( ) * 1000 ) + "_" + csess . size ) ;
2023-11-21 18:42:04 +07:00
ws . subs = new Map ( ) ; // contains filter submitted by clients. per subID
ws . pause _subs = new Set ( ) ; // pause subscriptions from receiving events after reached over <filter.limit> until all relays send EOSE. per subID
2023-11-18 16:49:20 +07:00
ws . events = new Map ( ) ; // only to prevent the retransmit of the same event. per subID
ws . my _events = new Set ( ) ; // for event retransmitting.
ws . pendingEOSE = new Map ( ) ; // each contain subID
2023-11-22 00:20:33 +07:00
ws . EOSETimeout = new Map ( ) ; // per subID
2023-12-24 21:16:44 +07:00
ws . reconnectTimeout = new Set ( ) ; // relays timeout() before reconnection. Only use after client disconnected.
2024-01-23 21:12:06 +07:00
ws . pubkey = null ;
2023-10-31 14:00:16 +07:00
2023-11-16 21:53:58 +07:00
if ( authorized _keys ? . length ) {
authKey = Date . now ( ) + Math . random ( ) . toString ( 36 ) ;
authorized = false ;
ws . send ( JSON . stringify ( [ "AUTH" , authKey ] ) ) ;
2023-11-20 21:21:22 +07:00
} else if ( private _keys !== { } ) {
// If there is no whitelist, Then we ask to client what is their public key.
// We will enable NIP-42 function for this session if user pubkey was available & valid in <private_keys>.
// There is no need to limit this session. We only ask who is this user.
// If it was the users listed at <private_keys> in config.js, Then the user could use NIP-42 protected relays.
authKey = Date . now ( ) + Math . random ( ) . toString ( 36 ) ;
ws . send ( JSON . stringify ( [ "AUTH" , authKey ] ) ) ;
2023-11-16 21:53:58 +07:00
}
2024-01-24 16:11:49 +07:00
console . log ( process . pid , ` ->- ${ ip } ( ${ ws . id } ) connected ${ orphan ? "(orphan reused) " : "" } [ ${ req . headers [ "user-agent" ] || "" } ] ` ) ;
2023-10-30 03:14:50 +07:00
ws . on ( "message" , data => {
try {
data = JSON . parse ( data ) ;
} catch {
return ws . send (
JSON . stringify ( [ "NOTICE" , "error: bad JSON." ] )
)
}
2024-01-23 21:12:06 +07:00
2023-10-30 03:14:50 +07:00
switch ( data [ 0 ] ) {
case "EVENT" :
2023-11-17 14:36:13 +07:00
if ( ! authorized ) return ;
2024-01-01 11:16:53 +07:00
if ( ! validateEvent ( data [ 1 ] ) || ! verifySignature ( data [ 1 ] ) ) return ws . send ( JSON . stringify ( [ "NOTICE" , "error: invalid event" ] ) ) ;
2023-11-16 21:53:58 +07:00
if ( data [ 1 ] . kind == 22242 ) return ws . send ( JSON . stringify ( [ "OK" , data [ 1 ] ? . id , false , "rejected: kind 22242" ] ) ) ;
2024-01-03 16:18:39 +07:00
2024-01-01 11:16:53 +07:00
if (
approved _publishers ? . length &&
! approved _publishers ? . includes ( data [ 1 ] . pubkey )
) return ws . send ( JSON . stringify ( [ "OK" , data [ 1 ] ? . id , false , "rejected: unauthorized" ] ) ) ;
2024-01-04 22:01:08 +07:00
if ( broadcast _ratelimit && ( broadcast _ratelimit > ( Date . now ( ) - lastEvent ) ) ) {
lastEvent = Date . now ( ) ;
return ws . send ( JSON . stringify ( [ "OK" , data [ 1 ] ? . id , false , "rate-limited: request too fast." ] ) ) ;
}
lastEvent = Date . now ( ) ;
2023-11-18 16:49:20 +07:00
ws . my _events . add ( data [ 1 ] ) ;
2023-12-19 23:48:24 +07:00
direct _bc ( data , ws . id ) ;
cache _bc ( data , ws . id ) ;
2023-10-30 03:14:50 +07:00
ws . send ( JSON . stringify ( [ "OK" , data [ 1 ] ? . id , true , "" ] ) ) ;
break ;
case "REQ" :
2023-11-17 14:36:13 +07:00
if ( ! authorized ) return ;
2023-10-30 03:14:50 +07:00
if ( data . length < 3 ) return ws . send ( JSON . stringify ( [ "NOTICE" , "error: bad request." ] ) ) ;
2024-01-04 13:09:06 +07:00
if ( typeof ( data [ 1 ] ) !== "string" ) return ws . send ( JSON . stringify ( [ "NOTICE" , "error: expected subID a string. but got the otherwise." ] ) ) ;
if ( typeof ( data [ 2 ] ) !== "object" ) return ws . send ( JSON . stringify ( [ "CLOSED" , data [ 1 ] , "error: expected filter to be obj, instead gives the otherwise." ] ) ) ;
2024-01-23 21:12:06 +07:00
if ( ( max _client _subs !== - 1 ) && ( ws . subs . size > max _client _subs ) ) return ws . send ( JSON . stringify ( [ "CLOSED" , data [ 1 ] , "rate-limited: too many subscriptions." ] ) ) ;
2024-01-04 22:01:08 +07:00
if ( ws . subs . has ( data [ 1 ] ) ) {
direct _bc ( [ "CLOSE" , data [ 1 ] ] , ws . id ) ;
cache _bc ( [ "CLOSE" , data [ 1 ] ] , ws . id ) ;
}
2024-01-04 13:09:06 +07:00
ws . subs . set ( data [ 1 ] , data . slice ( 2 ) ) ;
2023-11-18 16:49:20 +07:00
ws . events . set ( data [ 1 ] , new Set ( ) ) ;
2023-11-21 22:50:13 +07:00
ws . pause _subs . delete ( data [ 1 ] ) ;
2023-10-31 14:00:16 +07:00
bc ( data , ws . id ) ;
2023-11-12 14:25:43 +07:00
if ( data [ 2 ] ? . limit < 1 ) return ws . send ( JSON . stringify ( [ "EOSE" , data [ 1 ] ] ) ) ;
2023-11-18 16:49:20 +07:00
ws . pendingEOSE . set ( data [ 1 ] , 0 ) ;
2023-10-30 03:14:50 +07:00
break ;
case "CLOSE" :
2023-11-17 14:36:13 +07:00
if ( ! authorized ) return ;
2023-11-02 22:06:12 +07:00
if ( typeof ( data [ 1 ] ) !== "string" ) return ws . send ( JSON . stringify ( [ "NOTICE" , "error: bad request." ] ) ) ;
2024-01-04 13:09:06 +07:00
if ( ! ws . subs . has ( data [ 1 ] ) ) return ws . send ( JSON . stringify ( [ "CLOSED" , data [ 1 ] , "error: this sub is not opened." ] ) ) ;
2023-11-21 18:42:04 +07:00
ws . subs . delete ( data [ 1 ] ) ;
2023-11-18 16:49:20 +07:00
ws . events . delete ( data [ 1 ] ) ;
ws . pendingEOSE . delete ( data [ 1 ] ) ;
2023-11-21 18:42:04 +07:00
ws . pause _subs . delete ( data [ 1 ] ) ;
2023-11-22 00:20:33 +07:00
cancel _EOSETimeout ( ws . id , data [ 1 ] ) ;
2023-12-19 22:56:48 +07:00
cache _bc ( data , ws . id ) ;
direct _bc ( data , ws . id ) ;
2024-01-04 12:26:48 +07:00
ws . send ( JSON . stringify ( [ "CLOSED" , data [ 1 ] , "" ] ) ) ;
2023-10-30 03:14:50 +07:00
break ;
2023-11-16 21:53:58 +07:00
case "AUTH" :
2023-11-20 21:21:22 +07:00
if ( auth ( authKey , data [ 1 ] , ws , req ) ) {
2023-11-16 21:53:58 +07:00
ws . pubkey = data [ 1 ] . pubkey ;
2023-12-19 12:05:11 +09:00
console . log ( process . pid , "---" , ws . id , "successfully authorized as" , ws . pubkey , private _keys [ ws . pubkey ] ? "(admin)" : "(user)" ) ;
2023-11-20 21:21:22 +07:00
if ( authorized ) return ;
2024-01-03 14:10:01 +07:00
csess . set ( ws . id , ws ) ;
if ( ! orphan ) newsess ( ws . id ) ;
2023-11-20 21:21:22 +07:00
authorized = true ;
2024-01-04 22:01:08 +07:00
lastEvent = Date . now ( ) ;
2023-11-16 21:53:58 +07:00
}
break ;
2023-10-30 03:14:50 +07:00
default :
ws . send ( JSON . stringify ( [ "NOTICE" , "error: unrecognized command." ] ) ) ;
break ;
}
} ) ;
ws . on ( 'error' , console . error ) ;
ws . on ( 'close' , _ => {
2024-01-23 22:49:43 +07:00
onClose ( ) ;
2024-01-27 22:57:41 +07:00
2024-01-24 16:11:49 +07:00
console . log ( process . pid , "---" , ` ${ ip } ( ${ ws . id } ) disconnected ( ${ howManyOrphanSess ( ) + 1 } orphans) ` ) ;
2024-01-04 13:56:47 +07:00
if ( csess . has ( ws . id ) ) {
csess . set ( ws . id , null ) ; // set as orphan.
}
2023-10-30 03:14:50 +07:00
2024-01-23 21:21:46 +07:00
for ( const i of ws . EOSETimeout ) {
2023-11-22 00:20:33 +07:00
clearTimeout ( i [ 1 ] ) ;
}
2024-01-27 22:57:41 +07:00
// unauthorized session must be destroyed.
if ( ! authorized ) terminate _sess ( ws . id ) ;
2023-12-24 21:16:44 +07:00
2024-01-23 21:21:46 +07:00
for ( const i of ws . reconnectTimeout ) {
2023-12-25 23:12:26 +09:00
clearTimeout ( i ) ;
2023-12-24 21:16:44 +07:00
// Let the garbage collector do the thing. No need to add ws.reconnectTimeout.delete(i);
}
2024-01-03 14:10:01 +07:00
2024-01-23 21:21:46 +07:00
for ( const i of ws . subs ) {
2024-01-03 14:41:22 +07:00
direct _bc ( [ "CLOSE" , i [ 0 ] ] , ws . id ) ;
cache _bc ( [ "CLOSE" , i [ 0 ] ] , ws . id ) ;
}
2024-01-03 14:10:01 +07:00
onClientDisconnect ( ) ;
2024-01-03 17:15:41 +07:00
2024-01-23 21:12:06 +07:00
// sensitive session must not be preserved.
if ( private _keys && ( ws . pubkey in private _keys ) ) terminate _sess ( ws . id ) ;
2023-10-30 03:14:50 +07:00
} ) ;
2024-01-03 14:10:01 +07:00
if ( authorized ) {
csess . set ( ws . id , ws ) ;
if ( ! orphan ) newsess ( ws . id ) ;
}
2023-10-30 03:14:50 +07:00
}
2023-11-22 00:20:33 +07:00
// CL - (Re)arm the EOSE timeout for subscription <subid> of session <id>.
function timeoutEOSE(id, subid) {
  const sess = csess.get(id);
  if (!sess) return;
  // Replace any timer already running for this subID.
  clearTimeout(sess.EOSETimeout.get(subid));
  const timer = setTimeout(() => timed_out_eose(id, subid), eose_timeout || 2300);
  sess.EOSETimeout.set(subid, timer);
}
// CL - Fired when relays did not all deliver EOSE in time: stop waiting and,
// unless the sub was paused, synthesize the EOSE for the client ourselves.
function timed_out_eose(id, subid) {
  const sess = csess.get(id);
  if (!sess) return;
  sess.EOSETimeout.delete(subid);
  if (!sess.pendingEOSE.has(subid)) return;
  sess.pendingEOSE.delete(subid);
  if (sess.pause_subs.has(subid)) {
    sess.pause_subs.delete(subid);
    return;
  }
  sess.send(JSON.stringify(["EOSE", subid]));
}
// CL - Stop and forget the EOSE timer of <subid> for session <id>.
function cancel_EOSETimeout(id, subid) {
  const sess = csess.get(id);
  if (!sess) return;
  const timer = sess.EOSETimeout.get(subid);
  if (timer !== undefined) clearTimeout(timer); // clearTimeout(undefined) is a no-op anyway
  sess.EOSETimeout.delete(subid);
}
2023-12-19 22:56:48 +07:00
// WS - New session for client $id: open one upstream connection per
// configured cache relay and per configured relay.
function newsess(id) {
  for (const url of cache_relays || []) newConn(url, id);
  for (const url of relays) newConn(url, id);
}
2023-10-30 03:14:50 +07:00
// WS - Broadcast <msg> to session <id>'s non-cache relay sockets, skipping
// relays that recently told us we were rate-limited.
function direct_bc(msg, id) {
  const payload = JSON.stringify(msg);
  for (const sock of socks) {
    if (sock.id !== id) continue;
    if (cache_relays?.includes(sock.url)) continue; // cache relays are handled by cache_bc()
    if (sock.readyState !== 1) continue; // only sockets in OPEN state
    // skip the ratelimit after <config.upstream_ratelimit_expiration>
    const sinceRatelimited = Date.now() - sock.ratelimit;
    if (upstream_ratelimit_expiration > sinceRatelimited) continue;
    sock.send(payload);
  }
}
2023-12-19 22:56:48 +07:00
// WS - Broadcast <msg> only to the cache-relay sockets of session <id>.
function cache_bc(msg, id) {
  const payload = JSON.stringify(msg);
  for (const sock of socks) {
    const isCacheRelay = cache_relays?.includes(sock.url);
    const isOpen = sock.readyState === 1;
    if (isCacheRelay && sock.id === id && isOpen) sock.send(payload);
  }
}
// WS - Route <msg> through cache relays when any are configured,
// otherwise broadcast straight to the direct relays.
function bc(msg, id) {
  if (cache_relays?.length) cache_bc(msg, id);
  else direct_bc(msg, id);
}
2023-10-31 14:00:16 +07:00
// WS - Terminate all existing sockets that were for <id>
// and drop the session itself from the registry.
function terminate_sess(id) {
  csess.delete(id);
  for (const sock of socks) {
    if (sock.id === id) {
      sock.terminate();
      socks.delete(sock); // safe: Set iteration tolerates deletion
    }
  }
}
2024-01-03 14:10:01 +07:00
// Trim orphan sessions down to <max_orphan_sess> after a client disconnect.
function onClientDisconnect() {
  const orphans = howManyOrphanSess();
  const limit = max_orphan_sess || 0;
  if (orphans <= limit) return;
  const excess = orphans - limit;
  if (limit) console.log(process.pid, `There are ${orphans} orphan sessions. I will clear ${excess} orphan sessions.`);
  clearOrphanSess(excess);
}
// Return the id of the first orphan session (csess entry whose value is null),
// or undefined when there is none.
// FIX: removed an unreachable `break` that followed the `return` statement
// (dead code), and iterate with entry destructuring for clarity.
function getOrphanSess() {
  for (const [sessId, sess] of csess) {
    if (sess === null) return sessId; // found an orphan to reuse
  }
  // implicitly returns undefined when no orphan exists
}
// Count sessions currently marked as orphan (csess value === null).
function howManyOrphanSess() {
  let count = 0;
  for (const sess of csess.values()) {
    if (sess === null) count++;
  }
  return count;
}
// Terminate up to <l> orphan sessions.
function clearOrphanSess(l) {
  let cleared = 0;
  for (const [sessId, sess] of csess) {
    if (cleared >= l) break;
    if (sess !== null) continue; // only orphans
    terminate_sess(sessId); // Map iteration tolerates deletion during the loop
    cleared++;
  }
}
2023-10-30 03:14:50 +07:00
// WS - Sessions
// Open one upstream relay connection for session <id> to <addr>, wire up its
// event handlers (replay, filtering, EOSE accounting, ratelimit tracking) and
// schedule reconnects with a growing delay <reconn_t> on close.
function newConn(addr, id, reconn_t = 0) {
  if (!csess.has(id)) return;
  const relay = new WebSocket(addr, {
    headers: {
      "User-Agent": `Bostr (v${version}); The nostr relay bouncer; https://github.com/Yonle/bostr`
    },
    noDelay: true,
    allowSynchronousEvents: true
  });

  relay.id = id;
  relay.ratelimit = 0; // timestamp of the last "rate-limited" reply from this relay

  relay.on('open', _ => {
    const client = csess.get(id);
    if (!csess.has(id)) return relay.terminate();
    reconn_t = 0; // connection succeeded — reset the reconnect backoff
    if (log_about_relays) console.log(process.pid, "---", `[${id}] [${socks.size}/${relays.length * csess.size}] ${relay.url} is connected ${!client ? "(orphan)" : ""}`);

    if (!client) return; // is orphan, do nothing.
    // Replay the client's own events and re-open its subscriptions upstream.
    for (const i of client.my_events) {
      relay.send(JSON.stringify(["EVENT", i]));
    }
    for (const i of client.subs) {
      relay.send(JSON.stringify(["REQ", i[0], ...i[1]]));
    }
  });

  relay.on('message', data => {
    const client = csess.get(id);
    if (!client) return;
    try {
      data = JSON.parse(data);
    } catch (error) {
      return; // ignore malformed upstream payloads
    }
    switch (data[0]) {
      case "EVENT": {
        if (data.length < 3 || typeof(data[1]) !== "string" || typeof(data[2]) !== "object") return;
        if (!client.subs.has(data[1])) return;
        timeoutEOSE(id, data[1]);
        if (client.pause_subs.has(data[1]) && !cache_relays?.includes(relay.url)) return;

        // Client-side filtering against the first filter of this subID:
        // if filter.since > receivedEvent.created_at, skip
        // if receivedEvent.created_at > filter.until, skip
        const cFilter = client.subs.get(data[1])[0];
        if (Array.isArray(cFilter?.ids) && !cFilter.ids.includes(data[2].id)) return;
        if (Array.isArray(cFilter?.authors) && !cFilter.authors.includes(data[2].pubkey)) return;
        if (cFilter?.since > data[2].created_at) return;
        if (data[2].created_at > cFilter?.until) return;
        const NotInSearchQuery = "search" in cFilter && !data[2]?.content?.toLowerCase().includes(cFilter.search.toLowerCase());
        if (NotInSearchQuery) return;
        if (client.events.get(data[1]).has(data[2]?.id)) return; // No need to transmit once it has been transmitted before.

        if (!client.pause_subs.has(data[1])) {
          client.events.get(data[1]).add(data[2]?.id);
          client.send(JSON.stringify(data));
        }

        // send into cache relays.
        if (!cache_relays?.includes(relay.url)) cache_bc(["EVENT", data[2]], id);

        // Now count for REQ limit requested by client.
        // If it's at the limit, Send EOSE to client and delete pendingEOSE of subID
        // Skip if EOSE has been omitted
        if (!client.pendingEOSE.has(data[1]) || !client.subs.get(data[1])[0]?.limit || client.pause_subs.has(data[1])) return;
        if (client.events.get(data[1]).size >= client.subs.get(data[1])[0]?.limit) {
          // Once reached to <filter.limit>, send EOSE to client.
          client.send(JSON.stringify(["EOSE", data[1]]));
          if (pause_on_limit || cache_relays?.includes(relay.url)) {
            client.pause_subs.add(data[1]);
          } else {
            client.pendingEOSE.delete(data[1]);
          }
        }
        break;
      }
      case "EOSE":
        if (!client.pendingEOSE.has(data[1])) return;
        client.pendingEOSE.set(data[1], client.pendingEOSE.get(data[1]) + 1);
        if (log_about_relays) console.log(process.pid, "---", `[${id}]`, `got EOSE from ${relay.url} for ${data[1]}. There are ${client.pendingEOSE.get(data[1])} EOSE received out of ${Array.from(socks).filter(sock => sock.id === id).length} connected relays.`);

        if (!cache_relays?.includes(relay.url)) {
          if (wait_eose && ((client.pendingEOSE.get(data[1]) < max_eose_score) || (client.pendingEOSE.get(data[1]) < Array.from(socks).filter(sock => sock.id === id).length))) return;
          if (client.pause_subs.has(data[1])) return client.pause_subs.delete(data[1]);

          // BUGFIX: this call was `cancel_EOSETimeout(data[1])` — missing the
          // session id argument, so the csess lookup inside always missed and
          // the EOSE timer was never actually cancelled.
          cancel_EOSETimeout(id, data[1]);
        } else {
          // Wait until every connected cache relay has sent EOSE.
          if (client.pendingEOSE.get(data[1]) < Array.from(socks).filter(sock => (sock.id === id) && cache_relays?.includes(sock.url)).length) return;
          // get the filter
          const filter = client.subs.get(data[1]);
          if (client.pause_subs.has(data[1])) {
            client.pause_subs.delete(data[1]);
            client.pendingEOSE.delete(data[1]);
          }
          // now req to the direct connection, with the recent one please.
          return direct_bc(["REQ", data[1], ...filter], id);
        }
        client.send(JSON.stringify(data));
        break;
      case "AUTH":
        // Only answer NIP-42 challenges when we hold a private key for this user.
        if (!private_keys || typeof(data[1]) !== "string" || !client.pubkey) return;
        nip42(relay, client.pubkey, private_keys[client.pubkey], data[1]);
        break;
      case "NOTICE":
      case "CLOSED":
        if (typeof(data[1]) !== "string") return;
        if (data[1].startsWith("rate-limited")) relay.ratelimit = Date.now();
        break;
      case "OK":
        if (typeof(data[2]) !== "string") return;
        if (data[2].startsWith("rate-limited")) relay.ratelimit = Date.now();
        break;
    }
  });

  relay.on('error', _ => {
    if (log_about_relays) console.error(process.pid, "-!-", `[${id}]`, relay.url, _.toString())
  });

  relay.on('close', _ => {
    const client = csess.get(id);
    socks.delete(relay); // Remove this socket session from [socks] list
    if (log_about_relays) console.log(process.pid, "-!-", `[${id}] [${socks.size}/${relays.length * csess.size}]`, "Disconnected from", relay.url);

    if (!csess.has(id)) return;
    reconn_t += reconnect_time || 5000
    const reconnectTimeout = setTimeout(_ => {
      newConn(addr, id, reconn_t);
      client?.reconnectTimeout.delete(reconnectTimeout);
    }, reconn_t); // As a bouncer server, We need to reconnect.
    client?.reconnectTimeout.add(reconnectTimeout);
  });

  relay.on('unexpected-response', (req, res) => {
    socks.delete(relay);
    if (res.statusCode >= 500) return relay.emit("close", null);
    // NOTE(review): `delete` leaves a hole in the array but preserves
    // relays.length — presumably intentional so the connection-count logging
    // denominator stays stable; confirm before changing to splice().
    delete relays[relays.indexOf(addr)];
    console.log(process.pid, "-!-", `${relay.url} give status code ${res.statusCode}. Not (re)connect with new session again.`);
  });

  socks.add(relay); // Add this socket session to [socks]
}