@@ -266,10 +266,10 @@ method unsubscribePeer*(g: GossipSub, peer: PeerId) =

   procCall FloodSub(g).unsubscribePeer(peer)

-proc handleSubscribe*(g: GossipSub,
-                      peer: PubSubPeer,
-                      topic: string,
-                      subscribe: bool) =
+proc handleSubscribe(g: GossipSub,
+                     peer: PubSubPeer,
+                     topic: string,
+                     subscribe: bool) =
   logScope:
     peer
     topic
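
For readers newer to Nim: the only change in this hunk is dropping the postfix `*`, Nim's export marker, which makes `handleSubscribe` private to the module. A minimal illustration (names hypothetical):

```nim
proc internalHelper(x: int): int = x + 1             # no `*`: module-private
proc publicApi*(x: int): int = internalHelper(x) * 2 # `*`: visible to importers
```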
@@ -395,9 +395,7 @@ proc validateAndRelay(g: GossipSub,

   g.floodsub.withValue(topic, peers): toSendPeers.incl(peers[])
   g.mesh.withValue(topic, peers): toSendPeers.incl(peers[])
-
-  # add direct peers
-  toSendPeers.incl(g.subscribedDirectPeers.getOrDefault(topic))
+  g.subscribedDirectPeers.withValue(topic, peers): toSendPeers.incl(peers[])

   # Don't send it to source peer, or peers that
   # sent it during validation
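
`withValue` from `std/tables` runs its body only when the key exists, exposing a pointer to the stored value, whereas the replaced `getOrDefault` constructs an empty default set on every miss. A self-contained sketch with toy types (not the library's own):

```nim
import std/[sets, tables]

var byTopic = initTable[string, HashSet[string]]()
byTopic["news"] = toHashSet(["peerA", "peerB"])

var toSend = initHashSet[string]()
# body runs only if the key is present; `peers` is a ptr to the stored set
byTopic.withValue("news", peers):
  toSend.incl(peers[])
byTopic.withValue("missing", peers):
  toSend.incl(peers[])  # never runs, and no default value is allocated

echo toSend.len  # 2
```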
@@ -468,6 +466,11 @@ method rpcHandler*(g: GossipSub,
   var rpcMsg = decodeRpcMsg(data).valueOr:
     debug "failed to decode msg from peer", peer, err = error
     await rateLimit(g, peer, msgSize)
+    # Raising in the handler closes the gossipsub connection (but doesn't
+    # disconnect the peer!)
+    # TODO evaluate behaviour penalty values
+    peer.behaviourPenalty += 0.1
+
     raise newException(CatchableError, "Peer msg couldn't be decoded")

   when defined(libp2p_expensive_metrics):
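
For context on the new `peer.behaviourPenalty += 0.1`: in gossipsub v1.1 scoring, the behaviour penalty contributes to a peer's score quadratically with a negative weight, so a single slip costs little while repeated misbehaviour compounds. A rough sketch of that shape; the weight and decay factor below are illustrative assumptions, not the library's defaults:

```nim
type Peer = object
  behaviourPenalty: float64

proc penaltyScore(p: Peer, weight = -1.0): float64 =
  # quadratic: one slip (0.1) costs ~0.01, ten slips (1.0) cost 1.0
  weight * p.behaviourPenalty * p.behaviourPenalty

proc decayPenalty(p: var Peer, factor = 0.9) =
  # applied periodically (e.g. per heartbeat) so peers can recover
  p.behaviourPenalty *= factor

var p = Peer()
p.behaviourPenalty += 0.1  # e.g. an undecodable message
echo penaltyScore(p)       # ~ -0.01
```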
@@ -477,12 +480,13 @@ method rpcHandler*(g: GossipSub,
   trace "decoded msg from peer", peer, msg = rpcMsg.shortLog
   await rateLimit(g, peer, g.messageOverhead(rpcMsg, msgSize))

-  # trigger hooks
+  # trigger hooks - these may modify the message
   peer.recvObservers(rpcMsg)

   if rpcMsg.ping.len in 1 ..< 64 and peer.pingBudget > 0:
     g.send(peer, RPCMsg(pong: rpcMsg.ping), isHighPriority = true)
     peer.pingBudget.dec
+
   for i in 0 ..< min(g.topicsHigh, rpcMsg.subscriptions.len):
     template sub: untyped = rpcMsg.subscriptions[i]
     g.handleSubscribe(peer, sub.topic, sub.subscribe)
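
The ping branch above is bounded by a per-peer budget: pongs are sent only while `pingBudget` lasts (it is refilled elsewhere), limiting how much reply traffic a peer can solicit. A minimal sketch of the pattern, with an assumed budget value:

```nim
var pingBudget = 100  # per-peer in the real code, refilled periodically

proc maybePong(pingLen: int): bool =
  # mirrors the guard above: only small pings, only while budget remains
  if pingLen in 1 ..< 64 and pingBudget > 0:
    dec pingBudget
    true
  else:
    false

doAssert maybePong(16)        # small ping, budget available: answered
doAssert not maybePong(1024)  # oversized ping: ignored
```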
@@ -502,16 +506,14 @@ method rpcHandler*(g: GossipSub,
     if msgIdResult.isErr:
       debug "Dropping message due to failed message id generation",
         error = msgIdResult.error
-      # TODO: descore peers due to error during message validation (malicious?)
+      await g.punishInvalidMessage(peer, msg)
       continue

     let
       msgId = msgIdResult.get
       msgIdSalted = g.salt(msgId)
       topic = msg.topic

-    # addSeen adds salt to msgId to avoid
-    # remote attacking the hash function
     if g.addSeen(msgIdSalted):
       trace "Dropping already-seen message", msgId = shortLog(msgId), peer

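
The deleted comment documented why the id is salted before the seen-cache lookup: mixing a per-node random salt into `msgId` prevents a remote peer from crafting ids that collide in our hash table. The behaviour itself is kept via `g.salt(msgId)`. A self-contained sketch of the idea (helper names hypothetical):

```nim
import std/sets

# per-node random value chosen at startup (a constant here for brevity)
const seenSalt = @[0x5a'u8, 0xc3, 0x19, 0x77]

var seen = initHashSet[seq[byte]]()

proc salt(msgId: seq[byte]): seq[byte] =
  # the attacker doesn't know the salt, so can't force collisions
  seenSalt & msgId

proc addSeen(msgIdSalted: seq[byte]): bool =
  # true if the id was already cached (and records it either way)
  seen.containsOrIncl(msgIdSalted)

doAssert not addSeen(salt(@[1'u8, 2, 3]))  # first sighting
doAssert addSeen(salt(@[1'u8, 2, 3]))      # duplicate, dropped
```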
@@ -599,25 +601,24 @@ method onTopicSubscription*(g: GossipSub, topic: string, subscribed: bool) =

     g.mesh.del(topic)

-
   # Send unsubscribe (in reverse order to sub/graft)
   procCall PubSub(g).onTopicSubscription(topic, subscribed)

 method publish*(g: GossipSub,
                 topic: string,
                 data: seq[byte]): Future[int] {.async.} =
-  # base returns always 0
-  discard await procCall PubSub(g).publish(topic, data)
-
   logScope:
     topic

-  trace "Publishing message on topic", data = data.shortLog
-
   if topic.len <= 0: # data could be 0/empty
     debug "Empty topic, skipping publish"
     return 0

+  # base returns always 0
+  discard await procCall PubSub(g).publish(topic, data)
+
+  trace "Publishing message on topic", data = data.shortLog
+
   var peers: HashSet[PubSubPeer]

   # add always direct peers
@@ -630,38 +631,39 @@ method publish*(g: GossipSub,
     # With flood publishing enabled, the mesh is used when propagating messages from other peers,
     # but a peer's own messages will always be published to all known peers in the topic, limited
     # to the amount of peers we can send it to in one heartbeat
-    var maxPeersToFlodOpt: Opt[int64]
-    if g.parameters.bandwidthEstimatebps > 0:
-      let
-        bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
-        msToTransmit = max(data.len div bandwidth, 1)
-      maxPeersToFlodOpt = Opt.some(max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow))
+
+    let maxPeersToFlood =
+      if g.parameters.bandwidthEstimatebps > 0:
+        let
+          bandwidth = (g.parameters.bandwidthEstimatebps) div 8 div 1000 # Divisions are to convert it to Bytes per ms TODO replace with bandwidth estimate
+          msToTransmit = max(data.len div bandwidth, 1)
+        max(g.parameters.heartbeatInterval.milliseconds div msToTransmit, g.parameters.dLow)
+      else:
+        int.high() # unlimited

     for peer in g.gossipsub.getOrDefault(topic):
-      maxPeersToFlodOpt.withValue(maxPeersToFlod):
-        if peers.len >= maxPeersToFlod: break
+      if peers.len >= maxPeersToFlood: break
+
       if peer.score >= g.parameters.publishThreshold:
         trace "publish: including flood/high score peer", peer
         peers.incl(peer)

-  if peers.len < g.parameters.dLow:
-    # not subscribed, or bad mesh, send to fanout peers
-    var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
-    if fanoutPeers.len < g.parameters.dLow:
-      g.replenishFanout(topic)
-      fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
+  elif peers.len < g.parameters.dLow:
+    # not subscribed or bad mesh, send to fanout peers
+    # when flood-publishing, fanout won't help since all potential peers have
+    # already been added

+    g.replenishFanout(topic) # Make sure fanout is populated
+
+    var fanoutPeers = g.fanout.getOrDefault(topic).toSeq()
     g.rng.shuffle(fanoutPeers)

     for fanPeer in fanoutPeers:
       peers.incl(fanPeer)
       if peers.len > g.parameters.d: break

-  # even if we couldn't publish,
-  # we still attempted to publish
-  # on the topic, so it makes sense
-  # to update the last topic publish
-  # time
+  # Attempting to publish counts as a fanout send (even if the message
+  # ultimately is not sent)
   g.lastFanoutPubSub[topic] = Moment.fromNow(g.parameters.fanoutTTL)

   if peers.len == 0:
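
To make the new cap concrete, here is the same arithmetic with assumed inputs (a 10 Mbit/s bandwidth estimate, a 1 MiB message, a 700 ms heartbeat, dLow = 6): a message too large to reach many peers within one heartbeat collapses the flood set down to `dLow`.

```nim
let
  bandwidthEstimatebps = 10_000_000'i64  # assumed estimate, bits per second
  dataLen = 1_048_576'i64                # 1 MiB message
  heartbeatMs = 700'i64
  dLow = 6'i64

let
  bandwidth = bandwidthEstimatebps div 8 div 1000  # 1250 bytes per ms
  msToTransmit = max(dataLen div bandwidth, 1)     # 838 ms for this message
  maxPeersToFlood = max(heartbeatMs div msToTransmit, dLow)

echo maxPeersToFlood  # 6: too slow to flood wider within one heartbeat
```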
@@ -690,7 +692,9 @@ method publish*(g: GossipSub,
   trace "Created new message", msg = shortLog(msg), peers = peers.len

   if g.addSeen(g.salt(msgId)):
-    # custom msgid providers might cause this
+    # If the message was received or published recently, don't re-publish it -
+    # this might happen when not using sequence ids and/or with a custom
+    # message id provider
     trace "Dropping already-seen message"
     return 0

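
A quick illustration of how a custom message id provider reaches this branch: if ids are derived from content alone (no sequence number), publishing the same bytes twice yields the same id, and the second publish hits the seen-cache. Sketch with an assumed content-hash provider:

```nim
import std/hashes

proc contentMsgId(data: seq[byte]): Hash =
  # no sequence number mixed in: identical payloads get identical ids
  hash(data)

doAssert contentMsgId(@[1'u8, 2, 3]) == contentMsgId(@[1'u8, 2, 3])
```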