Download raw body.
bgpd rde poll busy loop fix
The switch to ibufq introduced a bug in the imsg_pending tracking.
peer_imsg_flush() did not adjust imsg_pending, so if a peer is reset
at the wrong moment the imsg_pending counter goes out of sync and
peer_work_pending then always returns 1.
I dislike this imsg_pending tracking and decided it is better to
just walk the peertable in peer_work_pending. This is more work but less
error-prone.
This seems to fix my RDE spinning at 100% CPU (I hope).
--
:wq Claudio
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.49 rde_peer.c
--- rde_peer.c 4 Jun 2025 09:11:38 -0000 1.49
+++ rde_peer.c 21 Aug 2025 19:11:14 -0000
@@ -29,7 +29,6 @@
struct peer_tree peertable = RB_INITIALIZER(&peertable);
struct peer_tree zombietable = RB_INITIALIZER(&zombietable);
struct rde_peer *peerself;
-static long imsg_pending;
CTASSERT(sizeof(peerself->recv_eor) * 8 >= AID_MAX);
CTASSERT(sizeof(peerself->sent_eor) * 8 >= AID_MAX);
@@ -666,9 +665,17 @@ peer_reaper(struct rde_peer *peer)
int
peer_work_pending(void)
{
+ struct rde_peer *p;
+
if (!RB_EMPTY(&zombietable))
return 1;
- return imsg_pending != 0;
+
+ RB_FOREACH(p, peer_tree, &peertable) {
+ if (ibufq_queuelen(p->ibufq) != 0)
+ return 1;
+ }
+
+ return 0;
}
/*
@@ -678,7 +685,6 @@ void
peer_imsg_push(struct rde_peer *peer, struct imsg *imsg)
{
imsg_ibufq_push(peer->ibufq, imsg);
- imsg_pending++;
}
/*
@@ -692,7 +698,6 @@ peer_imsg_pop(struct rde_peer *peer, str
case 0:
return 0;
case 1:
- imsg_pending--;
return 1;
default:
fatal("imsg_ibufq_pop");
bgpd rde poll busy loop fix