From: Claudio Jeker <cjeker@diehard.n-r-g.com>
Subject: bgpd: cleanup / refactor rde_peer.c
To: tech@openbsd.org
Date: Tue, 10 Dec 2024 10:51:33 +0100

This is a diff in preparation for Adj-RIB-Out caching.

This cleans up the peer_dump() call chain a bit.
- Move and rename rde_up_adjout_force_upcall() to peer_blast_upcall()
- Move and rename rde_up_adjout_force_done() to peer_blast_done()
- Introduce peer_blast() which sends the BEGIN_RR marker if needed and
  then issues a prefix walk using peer_blast_upcall() and
  peer_blast_done() to force out the Adj-RIB-Out.
- Move and simplify rde_up_dump_done() to peer_dump_done(). This
  function now simply calls peer_blast().
- Move and rename rde_up_dump_upcall() to peer_dump_upcall()
- Change peer_dump() to always throttle the peer first; the sending of
  the BEGIN_RR marker moved to peer_blast().
  Use peer_blast() in the two cases that don't need a table walk
  instead of doing the same by hand or by abusing the old
  rde_up_dump_done() call. (A toy sketch of the resulting flow follows
  this list.)
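
For illustration, here is a tiny compilable sketch of the new
throttle/blast flow. Everything in it (mock_peer, mock_prefix_walk,
blast_*) is made up for the example and only models the shape of the
code; the real implementation is in the diff below.

/* toy model of peer_dump() -> peer_blast(); not bgpd code */
#include <stdio.h>

struct mock_peer {
	int	throttled;
	int	grestart;	/* graceful-restart capability */
};

/* stand-in for the prefix_dump_new() walk: visit all prefixes, then done */
static void
mock_prefix_walk(struct mock_peer *peer, void (*upcall)(int, void *),
    void (*done)(void *))
{
	int	i;

	for (i = 0; i < 3; i++)
		upcall(i, peer);
	done(peer);
}

static void
blast_upcall(int prefix, void *ptr)
{
	/* the real upcall puts the prefix onto the update queue */
	printf("enqueue prefix %d\n", prefix);
}

static void
blast_done(void *ptr)
{
	struct mock_peer	*peer = ptr;

	/* Adj-RIB-Out ready: unthrottle the peer and inject the EOR */
	peer->throttled = 0;
	if (peer->grestart)
		printf("inject EOR\n");
}

int
main(void)
{
	struct mock_peer	 peer = { 0, 1 };

	peer.throttled = 1;	/* peer_dump() now always throttles first */
	mock_prefix_walk(&peer, blast_upcall, blast_done);
	return 0;
}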

So all in all nothing should change unless I missed something.
-- 
:wq Claudio

Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.38 rde_peer.c
--- rde_peer.c	28 Aug 2024 13:21:39 -0000	1.38
+++ rde_peer.c	10 Dec 2024 09:34:57 -0000
@@ -347,58 +347,6 @@ peer_flush_upcall(struct rib_entry *re, 
 	}
 }
 
-static void
-rde_up_adjout_force_upcall(struct prefix *p, void *ptr)
-{
-	if (p->flags & PREFIX_FLAG_STALE) {
-		/* remove stale entries */
-		prefix_adjout_destroy(p);
-	} else if (p->flags & PREFIX_FLAG_DEAD) {
-		/* ignore dead prefixes, they will go away soon */
-	} else if ((p->flags & PREFIX_FLAG_MASK) == 0) {
-		/* put entries on the update queue if not allready on a queue */
-		p->flags |= PREFIX_FLAG_UPDATE;
-		if (RB_INSERT(prefix_tree, &prefix_peer(p)->updates[p->pt->aid],
-		    p) != NULL)
-			fatalx("%s: RB tree invariant violated", __func__);
-	}
-}
-
-static void
-rde_up_adjout_force_done(void *ptr, uint8_t aid)
-{
-	struct rde_peer		*peer = ptr;
-
-	/* Adj-RIB-Out ready, unthrottle peer and inject EOR */
-	peer->throttled = 0;
-	if (peer->capa.grestart.restart)
-		prefix_add_eor(peer, aid);
-}
-
-static void
-rde_up_dump_upcall(struct rib_entry *re, void *ptr)
-{
-	struct rde_peer		*peer = ptr;
-	struct prefix		*p;
-
-	if ((p = prefix_best(re)) == NULL)
-		/* no eligible prefix, not even for 'evaluate all' */
-		return;
-
-	peer_generate_update(peer, re, NULL, NULL, 0);
-}
-
-static void
-rde_up_dump_done(void *ptr, uint8_t aid)
-{
-	struct rde_peer		*peer = ptr;
-
-	/* force out all updates of Adj-RIB-Out for this peer */
-	if (prefix_dump_new(peer, aid, 0, peer, rde_up_adjout_force_upcall,
-	    rde_up_adjout_force_done, NULL) == -1)
-		fatal("%s: prefix_dump_new", __func__);
-}
-
 /*
  * Session got established, bring peer up, load RIBs do initial table dump.
  */
@@ -543,32 +491,103 @@ peer_stale(struct rde_peer *peer, uint8_
 }
 
 /*
- * Load the Adj-RIB-Out of a peer normally called when a session is established.
- * Once the Adj-RIB-Out is ready stale routes are removed from the Adj-RIB-Out
- * and all routes are put on the update queue so they will be sent out.
+ * RIB walker callback for peer_blast.
+ * Enqueue a prefix onto the update queue so it can be sent out.
  */
-void
-peer_dump(struct rde_peer *peer, uint8_t aid)
+static void
+peer_blast_upcall(struct prefix *p, void *ptr)
+{
+	if (p->flags & PREFIX_FLAG_STALE) {
+		/* remove stale entries */
+		prefix_adjout_destroy(p);
+	} else if (p->flags & PREFIX_FLAG_DEAD) {
+		/* ignore dead prefixes, they will go away soon */
+	} else if ((p->flags & PREFIX_FLAG_MASK) == 0) {
+	/* put entries on the update queue if not already on a queue */
+		p->flags |= PREFIX_FLAG_UPDATE;
+		if (RB_INSERT(prefix_tree, &prefix_peer(p)->updates[p->pt->aid],
+		    p) != NULL)
+			fatalx("%s: RB tree invariant violated", __func__);
+	}
+}
+
+/*
+ * Called after all prefixes are put onto the update queue and we are
+ * ready to blast out updates to the peer.
+ */
+static void
+peer_blast_done(void *ptr, uint8_t aid)
+{
+	struct rde_peer		*peer = ptr;
+
+	/* Adj-RIB-Out ready, unthrottle peer and inject EOR */
+	peer->throttled = 0;
+	if (peer->capa.grestart.restart)
+		prefix_add_eor(peer, aid);
+}
+
+/*
+ * Send out the full Adj-RIB-Out by putting all prefixes onto the update
+ * queue.
+ */
+static void
+peer_blast(struct rde_peer *peer, uint8_t aid)
 {
 	if (peer->capa.enhanced_rr && (peer->sent_eor & (1 << aid)))
 		rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR);
 
+	/* force out all updates from the Adj-RIB-Out */
+	if (prefix_dump_new(peer, aid, 0, peer, peer_blast_upcall,
+	    peer_blast_done, NULL) == -1)
+		fatal("%s: prefix_dump_new", __func__);
+}
+
+/* RIB walker callbacks for peer_dump. */
+static void
+peer_dump_upcall(struct rib_entry *re, void *ptr)
+{
+	struct rde_peer		*peer = ptr;
+	struct prefix		*p;
+
+	if ((p = prefix_best(re)) == NULL)
+		/* no eligible prefix, not even for 'evaluate all' */
+		return;
+
+	peer_generate_update(peer, re, NULL, NULL, 0);
+}
+
+static void
+peer_dump_done(void *ptr, uint8_t aid)
+{
+	struct rde_peer		*peer = ptr;
+
+	/* Adj-RIB-Out is ready, blast it out */
+	peer_blast(peer, aid);
+}
+
+/*
+ * Load the Adj-RIB-Out of a peer, normally called when a session comes up
+ * for the first time. Once the Adj-RIB-Out is ready it will blast the
+ * updates out.
+ */
+void
+peer_dump(struct rde_peer *peer, uint8_t aid)
+{
+	/* throttle peer until dump is done */
+	peer->throttled = 1;
+
 	if (peer->export_type == EXPORT_NONE) {
-		/* nothing to send apart from the marker */
-		if (peer->capa.grestart.restart)
-			prefix_add_eor(peer, aid);
+		peer_blast(peer, aid);
 	} else if (peer->export_type == EXPORT_DEFAULT_ROUTE) {
 		up_generate_default(peer, aid);
-		rde_up_dump_done(peer, aid);
+		peer_blast(peer, aid);
 	} else if (aid == AID_FLOWSPECv4 || aid == AID_FLOWSPECv6) {
-		prefix_flowspec_dump(aid, peer, rde_up_dump_upcall,
-		    rde_up_dump_done);
+		prefix_flowspec_dump(aid, peer, peer_dump_upcall,
+		    peer_dump_done);
 	} else {
 		if (rib_dump_new(peer->loc_rib_id, aid, RDE_RUNNER_ROUNDS, peer,
-		    rde_up_dump_upcall, rde_up_dump_done, NULL) == -1)
+		    peer_dump_upcall, peer_dump_done, NULL) == -1)
 			fatal("%s: rib_dump_new", __func__);
-		/* throttle peer until dump is done */
-		peer->throttled = 1;
 	}
 }