bgpd: cleanup / refactor rde_peer.c
On Tue, Dec 10, 2024 at 10:51:33AM +0100, Claudio Jeker wrote:
> This is a diff in preparation for Adj-RIB-Out caching.
>
> This cleans up the peer_dump() call chain a bit.
> - Move and rename rde_up_adjout_force_upcall() to peer_blast_upcall()
> - Move and rename rde_up_adjout_force_done() to peer_blast_done()
> - Introduce peer_blast() which sends the BEGIN_RR marker if needed and
>   then issues a prefix walk using peer_blast_upcall() and
>   peer_blast_done() to force out the Adj-RIB-Out.
> - Move and simplify rde_up_dump_done() to peer_dump_done(). This
>   function now simply calls peer_blast().
> - Move and rename rde_up_dump_upcall() to peer_dump_upcall()
> - Change peer_dump() to always throttle the peer first; the sending of
>   the BEGIN_RR marker was moved to peer_blast().
>   Use peer_blast() in the two cases that don't need a table walk,
>   instead of doing the same by hand or by abusing the old
>   rde_up_dump_done() call.
>
> So all in all nothing should change unless I missed something.
ok tb
After I shuffled the functions back to where they were, I found it a lot
easier to convince myself that nothing actually changed (except that
things became cleaner and less hacky); see below. Of course I had to
add a prototype for peer_blast() to make it compile.
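
(Aside for readers not familiar with the RDE's walker pattern: the dump
functions here all share one shape, where a walker visits every entry,
invoking a per-entry upcall and, at the end, a done callback. Below is
a minimal standalone sketch of that shape; toy_dump() and the toy types
are made up for illustration, this is not bgpd code.)

#include <stdio.h>

struct entry { int value; };

/* toy walker: visit every entry, then signal completion */
static void
toy_dump(struct entry *tbl, size_t n, void *arg,
    void (*upcall)(struct entry *, void *),
    void (*done)(void *))
{
	size_t i;

	for (i = 0; i < n; i++)
		upcall(&tbl[i], arg);
	done(arg);
}

/* stand-ins for peer_blast_upcall()/peer_blast_done() */
static void
blast_upcall(struct entry *e, void *arg)
{
	printf("put entry %d on the update queue\n", e->value);
}

static void
blast_done(void *arg)
{
	printf("walk done, End-of-RIB can go out now\n");
}

int
main(void)
{
	struct entry tbl[] = { { 1 }, { 2 }, { 3 } };

	/* mirrors the prefix_dump_new() call in peer_blast() */
	toy_dump(tbl, 3, NULL, blast_upcall, blast_done);
	return 0;
}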
One tiny nit:
> +static void
> +peer_blast_upcall(struct prefix *p, void *ptr)
> +{
> + if (p->flags & PREFIX_FLAG_STALE) {
> + /* remove stale entries */
> + prefix_adjout_destroy(p);
> + } else if (p->flags & PREFIX_FLAG_DEAD) {
> + /* ignore dead prefixes, they will go away soon */
> + } else if ((p->flags & PREFIX_FLAG_MASK) == 0) {
> + /* put entries on the update queue if not allready on a queue */
typo: "already"
> + p->flags |= PREFIX_FLAG_UPDATE;
> + if (RB_INSERT(prefix_tree, &prefix_peer(p)->updates[p->pt->aid],
> + p) != NULL)
> + fatalx("%s: RB tree invariant violated", __func__);
> + }
> +}
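
The fatalx() in that last branch leans on a property of OpenBSD's
tree(3) macros: RB_INSERT() returns NULL on success and a pointer to
the colliding element when a node with the same key is already in the
tree, so a non-NULL return really is a violated invariant. A minimal
standalone sketch of the idiom, assuming <sys/tree.h>; the node type
and compare function are made up:

#include <sys/tree.h>

#include <err.h>
#include <stdlib.h>

struct node {
	RB_ENTRY(node)	entry;
	int		key;
};

static int
node_cmp(struct node *a, struct node *b)
{
	return (a->key < b->key ? -1 : a->key > b->key);
}

RB_HEAD(node_tree, node) head = RB_INITIALIZER(&head);
RB_GENERATE(node_tree, node, entry, node_cmp)

int
main(void)
{
	struct node *n;

	if ((n = calloc(1, sizeof(*n))) == NULL)
		err(1, NULL);
	n->key = 42;

	/* a non-NULL return means this key was already in the tree */
	if (RB_INSERT(node_tree, &head, n) != NULL)
		errx(1, "RB tree invariant violated");
	return 0;
}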
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.38 rde_peer.c
--- rde_peer.c 28 Aug 2024 13:21:39 -0000 1.38
+++ rde_peer.c 10 Dec 2024 10:45:17 -0000
@@ -347,8 +347,12 @@ peer_flush_upcall(struct rib_entry *re,
}
}
+/*
+ * RIB walker callback for peer_blast.
+ * Enqueue a prefix onto the update queue so it can be sent out.
+ */
static void
-rde_up_adjout_force_upcall(struct prefix *p, void *ptr)
+peer_blast_upcall(struct prefix *p, void *ptr)
{
if (p->flags & PREFIX_FLAG_STALE) {
/* remove stale entries */
@@ -364,8 +368,12 @@ rde_up_adjout_force_upcall(struct prefix
}
}
+/*
+ * Called after all prefixes are put onto the update queue and we are
+ * ready to blast out updates to the peer.
+ */
static void
-rde_up_adjout_force_done(void *ptr, uint8_t aid)
+peer_blast_done(void *ptr, uint8_t aid)
{
struct rde_peer *peer = ptr;
@@ -375,8 +383,9 @@ rde_up_adjout_force_done(void *ptr, uint
prefix_add_eor(peer, aid);
}
+/* RIB walker callbacks for peer_dump. */
static void
-rde_up_dump_upcall(struct rib_entry *re, void *ptr)
+peer_dump_upcall(struct rib_entry *re, void *ptr)
{
struct rde_peer *peer = ptr;
struct prefix *p;
@@ -388,15 +397,15 @@ rde_up_dump_upcall(struct rib_entry *re,
peer_generate_update(peer, re, NULL, NULL, 0);
}
+static void peer_blast(struct rde_peer *peer, uint8_t aid);
+
static void
-rde_up_dump_done(void *ptr, uint8_t aid)
+peer_dump_done(void *ptr, uint8_t aid)
{
struct rde_peer *peer = ptr;
- /* force out all updates of Adj-RIB-Out for this peer */
- if (prefix_dump_new(peer, aid, 0, peer, rde_up_adjout_force_upcall,
- rde_up_adjout_force_done, NULL) == -1)
- fatal("%s: prefix_dump_new", __func__);
+ /* Adj-RIB-Out is ready, blast it out */
+ peer_blast(peer, aid);
}
/*
@@ -543,32 +552,44 @@ peer_stale(struct rde_peer *peer, uint8_
}
/*
- * Load the Adj-RIB-Out of a peer normally called when a session is established.
- * Once the Adj-RIB-Out is ready stale routes are removed from the Adj-RIB-Out
- * and all routes are put on the update queue so they will be sent out.
+ * Send out the full Adj-RIB-Out by putting all prefixes onto the update
+ * queue.
*/
-void
-peer_dump(struct rde_peer *peer, uint8_t aid)
+static void
+peer_blast(struct rde_peer *peer, uint8_t aid)
{
if (peer->capa.enhanced_rr && (peer->sent_eor & (1 << aid)))
rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR);
+ /* force out all updates from the Adj-RIB-Out */
+ if (prefix_dump_new(peer, aid, 0, peer, peer_blast_upcall,
+ peer_blast_done, NULL) == -1)
+ fatal("%s: prefix_dump_new", __func__);
+}
+
+/*
+ * Load the Adj-RIB-Out of a peer normally called when a session comes up
+ * for the first time. Once the Adj-RIB-Out is ready it will blast the
+ * updates out.
+ */
+void
+peer_dump(struct rde_peer *peer, uint8_t aid)
+{
+ /* throttle peer until dump is done */
+ peer->throttled = 1;
+
if (peer->export_type == EXPORT_NONE) {
- /* nothing to send apart from the marker */
- if (peer->capa.grestart.restart)
- prefix_add_eor(peer, aid);
+ peer_blast(peer, aid);
} else if (peer->export_type == EXPORT_DEFAULT_ROUTE) {
up_generate_default(peer, aid);
- rde_up_dump_done(peer, aid);
+ peer_blast(peer, aid);
} else if (aid == AID_FLOWSPECv4 || aid == AID_FLOWSPECv6) {
- prefix_flowspec_dump(aid, peer, rde_up_dump_upcall,
- rde_up_dump_done);
+ prefix_flowspec_dump(aid, peer, peer_dump_upcall,
+ peer_dump_done);
} else {
if (rib_dump_new(peer->loc_rib_id, aid, RDE_RUNNER_ROUNDS, peer,
- rde_up_dump_upcall, rde_up_dump_done, NULL) == -1)
+ peer_dump_upcall, peer_dump_done, NULL) == -1)
fatal("%s: rib_dump_new", __func__);
- /* throttle peer until dump is done */
- peer->throttled = 1;
}
}