From: Claudio Jeker <cjeker@diehard.n-r-g.com>
Subject: bgpd: shuffle the adjout deck chairs into a new corner
To: tech@openbsd.org
Date: Tue, 18 Nov 2025 14:54:40 +0100

This is the first step of a major rewrite of the adj-rib-out handling.
I tried to do this rewrite in multiple small steps to make it easier to
review.

This diff just moves code around. It moves all the bits that build the
adj-rib-out into a new file: rde_adjout.c
The prefix code there now uses the prefix_adjout_XYZ naming scheme.
Because of the move, some functions need to be exported for now.
Apart from those two changes it should be a 1-to-1 move.
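
To illustrate the rename, here is a minimal caller sketch (hypothetical,
not part of the diff) that walks a peer's full Adj-RIB-Out with the new
API; walk_upcall, walk_done and walk_adjout are made-up names:

#include "bgpd.h"
#include "rde.h"
#include "log.h"

/* upcall invoked for every live prefix in the Adj-RIB-Out */
static void
walk_upcall(struct prefix *p, void *arg)
{
	unsigned int *cnt = arg;

	(*cnt)++;
}

/* done callback invoked once the walk has finished */
static void
walk_done(void *arg, uint8_t aid)
{
	log_info("adj-rib-out walk done, %u prefixes",
	    *(unsigned int *)arg);
}

void
walk_adjout(struct rde_peer *peer)
{
	static unsigned int cnt;

	/* was: prefix_dump_new(peer, AID_UNSPEC, 0, ...) */
	if (prefix_adjout_dump_new(peer, AID_UNSPEC, 0, &cnt,
	    walk_upcall, walk_done, NULL) == -1)
		fatal("%s: prefix_adjout_dump_new", __func__);
}

Passing a count of 0 requests a sync traversal, so the whole walk
completes before prefix_adjout_dump_new() returns; a non-zero count
batches the walk through rib_dump_runner() instead.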

-- 
:wq Claudio


Index: Makefile
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/Makefile,v
diff -u -p -r1.43 Makefile
--- Makefile	29 Oct 2025 10:32:38 -0000	1.43
+++ Makefile	18 Nov 2025 13:27:29 -0000
@@ -18,6 +18,7 @@ SRCS+=	pfkey.c
 SRCS+=	pftable.c
 SRCS+=	printconf.c
 SRCS+=	rde.c
+SRCS+=	rde_adjout.c
 SRCS+=	rde_aspa.c
 SRCS+=	rde_attr.c
 SRCS+=	rde_community.c
Index: rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
diff -u -p -r1.664 rde.c
--- rde.c	14 Nov 2025 19:34:40 -0000	1.664
+++ rde.c	18 Nov 2025 13:27:29 -0000
@@ -3077,13 +3077,13 @@ rde_dump_done(void *arg, uint8_t aid)
 		ctx->peerid = peer->conf.id;
 		switch (ctx->req.type) {
 		case IMSG_CTL_SHOW_RIB:
-			if (prefix_dump_new(peer, ctx->req.aid,
+			if (prefix_adjout_dump_new(peer, ctx->req.aid,
 			    CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
 			    rde_dump_done, rde_dump_throttled) == -1)
 				goto nomem;
 			break;
 		case IMSG_CTL_SHOW_RIB_PREFIX:
-			if (prefix_dump_subtree(peer, &ctx->req.prefix,
+			if (prefix_adjout_dump_subtree(peer, &ctx->req.prefix,
 			    ctx->req.prefixlen, CTL_MSG_HIGH_MARK, ctx,
 			    rde_dump_adjout_upcall, rde_dump_done,
 			    rde_dump_throttled) == -1)
@@ -3152,15 +3152,16 @@ rde_dump_ctx_new(struct ctl_show_rib_req
 		ctx->peerid = peer->conf.id;
 		switch (ctx->req.type) {
 		case IMSG_CTL_SHOW_RIB:
-			if (prefix_dump_new(peer, ctx->req.aid,
+			if (prefix_adjout_dump_new(peer, ctx->req.aid,
 			    CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
 			    rde_dump_done, rde_dump_throttled) == -1)
 				goto nomem;
 			break;
 		case IMSG_CTL_SHOW_RIB_PREFIX:
 			if (req->flags & F_LONGER) {
-				if (prefix_dump_subtree(peer, &req->prefix,
-				    req->prefixlen, CTL_MSG_HIGH_MARK, ctx,
+				if (prefix_adjout_dump_subtree(peer,
+				    &req->prefix, req->prefixlen,
+				    CTL_MSG_HIGH_MARK, ctx,
 				    rde_dump_adjout_upcall,
 				    rde_dump_done, rde_dump_throttled) == -1)
 					goto nomem;
@@ -3835,7 +3836,7 @@ rde_reload_done(void)
 			rde_eval_all = 1;
 
 		if (peer->reconf_rib) {
-			if (prefix_dump_new(peer, AID_UNSPEC,
+			if (prefix_adjout_dump_new(peer, AID_UNSPEC,
 			    RDE_RUNNER_ROUNDS, NULL, rde_up_flush_upcall,
 			    rde_softreconfig_in_done, NULL) == -1)
 				fatal("%s: prefix_dump_new", __func__);
@@ -3886,8 +3887,8 @@ rde_reload_done(void)
 					if (peer->reconf_rib)
 						continue;
 
-					if (prefix_dump_new(peer, AID_UNSPEC,
-					    RDE_RUNNER_ROUNDS, NULL,
+					if (prefix_adjout_dump_new(peer,
+					    AID_UNSPEC, RDE_RUNNER_ROUNDS, NULL,
 					    rde_up_flush_upcall,
 					    rde_softreconfig_in_done,
 					    NULL) == -1)
Index: rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
diff -u -p -r1.318 rde.h
--- rde.h	29 Oct 2025 10:34:23 -0000	1.318
+++ rde.h	18 Nov 2025 13:27:29 -0000
@@ -1,4 +1,4 @@
-/*	$OpenBSD: rde.h,v 1.318 2025/10/29 10:34:23 claudio Exp $ */
+/*	$OpenBSD: rde.h,v 1.317 2025/09/24 14:04:04 claudio Exp $ */
 
 /*
  * Copyright (c) 2003, 2004 Claudio Jeker <claudio@openbsd.org> and
@@ -330,6 +330,22 @@ enum eval_mode {
 	EVAL_RECONF,
 };
 
+struct rib_context {
+	LIST_ENTRY(rib_context)		 entry;
+	struct rib_entry		*ctx_re;
+	struct prefix			*ctx_p;
+	uint32_t			 ctx_id;
+	void		(*ctx_rib_call)(struct rib_entry *, void *);
+	void		(*ctx_prefix_call)(struct prefix *, void *);
+	void		(*ctx_done)(void *, uint8_t);
+	int		(*ctx_throttle)(void *);
+	void				*ctx_arg;
+	struct bgpd_addr		 ctx_subtree;
+	unsigned int			 ctx_count;
+	uint8_t				 ctx_aid;
+	uint8_t				 ctx_subtreelen;
+};
+
 extern struct rde_memstats rdemem;
 
 /* prototypes */
@@ -557,6 +573,7 @@ struct rib_entry *rib_get_addr(struct ri
 struct rib_entry *rib_match(struct rib *, struct bgpd_addr *);
 int		 rib_dump_pending(void);
 void		 rib_dump_runner(void);
+void		 rib_dump_insert(struct rib_context *);
 int		 rib_dump_new(uint16_t, uint8_t, unsigned int, void *,
 		    void (*)(struct rib_entry *, void *),
 		    void (*)(void *, uint8_t),
@@ -580,6 +597,10 @@ re_rib(struct rib_entry *re)
 }
 
 void		 path_init(void);
+int		 path_equal(const struct rde_aspath *,
+		    const struct rde_aspath *);
+struct rde_aspath *path_getcache(struct rde_aspath *);
+struct rde_aspath *path_lookup(struct rde_aspath *);
 struct rde_aspath *path_copy(struct rde_aspath *, const struct rde_aspath *);
 struct rde_aspath *path_prep(struct rde_aspath *);
 struct rde_aspath *path_get(void);
@@ -589,13 +610,6 @@ void		 path_put(struct rde_aspath *);
 #define	PREFIX_SIZE(x)	(((x) + 7) / 8 + 1)
 struct prefix	*prefix_get(struct rib *, struct rde_peer *, uint32_t,
 		    struct bgpd_addr *, int);
-struct prefix	*prefix_adjout_get(struct rde_peer *, uint32_t,
-		    struct pt_entry *);
-struct prefix	*prefix_adjout_first(struct rde_peer *, struct pt_entry *);
-struct prefix	*prefix_adjout_next(struct rde_peer *, struct prefix *);
-struct prefix	*prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
-		    int);
-struct prefix	*prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
 int		 prefix_update(struct rib *, struct rde_peer *, uint32_t,
 		    uint32_t, struct filterstate *, int, struct bgpd_addr *,
 		    int);
@@ -607,20 +621,13 @@ int		 prefix_flowspec_withdraw(struct rd
 void		 prefix_flowspec_dump(uint8_t, void *,
 		    void (*)(struct rib_entry *, void *),
 		    void (*)(void *, uint8_t));
-void		 prefix_add_eor(struct rde_peer *, uint8_t);
-void		 prefix_adjout_update(struct prefix *, struct rde_peer *,
-		    struct filterstate *, struct pt_entry *, uint32_t);
-void		 prefix_adjout_withdraw(struct prefix *);
-void		 prefix_adjout_destroy(struct prefix *);
-void		 prefix_adjout_flush_pending(struct rde_peer *);
-int		 prefix_adjout_reaper(struct rde_peer *);
-int		 prefix_dump_new(struct rde_peer *, uint8_t, unsigned int,
-		    void *, void (*)(struct prefix *, void *),
-		    void (*)(void *, uint8_t), int (*)(void *));
-int		 prefix_dump_subtree(struct rde_peer *, struct bgpd_addr *,
-		    uint8_t, unsigned int, void *,
-		    void (*)(struct prefix *, void *),
-		    void (*)(void *, uint8_t), int (*)(void *));
+
+void		 prefix_link(struct prefix *, struct rib_entry *,
+		    struct pt_entry *, struct rde_peer *, uint32_t, uint32_t,
+		    struct rde_aspath *, struct rde_community *,
+		    struct nexthop *, uint8_t, uint8_t);
+void		 prefix_unlink(struct prefix *);
+
 struct prefix	*prefix_bypeer(struct rib_entry *, struct rde_peer *,
 		    uint32_t);
 void		 prefix_destroy(struct prefix *);
@@ -707,6 +714,31 @@ void		 nexthop_update(struct kroute_next
 struct nexthop	*nexthop_get(struct bgpd_addr *);
 struct nexthop	*nexthop_ref(struct nexthop *);
 int		 nexthop_unref(struct nexthop *);
+
+/* rde_adjout.c */
+struct prefix	*prefix_adjout_get(struct rde_peer *, uint32_t,
+		    struct pt_entry *);
+struct prefix	*prefix_adjout_first(struct rde_peer *, struct pt_entry *);
+struct prefix	*prefix_adjout_next(struct rde_peer *, struct prefix *);
+struct prefix	*prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
+		    int);
+struct prefix	*prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
+
+void		 prefix_add_eor(struct rde_peer *, uint8_t);
+void		 prefix_adjout_update(struct prefix *, struct rde_peer *,
+		    struct filterstate *, struct pt_entry *, uint32_t);
+void		 prefix_adjout_withdraw(struct prefix *);
+void		 prefix_adjout_destroy(struct prefix *);
+void		 prefix_adjout_flush_pending(struct rde_peer *);
+int		 prefix_adjout_reaper(struct rde_peer *);
+void		 prefix_adjout_dump_r(struct rib_context *);
+int		 prefix_adjout_dump_new(struct rde_peer *, uint8_t,
+		    unsigned int, void *, void (*)(struct prefix *, void *),
+		    void (*)(void *, uint8_t), int (*)(void *));
+int		 prefix_adjout_dump_subtree(struct rde_peer *,
+		    struct bgpd_addr *, uint8_t, unsigned int, void *,
+		    void (*)(struct prefix *, void *),
+		    void (*)(void *, uint8_t), int (*)(void *));
 
 /* rde_update.c */
 void		 up_generate_updates(struct rde_peer *, struct rib_entry *);
Index: rde_adjout.c
===================================================================
RCS file: rde_adjout.c
diff -N rde_adjout.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ rde_adjout.c	18 Nov 2025 13:27:29 -0000
@@ -0,0 +1,577 @@
+/*	$OpenBSD: rde_rib.c,v 1.272 2025/09/24 13:27:18 claudio Exp $ */
+
+/*
+ * Copyright (c) 2003, 2004, 2025 Claudio Jeker <claudio@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/types.h>
+#include <sys/queue.h>
+
+#include <limits.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+#include "bgpd.h"
+#include "rde.h"
+#include "log.h"
+
+/* adj-rib-out specific functions */
+
+static inline struct prefix *
+prefix_lock(struct prefix *p)
+{
+	if (p->flags & PREFIX_FLAG_LOCKED)
+		fatalx("%s: locking locked prefix", __func__);
+	p->flags |= PREFIX_FLAG_LOCKED;
+	return p;
+}
+
+static inline struct prefix *
+prefix_unlock(struct prefix *p)
+{
+	if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
+		fatalx("%s: unlocking unlocked prefix", __func__);
+	p->flags &= ~PREFIX_FLAG_LOCKED;
+	return p;
+}
+
+static inline int
+prefix_is_locked(struct prefix *p)
+{
+	return (p->flags & PREFIX_FLAG_LOCKED) != 0;
+}
+
+static inline int
+prefix_is_dead(struct prefix *p)
+{
+	return (p->flags & PREFIX_FLAG_DEAD) != 0;
+}
+
+static struct prefix	*prefix_alloc(void);
+static void		 prefix_free(struct prefix *);
+
+/* RB tree comparison function */
+static inline int
+prefix_index_cmp(struct prefix *a, struct prefix *b)
+{
+	int r;
+	r = pt_prefix_cmp(a->pt, b->pt);
+	if (r != 0)
+		return r;
+
+	if (a->path_id_tx > b->path_id_tx)
+		return 1;
+	if (a->path_id_tx < b->path_id_tx)
+		return -1;
+	return 0;
+}
+
+static inline int
+prefix_cmp(struct prefix *a, struct prefix *b)
+{
+	if ((a->flags & PREFIX_FLAG_EOR) != (b->flags & PREFIX_FLAG_EOR))
+		return (a->flags & PREFIX_FLAG_EOR) ? 1 : -1;
+	/* if EOR marker no need to check the rest */
+	if (a->flags & PREFIX_FLAG_EOR)
+		return 0;
+
+	if (a->aspath != b->aspath)
+		return (a->aspath > b->aspath ? 1 : -1);
+	if (a->communities != b->communities)
+		return (a->communities > b->communities ? 1 : -1);
+	if (a->nexthop != b->nexthop)
+		return (a->nexthop > b->nexthop ? 1 : -1);
+	if (a->nhflags != b->nhflags)
+		return (a->nhflags > b->nhflags ? 1 : -1);
+	return prefix_index_cmp(a, b);
+}
+
+RB_GENERATE(prefix_tree, prefix, entry.tree.update, prefix_cmp)
+RB_GENERATE_STATIC(prefix_index, prefix, entry.tree.index, prefix_index_cmp)
+
+/*
+ * Search for specified prefix in the peer prefix_index.
+ * Returns NULL if not found.
+ */
+struct prefix *
+prefix_adjout_get(struct rde_peer *peer, uint32_t path_id_tx,
+    struct pt_entry *pte)
+{
+	struct prefix xp;
+
+	memset(&xp, 0, sizeof(xp));
+	xp.pt = pte;
+	xp.path_id_tx = path_id_tx;
+
+	return RB_FIND(prefix_index, &peer->adj_rib_out, &xp);
+}
+
+/*
+ * Lookup a prefix without considering path_id in the peer prefix_index.
+ * Returns NULL if not found.
+ */
+struct prefix *
+prefix_adjout_first(struct rde_peer *peer, struct pt_entry *pte)
+{
+	struct prefix xp, *np;
+
+	memset(&xp, 0, sizeof(xp));
+	xp.pt = pte;
+
+	np = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
+	if (np == NULL || pt_prefix_cmp(np->pt, xp.pt) != 0)
+		return NULL;
+	return np;
+}
+
+/*
+ * Return next prefix after a lookup that is actually an update.
+ */
+struct prefix *
+prefix_adjout_next(struct rde_peer *peer, struct prefix *p)
+{
+	struct prefix *np;
+
+	np = RB_NEXT(prefix_index, &peer->adj_rib_out, p);
+	if (np == NULL || np->pt != p->pt)
+		return NULL;
+	return np;
+}
+
+/*
+ * Lookup addr/prefixlen in the peer prefix_index. Returns first match.
+ * Returns NULL if not found.
+ */
+struct prefix *
+prefix_adjout_lookup(struct rde_peer *peer, struct bgpd_addr *addr, int plen)
+{
+	return prefix_adjout_first(peer, pt_fill(addr, plen));
+}
+
+/*
+ * Lookup addr in the peer prefix_index. Returns first match.
+ * Returns NULL if not found.
+ */
+struct prefix *
+prefix_adjout_match(struct rde_peer *peer, struct bgpd_addr *addr)
+{
+	struct prefix *p;
+	int i;
+
+	switch (addr->aid) {
+	case AID_INET:
+	case AID_VPN_IPv4:
+		for (i = 32; i >= 0; i--) {
+			p = prefix_adjout_lookup(peer, addr, i);
+			if (p != NULL)
+				return p;
+		}
+		break;
+	case AID_INET6:
+	case AID_VPN_IPv6:
+		for (i = 128; i >= 0; i--) {
+			p = prefix_adjout_lookup(peer, addr, i);
+			if (p != NULL)
+				return p;
+		}
+		break;
+	default:
+		fatalx("%s: unknown af", __func__);
+	}
+	return NULL;
+}
+
+/*
+ * Insert an End-of-RIB marker into the update queue.
+ */
+void
+prefix_add_eor(struct rde_peer *peer, uint8_t aid)
+{
+	struct prefix *p;
+
+	p = prefix_alloc();
+	p->flags = PREFIX_FLAG_ADJOUT | PREFIX_FLAG_UPDATE | PREFIX_FLAG_EOR;
+	if (RB_INSERT(prefix_tree, &peer->updates[aid], p) != NULL)
+		/* no need to add if EoR marker already present */
+		prefix_free(p);
+	/* EOR marker is not inserted into the adj_rib_out index */
+}
+
+/*
+ * Put a prefix from the Adj-RIB-Out onto the update queue.
+ */
+void
+prefix_adjout_update(struct prefix *p, struct rde_peer *peer,
+    struct filterstate *state, struct pt_entry *pte, uint32_t path_id_tx)
+{
+	struct rde_aspath *asp;
+	struct rde_community *comm;
+
+	if (p == NULL) {
+		p = prefix_alloc();
+		/* initially mark DEAD so code below is skipped */
+		p->flags |= PREFIX_FLAG_ADJOUT | PREFIX_FLAG_DEAD;
+
+		p->pt = pt_ref(pte);
+		p->peer = peer;
+		p->path_id_tx = path_id_tx;
+
+		if (RB_INSERT(prefix_index, &peer->adj_rib_out, p) != NULL)
+			fatalx("%s: RB index invariant violated", __func__);
+	}
+
+	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
+		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
+	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
+		/*
+		 * XXX for now treat a different path_id_tx like different
+		 * attributes and force out an update. It is unclear how
+		 * common it is to have equivalent updates from alternative
+		 * paths.
+		 */
+		if (p->path_id_tx == path_id_tx &&
+		    prefix_nhflags(p) == state->nhflags &&
+		    prefix_nexthop(p) == state->nexthop &&
+		    communities_equal(&state->communities,
+		    prefix_communities(p)) &&
+		    path_equal(&state->aspath, prefix_aspath(p))) {
+			/* nothing changed */
+			p->validation_state = state->vstate;
+			p->lastchange = getmonotime();
+			p->flags &= ~PREFIX_FLAG_STALE;
+			return;
+		}
+
+		/* if pending update unhook it before it is unlinked */
+		if (p->flags & PREFIX_FLAG_UPDATE) {
+			RB_REMOVE(prefix_tree, &peer->updates[pte->aid], p);
+			peer->stats.pending_update--;
+		}
+
+		/* unlink prefix so it can be relinked below */
+		prefix_unlink(p);
+		peer->stats.prefix_out_cnt--;
+	}
+	if (p->flags & PREFIX_FLAG_WITHDRAW) {
+		RB_REMOVE(prefix_tree, &peer->withdraws[pte->aid], p);
+		peer->stats.pending_withdraw--;
+	}
+
+	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
+	p->flags &= ~PREFIX_FLAG_MASK;
+
+	/* update path_id_tx now that the prefix is unlinked */
+	if (p->path_id_tx != path_id_tx) {
+		/* path_id_tx is part of the index so remove and re-insert p */
+		RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
+		p->path_id_tx = path_id_tx;
+		if (RB_INSERT(prefix_index, &peer->adj_rib_out, p) != NULL)
+			fatalx("%s: RB index invariant violated", __func__);
+	}
+
+	asp = path_getcache(&state->aspath);
+
+	if ((comm = communities_lookup(&state->communities)) == NULL) {
+		/* Communities not available, create and link a new one. */
+		comm = communities_link(&state->communities);
+	}
+
+	prefix_link(p, NULL, p->pt, peer, 0, p->path_id_tx, asp, comm,
+	    state->nexthop, state->nhflags, state->vstate);
+	peer->stats.prefix_out_cnt++;
+
+	if (p->flags & PREFIX_FLAG_MASK)
+		fatalx("%s: bad flags %x", __func__, p->flags);
+	if (peer_is_up(peer)) {
+		p->flags |= PREFIX_FLAG_UPDATE;
+		if (RB_INSERT(prefix_tree, &peer->updates[pte->aid], p) != NULL)
+			fatalx("%s: RB tree invariant violated", __func__);
+		peer->stats.pending_update++;
+	}
+}
+
+/*
+ * Withdraw a prefix from the Adj-RIB-Out, this unlinks the aspath but leaves
+ * the prefix in the RIB linked to the peer withdraw list.
+ */
+void
+prefix_adjout_withdraw(struct prefix *p)
+{
+	struct rde_peer *peer = prefix_peer(p);
+
+	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
+		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
+
+	/* already a withdraw, shortcut */
+	if (p->flags & PREFIX_FLAG_WITHDRAW) {
+		p->lastchange = getmonotime();
+		p->flags &= ~PREFIX_FLAG_STALE;
+		return;
+	}
+	/* pending update just got withdrawn */
+	if (p->flags & PREFIX_FLAG_UPDATE) {
+		RB_REMOVE(prefix_tree, &peer->updates[p->pt->aid], p);
+		peer->stats.pending_update--;
+	}
+	/* unlink prefix if it was linked (not a withdraw or dead) */
+	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
+		prefix_unlink(p);
+		peer->stats.prefix_out_cnt--;
+	}
+
+	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
+	p->flags &= ~PREFIX_FLAG_MASK;
+	p->lastchange = getmonotime();
+
+	if (peer_is_up(peer)) {
+		p->flags |= PREFIX_FLAG_WITHDRAW;
+		if (RB_INSERT(prefix_tree, &peer->withdraws[p->pt->aid],
+		    p) != NULL)
+			fatalx("%s: RB tree invariant violated", __func__);
+		peer->stats.pending_withdraw++;
+	} else {
+		/* mark prefix dead to skip unlink on destroy */
+		p->flags |= PREFIX_FLAG_DEAD;
+		prefix_adjout_destroy(p);
+	}
+}
+
+void
+prefix_adjout_destroy(struct prefix *p)
+{
+	struct rde_peer *peer = prefix_peer(p);
+
+	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
+		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
+
+	if (p->flags & PREFIX_FLAG_EOR) {
+		/* EOR marker is not linked in the index */
+		prefix_free(p);
+		return;
+	}
+
+	if (p->flags & PREFIX_FLAG_WITHDRAW) {
+		RB_REMOVE(prefix_tree, &peer->withdraws[p->pt->aid], p);
+		peer->stats.pending_withdraw--;
+	}
+	if (p->flags & PREFIX_FLAG_UPDATE) {
+		RB_REMOVE(prefix_tree, &peer->updates[p->pt->aid], p);
+		peer->stats.pending_update--;
+	}
+	/* unlink prefix if it was linked (not a withdraw or dead) */
+	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
+		prefix_unlink(p);
+		peer->stats.prefix_out_cnt--;
+	}
+
+	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
+	p->flags &= ~PREFIX_FLAG_MASK;
+
+	if (prefix_is_locked(p)) {
+		/* mark prefix dead but leave it for prefix_restart */
+		p->flags |= PREFIX_FLAG_DEAD;
+	} else {
+		RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
+		/* remove the last prefix reference before free */
+		pt_unref(p->pt);
+		prefix_free(p);
+	}
+}
+
+void
+prefix_adjout_flush_pending(struct rde_peer *peer)
+{
+	struct prefix *p, *np;
+	uint8_t aid;
+
+	for (aid = AID_MIN; aid < AID_MAX; aid++) {
+		RB_FOREACH_SAFE(p, prefix_tree, &peer->withdraws[aid], np) {
+			prefix_adjout_destroy(p);
+		}
+		RB_FOREACH_SAFE(p, prefix_tree, &peer->updates[aid], np) {
+			p->flags &= ~PREFIX_FLAG_UPDATE;
+			RB_REMOVE(prefix_tree, &peer->updates[aid], p);
+			if (p->flags & PREFIX_FLAG_EOR) {
+				prefix_adjout_destroy(p);
+			} else {
+				peer->stats.pending_update--;
+			}
+		}
+	}
+}
+
+int
+prefix_adjout_reaper(struct rde_peer *peer)
+{
+	struct prefix *p, *np;
+	int count = RDE_REAPER_ROUNDS;
+
+	RB_FOREACH_SAFE(p, prefix_index, &peer->adj_rib_out, np) {
+		prefix_adjout_destroy(p);
+		if (count-- <= 0)
+			return 0;
+	}
+	return 1;
+}
+
+static struct prefix *
+prefix_restart(struct rib_context *ctx)
+{
+	struct prefix *p = NULL;
+
+	if (ctx->ctx_p)
+		p = prefix_unlock(ctx->ctx_p);
+
+	if (p && prefix_is_dead(p)) {
+		struct prefix *next;
+
+		next = RB_NEXT(prefix_index, unused, p);
+		prefix_adjout_destroy(p);
+		p = next;
+	}
+	ctx->ctx_p = NULL;
+	return p;
+}
+
+void
+prefix_adjout_dump_r(struct rib_context *ctx)
+{
+	struct prefix *p, *next;
+	struct rde_peer *peer;
+	unsigned int i;
+
+	if ((peer = peer_get(ctx->ctx_id)) == NULL)
+		goto done;
+
+	if (ctx->ctx_p == NULL && ctx->ctx_subtree.aid == AID_UNSPEC)
+		p = RB_MIN(prefix_index, &peer->adj_rib_out);
+	else
+		p = prefix_restart(ctx);
+
+	for (i = 0; p != NULL; p = next) {
+		next = RB_NEXT(prefix_index, unused, p);
+		if (prefix_is_dead(p))
+			continue;
+		if (ctx->ctx_aid != AID_UNSPEC &&
+		    ctx->ctx_aid != p->pt->aid)
+			continue;
+		if (ctx->ctx_subtree.aid != AID_UNSPEC) {
+			struct bgpd_addr addr;
+			pt_getaddr(p->pt, &addr);
+			if (prefix_compare(&ctx->ctx_subtree, &addr,
+			    ctx->ctx_subtreelen) != 0)
+				/* left subtree, walk is done */
+				break;
+		}
+		if (ctx->ctx_count && i++ >= ctx->ctx_count &&
+		    !prefix_is_locked(p)) {
+			/* store and lock last element */
+			ctx->ctx_p = prefix_lock(p);
+			return;
+		}
+		ctx->ctx_prefix_call(p, ctx->ctx_arg);
+	}
+
+done:
+	if (ctx->ctx_done)
+		ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
+	LIST_REMOVE(ctx, entry);
+	free(ctx);
+}
+
+int
+prefix_adjout_dump_new(struct rde_peer *peer, uint8_t aid, unsigned int count,
+    void *arg, void (*upcall)(struct prefix *, void *),
+    void (*done)(void *, uint8_t), int (*throttle)(void *))
+{
+	struct rib_context *ctx;
+
+	if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
+		return -1;
+	ctx->ctx_id = peer->conf.id;
+	ctx->ctx_aid = aid;
+	ctx->ctx_count = count;
+	ctx->ctx_arg = arg;
+	ctx->ctx_prefix_call = upcall;
+	ctx->ctx_done = done;
+	ctx->ctx_throttle = throttle;
+
+	rib_dump_insert(ctx);
+
+	/* requested a sync traversal */
+	if (count == 0)
+		prefix_adjout_dump_r(ctx);
+
+	return 0;
+}
+
+int
+prefix_adjout_dump_subtree(struct rde_peer *peer, struct bgpd_addr *subtree,
+    uint8_t subtreelen, unsigned int count, void *arg,
+    void (*upcall)(struct prefix *, void *), void (*done)(void *, uint8_t),
+    int (*throttle)(void *))
+{
+	struct rib_context *ctx;
+	struct prefix xp;
+
+	if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
+		return -1;
+	ctx->ctx_id = peer->conf.id;
+	ctx->ctx_aid = subtree->aid;
+	ctx->ctx_count = count;
+	ctx->ctx_arg = arg;
+	ctx->ctx_prefix_call = upcall;
+	ctx->ctx_done = done;
+	ctx->ctx_throttle = throttle;
+	ctx->ctx_subtree = *subtree;
+	ctx->ctx_subtreelen = subtreelen;
+
+	/* lookup start of subtree */
+	memset(&xp, 0, sizeof(xp));
+	xp.pt = pt_fill(subtree, subtreelen);
+	ctx->ctx_p = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
+	if (ctx->ctx_p)
+		prefix_lock(ctx->ctx_p);
+
+	rib_dump_insert(ctx);
+
+	/* requested a sync traversal */
+	if (count == 0)
+		prefix_adjout_dump_r(ctx);
+
+	return 0;
+}
+
+/* alloc and zero new entry. May not fail. */
+static struct prefix *
+prefix_alloc(void)
+{
+	struct prefix *p;
+
+	p = calloc(1, sizeof(*p));
+	if (p == NULL)
+		fatal("prefix_alloc");
+	rdemem.prefix_cnt++;
+	return p;
+}
+
+/* free a unlinked entry */
+static void
+prefix_free(struct prefix *p)
+{
+	rdemem.prefix_cnt--;
+	free(p);
+}
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.52 rde_peer.c
--- rde_peer.c	14 Nov 2025 19:34:40 -0000	1.52
+++ rde_peer.c	18 Nov 2025 13:27:29 -0000
@@ -557,9 +557,9 @@ peer_blast(struct rde_peer *peer, uint8_
 		rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR);
 
 	/* force out all updates from the Adj-RIB-Out */
-	if (prefix_dump_new(peer, aid, 0, peer, peer_blast_upcall,
+	if (prefix_adjout_dump_new(peer, aid, 0, peer, peer_blast_upcall,
 	    peer_blast_done, NULL) == -1)
-		fatal("%s: prefix_dump_new", __func__);
+		fatal("%s: prefix_adjout_dump_new", __func__);
 }
 
 /* RIB walker callbacks for peer_dump. */
Index: rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
diff -u -p -r1.275 rde_rib.c
--- rde_rib.c	7 Nov 2025 12:33:42 -0000	1.275
+++ rde_rib.c	18 Nov 2025 13:29:42 -0000
@@ -51,25 +51,8 @@ static void rib_dump_abort(uint16_t);
 RB_PROTOTYPE(rib_tree, rib_entry, rib_e, rib_compare);
 RB_GENERATE(rib_tree, rib_entry, rib_e, rib_compare);
 
-struct rib_context {
-	LIST_ENTRY(rib_context)		 entry;
-	struct rib_entry		*ctx_re;
-	struct prefix			*ctx_p;
-	uint32_t			 ctx_id;
-	void		(*ctx_rib_call)(struct rib_entry *, void *);
-	void		(*ctx_prefix_call)(struct prefix *, void *);
-	void		(*ctx_done)(void *, uint8_t);
-	int		(*ctx_throttle)(void *);
-	void				*ctx_arg;
-	struct bgpd_addr		 ctx_subtree;
-	unsigned int			 ctx_count;
-	uint8_t				 ctx_aid;
-	uint8_t				 ctx_subtreelen;
-};
 LIST_HEAD(, rib_context) rib_dumps = LIST_HEAD_INITIALIZER(rib_dumps);
 
-static void	prefix_dump_r(struct rib_context *);
-
 static inline struct rib_entry *
 re_lock(struct rib_entry *re)
 {
@@ -95,15 +78,6 @@ re_is_locked(struct rib_entry *re)
 }
 
 static inline struct prefix *
-prefix_lock(struct prefix *p)
-{
-	if (p->flags & PREFIX_FLAG_LOCKED)
-		fatalx("%s: locking locked prefix", __func__);
-	p->flags |= PREFIX_FLAG_LOCKED;
-	return p;
-}
-
-static inline struct prefix *
 prefix_unlock(struct prefix *p)
 {
 	if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
@@ -113,12 +87,6 @@ prefix_unlock(struct prefix *p)
 }
 
 static inline int
-prefix_is_locked(struct prefix *p)
-{
-	return (p->flags & PREFIX_FLAG_LOCKED) != 0;
-}
-
-static inline int
 prefix_is_dead(struct prefix *p)
 {
 	return (p->flags & PREFIX_FLAG_DEAD) != 0;
@@ -493,7 +461,7 @@ rib_dump_runner(void)
 		if (ctx->ctx_rib_call != NULL)
 			rib_dump_r(ctx);
 		else
-			prefix_dump_r(ctx);
+			prefix_adjout_dump_r(ctx);
 	}
 }
 
@@ -534,6 +502,12 @@ rib_dump_terminate(void *arg)
 	}
 }
 
+void
+rib_dump_insert(struct rib_context *ctx)
+{
+	LIST_INSERT_HEAD(&rib_dumps, ctx, entry);
+}
+
 int
 rib_dump_new(uint16_t id, uint8_t aid, unsigned int count, void *arg,
     void (*upcall)(struct rib_entry *, void *), void (*done)(void *, uint8_t),
@@ -551,7 +525,7 @@ rib_dump_new(uint16_t id, uint8_t aid, u
 	ctx->ctx_done = done;
 	ctx->ctx_throttle = throttle;
 
-	LIST_INSERT_HEAD(&rib_dumps, ctx, entry);
+	rib_dump_insert(ctx);
 
 	/* requested a sync traversal */
 	if (count == 0)
@@ -580,8 +554,6 @@ rib_dump_subtree(uint16_t id, struct bgp
 	ctx->ctx_subtree = *subtree;
 	ctx->ctx_subtreelen = subtreelen;
 
-	LIST_INSERT_HEAD(&rib_dumps, ctx, entry);
-
 	/* lookup start of subtree */
 	memset(&xre, 0, sizeof(xre));
 	xre.prefix = pt_fill(subtree, subtreelen);
@@ -589,6 +561,8 @@ rib_dump_subtree(uint16_t id, struct bgp
 	if (ctx->ctx_re)
 		re_lock(ctx->ctx_re);
 
+	rib_dump_insert(ctx);
+
 	/* requested a sync traversal */
 	if (count == 0)
 		rib_dump_r(ctx);
@@ -598,7 +572,6 @@ rib_dump_subtree(uint16_t id, struct bgp
 
 /* path specific functions */
 
-static struct rde_aspath *path_getcache(struct rde_aspath *);
 static void path_link(struct rde_aspath *);
 static void path_unlink(struct rde_aspath *);
 
@@ -622,7 +595,7 @@ path_calc_hash(const struct rde_aspath *
 	return SipHash24_End(&ctx);
 }
 
-static inline int
+int
 path_equal(const struct rde_aspath *a, const struct rde_aspath *b)
 {
 	if (a == NULL && b == NULL)
@@ -689,7 +662,7 @@ path_init(void)
 	arc4random_buf(&pathkey, sizeof(pathkey));
 }
 
-static struct rde_aspath *
+struct rde_aspath *
 path_getcache(struct rde_aspath *aspath)
 {
 	struct rde_aspath *asp;
@@ -707,7 +680,7 @@ path_getcache(struct rde_aspath *aspath)
  * Link this aspath into the global table.
  * The asp had to be alloced with path_get.
  */
-static void
+void
 path_link(struct rde_aspath *asp)
 {
 	if (CH_INSERT(path_tree, &pathtable, asp, NULL) != 1)
@@ -823,54 +796,9 @@ static int	prefix_move(struct prefix *, 
 		    struct rde_aspath *, struct rde_community *,
 		    struct nexthop *, uint8_t, uint8_t, int);
 
-static void	prefix_link(struct prefix *, struct rib_entry *,
-		    struct pt_entry *, struct rde_peer *, uint32_t, uint32_t,
-		    struct rde_aspath *, struct rde_community *,
-		    struct nexthop *, uint8_t, uint8_t);
-static void	prefix_unlink(struct prefix *);
-
 static struct prefix	*prefix_alloc(void);
 static void		 prefix_free(struct prefix *);
 
-/* RB tree comparison function */
-static inline int
-prefix_index_cmp(struct prefix *a, struct prefix *b)
-{
-	int r;
-	r = pt_prefix_cmp(a->pt, b->pt);
-	if (r != 0)
-		return r;
-
-	if (a->path_id_tx > b->path_id_tx)
-		return 1;
-	if (a->path_id_tx < b->path_id_tx)
-		return -1;
-	return 0;
-}
-
-static inline int
-prefix_cmp(struct prefix *a, struct prefix *b)
-{
-	if ((a->flags & PREFIX_FLAG_EOR) != (b->flags & PREFIX_FLAG_EOR))
-		return (a->flags & PREFIX_FLAG_EOR) ? 1 : -1;
-	/* if EOR marker no need to check the rest */
-	if (a->flags & PREFIX_FLAG_EOR)
-		return 0;
-
-	if (a->aspath != b->aspath)
-		return (a->aspath > b->aspath ? 1 : -1);
-	if (a->communities != b->communities)
-		return (a->communities > b->communities ? 1 : -1);
-	if (a->nexthop != b->nexthop)
-		return (a->nexthop > b->nexthop ? 1 : -1);
-	if (a->nhflags != b->nhflags)
-		return (a->nhflags > b->nhflags ? 1 : -1);
-	return prefix_index_cmp(a, b);
-}
-
-RB_GENERATE(prefix_tree, prefix, entry.tree.update, prefix_cmp)
-RB_GENERATE_STATIC(prefix_index, prefix, entry.tree.index, prefix_index_cmp)
-
 /*
  * Search for specified prefix of a peer. Returns NULL if not found.
  */
@@ -887,98 +815,6 @@ prefix_get(struct rib *rib, struct rde_p
 }
 
 /*
- * Search for specified prefix in the peer prefix_index.
- * Returns NULL if not found.
- */
-struct prefix *
-prefix_adjout_get(struct rde_peer *peer, uint32_t path_id_tx,
-    struct pt_entry *pte)
-{
-	struct prefix xp;
-
-	memset(&xp, 0, sizeof(xp));
-	xp.pt = pte;
-	xp.path_id_tx = path_id_tx;
-
-	return RB_FIND(prefix_index, &peer->adj_rib_out, &xp);
-}
-
-/*
- * Lookup a prefix without considering path_id in the peer prefix_index.
- * Returns NULL if not found.
- */
-struct prefix *
-prefix_adjout_first(struct rde_peer *peer, struct pt_entry *pte)
-{
-	struct prefix xp, *np;
-
-	memset(&xp, 0, sizeof(xp));
-	xp.pt = pte;
-
-	np = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
-	if (np == NULL || pt_prefix_cmp(np->pt, xp.pt) != 0)
-		return NULL;
-	return np;
-}
-
-/*
- * Return next prefix after a lookup that is actually an update.
- */
-struct prefix *
-prefix_adjout_next(struct rde_peer *peer, struct prefix *p)
-{
-	struct prefix *np;
-
-	np = RB_NEXT(prefix_index, &peer->adj_rib_out, p);
-	if (np == NULL || np->pt != p->pt)
-		return NULL;
-	return np;
-}
-
-/*
- * Lookup addr/prefixlen in the peer prefix_index. Returns first match.
- * Returns NULL if not found.
- */
-struct prefix *
-prefix_adjout_lookup(struct rde_peer *peer, struct bgpd_addr *addr, int plen)
-{
-	return prefix_adjout_first(peer, pt_fill(addr, plen));
-}
-
-/*
- * Lookup addr in the peer prefix_index. Returns first match.
- * Returns NULL if not found.
- */
-struct prefix *
-prefix_adjout_match(struct rde_peer *peer, struct bgpd_addr *addr)
-{
-	struct prefix *p;
-	int i;
-
-	switch (addr->aid) {
-	case AID_INET:
-	case AID_VPN_IPv4:
-		for (i = 32; i >= 0; i--) {
-			p = prefix_adjout_lookup(peer, addr, i);
-			if (p != NULL)
-				return p;
-		}
-		break;
-	case AID_INET6:
-	case AID_VPN_IPv6:
-		for (i = 128; i >= 0; i--) {
-			p = prefix_adjout_lookup(peer, addr, i);
-			if (p != NULL)
-				return p;
-		}
-		break;
-	default:
-		fatalx("%s: unknown af", __func__);
-	}
-	return NULL;
-}
-
-/*
  * Update a prefix.
  * Return 1 if prefix was newly added, 0 if it was just changed.
  */
@@ -1227,367 +1063,6 @@ prefix_flowspec_dump(uint8_t aid, void *
 }
 
 /*
- * Insert an End-of-RIB marker into the update queue.
- */
-void
-prefix_add_eor(struct rde_peer *peer, uint8_t aid)
-{
-	struct prefix *p;
-
-	p = prefix_alloc();
-	p->flags = PREFIX_FLAG_ADJOUT | PREFIX_FLAG_UPDATE | PREFIX_FLAG_EOR;
-	if (RB_INSERT(prefix_tree, &peer->updates[aid], p) != NULL)
-		/* no need to add if EoR marker already present */
-		prefix_free(p);
-	/* EOR marker is not inserted into the adj_rib_out index */
-}
-
-/*
- * Put a prefix from the Adj-RIB-Out onto the update queue.
- */
-void
-prefix_adjout_update(struct prefix *p, struct rde_peer *peer,
-    struct filterstate *state, struct pt_entry *pte, uint32_t path_id_tx)
-{
-	struct rde_aspath *asp;
-	struct rde_community *comm;
-
-	if (p == NULL) {
-		p = prefix_alloc();
-		/* initially mark DEAD so code below is skipped */
-		p->flags |= PREFIX_FLAG_ADJOUT | PREFIX_FLAG_DEAD;
-
-		p->pt = pt_ref(pte);
-		p->peer = peer;
-		p->path_id_tx = path_id_tx;
-
-		if (RB_INSERT(prefix_index, &peer->adj_rib_out, p) != NULL)
-			fatalx("%s: RB index invariant violated", __func__);
-	}
-
-	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
-		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
-	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
-		/*
-		 * XXX for now treat a different path_id_tx like different
-		 * attributes and force out an update. It is unclear how
-		 * common it is to have equivalent updates from alternative
-		 * paths.
-		 */
-		if (p->path_id_tx == path_id_tx &&
-		    prefix_nhflags(p) == state->nhflags &&
-		    prefix_nexthop(p) == state->nexthop &&
-		    communities_equal(&state->communities,
-		    prefix_communities(p)) &&
-		    path_equal(&state->aspath, prefix_aspath(p))) {
-			/* nothing changed */
-			p->validation_state = state->vstate;
-			p->lastchange = getmonotime();
-			p->flags &= ~PREFIX_FLAG_STALE;
-			return;
-		}
-
-		/* if pending update unhook it before it is unlinked */
-		if (p->flags & PREFIX_FLAG_UPDATE) {
-			RB_REMOVE(prefix_tree, &peer->updates[pte->aid], p);
-			peer->stats.pending_update--;
-		}
-
-		/* unlink prefix so it can be relinked below */
-		prefix_unlink(p);
-		peer->stats.prefix_out_cnt--;
-	}
-	if (p->flags & PREFIX_FLAG_WITHDRAW) {
-		RB_REMOVE(prefix_tree, &peer->withdraws[pte->aid], p);
-		peer->stats.pending_withdraw--;
-	}
-
-	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
-	p->flags &= ~PREFIX_FLAG_MASK;
-
-	/* update path_id_tx now that the prefix is unlinked */
-	if (p->path_id_tx != path_id_tx) {
-		/* path_id_tx is part of the index so remove and re-insert p */
-		RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
-		p->path_id_tx = path_id_tx;
-		if (RB_INSERT(prefix_index, &peer->adj_rib_out, p) != NULL)
-			fatalx("%s: RB index invariant violated", __func__);
-	}
-
-	asp = path_getcache(&state->aspath);
-
-	if ((comm = communities_lookup(&state->communities)) == NULL) {
-		/* Communities not available, create and link a new one. */
-		comm = communities_link(&state->communities);
-	}
-
-	prefix_link(p, NULL, p->pt, peer, 0, p->path_id_tx, asp, comm,
-	    state->nexthop, state->nhflags, state->vstate);
-	peer->stats.prefix_out_cnt++;
-
-	if (p->flags & PREFIX_FLAG_MASK)
-		fatalx("%s: bad flags %x", __func__, p->flags);
-	if (peer_is_up(peer)) {
-		p->flags |= PREFIX_FLAG_UPDATE;
-		if (RB_INSERT(prefix_tree, &peer->updates[pte->aid], p) != NULL)
-			fatalx("%s: RB tree invariant violated", __func__);
-		peer->stats.pending_update++;
-	}
-}
-
-/*
- * Withdraw a prefix from the Adj-RIB-Out, this unlinks the aspath but leaves
- * the prefix in the RIB linked to the peer withdraw list.
- */
-void
-prefix_adjout_withdraw(struct prefix *p)
-{
-	struct rde_peer *peer = prefix_peer(p);
-
-	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
-		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
-
-	/* already a withdraw, shortcut */
-	if (p->flags & PREFIX_FLAG_WITHDRAW) {
-		p->lastchange = getmonotime();
-		p->flags &= ~PREFIX_FLAG_STALE;
-		return;
-	}
-	/* pending update just got withdrawn */
-	if (p->flags & PREFIX_FLAG_UPDATE) {
-		RB_REMOVE(prefix_tree, &peer->updates[p->pt->aid], p);
-		peer->stats.pending_update--;
-	}
-	/* unlink prefix if it was linked (not a withdraw or dead) */
-	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
-		prefix_unlink(p);
-		peer->stats.prefix_out_cnt--;
-	}
-
-	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
-	p->flags &= ~PREFIX_FLAG_MASK;
-	p->lastchange = getmonotime();
-
-	if (peer_is_up(peer)) {
-		p->flags |= PREFIX_FLAG_WITHDRAW;
-		if (RB_INSERT(prefix_tree, &peer->withdraws[p->pt->aid],
-		    p) != NULL)
-			fatalx("%s: RB tree invariant violated", __func__);
-		peer->stats.pending_withdraw++;
-	} else {
-		/* mark prefix dead to skip unlink on destroy */
-		p->flags |= PREFIX_FLAG_DEAD;
-		prefix_adjout_destroy(p);
-	}
-}
-
-void
-prefix_adjout_destroy(struct prefix *p)
-{
-	struct rde_peer *peer = prefix_peer(p);
-
-	if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
-		fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
-
-	if (p->flags & PREFIX_FLAG_EOR) {
-		/* EOR marker is not linked in the index */
-		prefix_free(p);
-		return;
-	}
-
-	if (p->flags & PREFIX_FLAG_WITHDRAW) {
-		RB_REMOVE(prefix_tree, &peer->withdraws[p->pt->aid], p);
-		peer->stats.pending_withdraw--;
-	}
-	if (p->flags & PREFIX_FLAG_UPDATE) {
-		RB_REMOVE(prefix_tree, &peer->updates[p->pt->aid], p);
-		peer->stats.pending_update--;
-	}
-	/* unlink prefix if it was linked (not a withdraw or dead) */
-	if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
-		prefix_unlink(p);
-		peer->stats.prefix_out_cnt--;
-	}
-
-	/* nothing needs to be done for PREFIX_FLAG_DEAD and STALE */
-	p->flags &= ~PREFIX_FLAG_MASK;
-
-	if (prefix_is_locked(p)) {
-		/* mark prefix dead but leave it for prefix_restart */
-		p->flags |= PREFIX_FLAG_DEAD;
-	} else {
-		RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
-		/* remove the last prefix reference before free */
-		pt_unref(p->pt);
-		prefix_free(p);
-	}
-}
-
-void
-prefix_adjout_flush_pending(struct rde_peer *peer)
-{
-	struct prefix *p, *np;
-	uint8_t aid;
-
-	for (aid = AID_MIN; aid < AID_MAX; aid++) {
-		RB_FOREACH_SAFE(p, prefix_tree, &peer->withdraws[aid], np) {
-			prefix_adjout_destroy(p);
-		}
-		RB_FOREACH_SAFE(p, prefix_tree, &peer->updates[aid], np) {
-			p->flags &= ~PREFIX_FLAG_UPDATE;
-			RB_REMOVE(prefix_tree, &peer->updates[aid], p);
-			if (p->flags & PREFIX_FLAG_EOR) {
-				prefix_adjout_destroy(p);
-			} else {
-				peer->stats.pending_update--;
-			}
-		}
-	}
-}
-
-int
-prefix_adjout_reaper(struct rde_peer *peer)
-{
-	struct prefix *p, *np;
-	int count = RDE_REAPER_ROUNDS;
-
-	RB_FOREACH_SAFE(p, prefix_index, &peer->adj_rib_out, np) {
-		prefix_adjout_destroy(p);
-		if (count-- <= 0)
-			return 0;
-	}
-	return 1;
-}
-
-static struct prefix *
-prefix_restart(struct rib_context *ctx)
-{
-	struct prefix *p = NULL;
-
-	if (ctx->ctx_p)
-		p = prefix_unlock(ctx->ctx_p);
-
-	if (p && prefix_is_dead(p)) {
-		struct prefix *next;
-
-		next = RB_NEXT(prefix_index, unused, p);
-		prefix_adjout_destroy(p);
-		p = next;
-	}
-	ctx->ctx_p = NULL;
-	return p;
-}
-
-static void
-prefix_dump_r(struct rib_context *ctx)
-{
-	struct prefix *p, *next;
-	struct rde_peer *peer;
-	unsigned int i;
-
-	if ((peer = peer_get(ctx->ctx_id)) == NULL)
-		goto done;
-
-	if (ctx->ctx_p == NULL && ctx->ctx_subtree.aid == AID_UNSPEC)
-		p = RB_MIN(prefix_index, &peer->adj_rib_out);
-	else
-		p = prefix_restart(ctx);
-
-	for (i = 0; p != NULL; p = next) {
-		next = RB_NEXT(prefix_index, unused, p);
-		if (prefix_is_dead(p))
-			continue;
-		if (ctx->ctx_aid != AID_UNSPEC &&
-		    ctx->ctx_aid != p->pt->aid)
-			continue;
-		if (ctx->ctx_subtree.aid != AID_UNSPEC) {
-			struct bgpd_addr addr;
-			pt_getaddr(p->pt, &addr);
-			if (prefix_compare(&ctx->ctx_subtree, &addr,
-			    ctx->ctx_subtreelen) != 0)
-				/* left subtree, walk is done */
-				break;
-		}
-		if (ctx->ctx_count && i++ >= ctx->ctx_count &&
-		    !prefix_is_locked(p)) {
-			/* store and lock last element */
-			ctx->ctx_p = prefix_lock(p);
-			return;
-		}
-		ctx->ctx_prefix_call(p, ctx->ctx_arg);
-	}
-
-done:
-	if (ctx->ctx_done)
-		ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
-	LIST_REMOVE(ctx, entry);
-	free(ctx);
-}
-
-int
-prefix_dump_new(struct rde_peer *peer, uint8_t aid, unsigned int count,
-    void *arg, void (*upcall)(struct prefix *, void *),
-    void (*done)(void *, uint8_t), int (*throttle)(void *))
-{
-	struct rib_context *ctx;
-
-	if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
-		return -1;
-	ctx->ctx_id = peer->conf.id;
-	ctx->ctx_aid = aid;
-	ctx->ctx_count = count;
-	ctx->ctx_arg = arg;
-	ctx->ctx_prefix_call = upcall;
-	ctx->ctx_done = done;
-	ctx->ctx_throttle = throttle;
-
-	LIST_INSERT_HEAD(&rib_dumps, ctx, entry);
-
-	/* requested a sync traversal */
-	if (count == 0)
-		prefix_dump_r(ctx);
-
-	return 0;
-}
-
-int
-prefix_dump_subtree(struct rde_peer *peer, struct bgpd_addr *subtree,
-    uint8_t subtreelen, unsigned int count, void *arg,
-    void (*upcall)(struct prefix *, void *), void (*done)(void *, uint8_t),
-    int (*throttle)(void *))
-{
-	struct rib_context *ctx;
-	struct prefix xp;
-
-	if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
-		return -1;
-	ctx->ctx_id = peer->conf.id;
-	ctx->ctx_aid = subtree->aid;
-	ctx->ctx_count = count;
-	ctx->ctx_arg = arg;
-	ctx->ctx_prefix_call = upcall;
-	ctx->ctx_done = done;
-	ctx->ctx_throttle = throttle;
-	ctx->ctx_subtree = *subtree;
-	ctx->ctx_subtreelen = subtreelen;
-
-	LIST_INSERT_HEAD(&rib_dumps, ctx, entry);
-
-	/* lookup start of subtree */
-	memset(&xp, 0, sizeof(xp));
-	xp.pt = pt_fill(subtree, subtreelen);
-	ctx->ctx_p = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
-	if (ctx->ctx_p)
-		prefix_lock(ctx->ctx_p);
-
-	/* requested a sync traversal */
-	if (count == 0)
-		prefix_dump_r(ctx);
-
-	return 0;
-}
-
-/*
  * Searches in the prefix list of specified rib_entry for a prefix entry
  * belonging to the peer peer. Returns NULL if no match found.
  */
@@ -1616,7 +1091,7 @@ prefix_destroy(struct prefix *p)
 /*
  * Link a prefix into the different parent objects.
  */
-static void
+void
 prefix_link(struct prefix *p, struct rib_entry *re, struct pt_entry *pt,
     struct rde_peer *peer, uint32_t path_id, uint32_t path_id_tx,
     struct rde_aspath *asp, struct rde_community *comm,
@@ -1640,7 +1115,7 @@ prefix_link(struct prefix *p, struct rib
 /*
  * Unlink a prefix from the different parent objects.
  */
-static void
+void
 prefix_unlink(struct prefix *p)
 {
 	struct rib_entry	*re = prefix_re(p);