Index | Thread | Search

From:
Claudio Jeker <cjeker@diehard.n-r-g.com>
Subject:
bgpd: add attr cache to adj-rib-out
To:
tech@openbsd.org
Date:
Mon, 1 Dec 2025 15:43:28 +0100

Download raw body.

Thread
Introduce an adjout_attr object that decouples some of the common data
from the prefix_adjout. This helps to reduce the memory footprint by
around 20% in large IXP setups.

The size of struct prefix_adjout is critical since we allocate very many
of those. e.g. 250k prefixes * 750 peers = 187 million objects. Every byte
saved makes a big difference. This removes 28 bytes from the struct.

-- 
:wq Claudio

Index: bgpctl/output.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output.c,v
diff -u -p -r1.62 output.c
--- bgpctl/output.c	12 Nov 2025 15:18:50 -0000	1.62
+++ bgpctl/output.c	25 Nov 2025 15:29:46 -0000
@@ -1069,6 +1069,14 @@ show_rib_mem(struct rde_memstats *stats)
 	printf("%10lld prefix entries using %s of memory\n",
 	    stats->prefix_cnt, fmt_mem(stats->prefix_cnt *
 	    sizeof(struct prefix)));
+	printf("%10lld adjout_prefix entries using %s of memory\n",
+	    stats->adjout_prefix_cnt, fmt_mem(stats->adjout_prefix_cnt *
+	    sizeof(struct prefix_adjout)));
+	printf("%10lld adjout attribute entries using %s of memory\n",
+	    stats->adjout_attr_cnt, fmt_mem(stats->adjout_attr_cnt *
+	    sizeof(struct adjout_attr)));
+	printf("\t   and holding %lld references\n",
+	    stats->adjout_attr_refs);
 	printf("%10lld BGP path attribute entries using %s of memory\n",
 	    stats->path_cnt, fmt_mem(stats->path_cnt *
 	    sizeof(struct rde_aspath)));
Index: bgpctl/output_json.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output_json.c,v
diff -u -p -r1.53 output_json.c
--- bgpctl/output_json.c	12 Nov 2025 15:18:50 -0000	1.53
+++ bgpctl/output_json.c	25 Nov 2025 15:29:46 -0000
@@ -903,6 +903,12 @@ json_rib_mem(struct rde_memstats *stats)
 	    stats->rib_cnt * sizeof(struct rib_entry), UINT64_MAX);
 	json_rib_mem_element("prefix", stats->prefix_cnt,
 	    stats->prefix_cnt * sizeof(struct prefix), UINT64_MAX);
+	json_rib_mem_element("adjout_prefix", stats->adjout_prefix_cnt,
+	    stats->adjout_prefix_cnt * sizeof(struct prefix_adjout),
+	    UINT64_MAX);
+	json_rib_mem_element("adjout_attr", stats->adjout_attr_cnt,
+	    stats->adjout_attr_cnt * sizeof(struct adjout_attr),
+	    stats->adjout_attr_refs);
 	json_rib_mem_element("rde_aspath", stats->path_cnt,
 	    stats->path_cnt * sizeof(struct rde_aspath),
 	    stats->path_refs);
Index: bgpctl/output_ometric.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output_ometric.c,v
diff -u -p -r1.17 output_ometric.c
--- bgpctl/output_ometric.c	12 Nov 2025 15:18:50 -0000	1.17
+++ bgpctl/output_ometric.c	25 Nov 2025 15:29:46 -0000
@@ -293,6 +293,12 @@ ometric_rib_mem(struct rde_memstats *sta
 	    stats->rib_cnt * sizeof(struct rib_entry), UINT64_MAX);
 	ometric_rib_mem_element("prefix", stats->prefix_cnt,
 	    stats->prefix_cnt * sizeof(struct prefix), UINT64_MAX);
+	ometric_rib_mem_element("adjout_prefix", stats->adjout_prefix_cnt,
+	    stats->adjout_prefix_cnt * sizeof(struct prefix_adjout),
+	    UINT64_MAX);
+	ometric_rib_mem_element("adjout_attr", stats->adjout_attr_cnt,
+	    stats->adjout_attr_cnt * sizeof(struct adjout_attr),
+	    stats->adjout_attr_refs);
 	ometric_rib_mem_element("rde_aspath", stats->path_cnt,
 	    stats->path_cnt * sizeof(struct rde_aspath),
 	    stats->path_refs);
Index: bgpd/bgpd.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/bgpd.h,v
diff -u -p -r1.522 bgpd.h
--- bgpd/bgpd.h	12 Nov 2025 15:17:43 -0000	1.522
+++ bgpd/bgpd.h	25 Nov 2025 15:29:46 -0000
@@ -1391,6 +1391,7 @@ struct rde_memstats {
 	long long	path_cnt;
 	long long	path_refs;
 	long long	prefix_cnt;
+	long long	adjout_prefix_cnt;
 	long long	rib_cnt;
 	long long	pt_cnt[AID_MAX];
 	long long	pt_size[AID_MAX];
@@ -1405,6 +1406,8 @@ struct rde_memstats {
 	long long	attr_refs;
 	long long	attr_data;
 	long long	attr_dcnt;
+	long long	adjout_attr_cnt;
+	long long	adjout_attr_refs;
 	long long	aset_cnt;
 	long long	aset_size;
 	long long	aset_nmemb;
Index: bgpd/rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
diff -u -p -r1.670 rde.c
--- bgpd/rde.c	1 Dec 2025 13:07:28 -0000	1.670
+++ bgpd/rde.c	1 Dec 2025 13:07:57 -0000
@@ -215,6 +215,7 @@ rde_main(int debug, int verbose)
 	pt_init();
 	attr_init();
 	path_init();
+	adjout_init();
 	communities_init();
 	peer_init(out_rules);
 
@@ -2960,21 +2961,18 @@ rde_dump_rib_as(struct prefix *p, struct
 }
 
 static void
-rde_dump_adjout_as(struct prefix_adjout *p, struct rde_aspath *asp, pid_t pid,
-    int flags)
+rde_dump_adjout_as(struct rde_peer *peer, struct prefix_adjout *p,
+    struct rde_aspath *asp, pid_t pid, int flags)
 {
 	struct ctl_show_rib	 rib;
 	struct ibuf		*wbuf;
 	struct attr		*a;
 	struct nexthop		*nexthop;
-	struct rde_peer		*peer;
 	size_t			 aslen;
 	uint8_t			 l;
 
 	nexthop = prefix_adjout_nexthop(p);
-	peer = prefix_adjout_peer(p);
 	memset(&rib, 0, sizeof(rib));
-	rib.lastchange = p->lastchange;
 	rib.local_pref = asp->lpref;
 	rib.med = asp->med;
 	rib.weight = asp->weight;
@@ -3098,12 +3096,12 @@ rde_dump_filter(struct prefix *p, struct
 }
 
 static void
-rde_dump_adjout_filter(struct prefix_adjout *p,
+rde_dump_adjout_filter(struct rde_peer *peer, struct prefix_adjout *p,
      struct ctl_show_rib_request *req)
 {
 	struct rde_aspath	*asp;
 
-	if (!rde_match_peer(prefix_adjout_peer(p), &req->neighbor))
+	if (!rde_match_peer(peer, &req->neighbor))
 		return;
 
 	asp = prefix_adjout_aspath(p);
@@ -3121,7 +3119,7 @@ rde_dump_adjout_filter(struct prefix_adj
 			return;
 	}
 	/* in the adj-rib-out, skip matching against roa and aspa state */
-	rde_dump_adjout_as(p, asp, req->pid, req->flags);
+	rde_dump_adjout_as(peer, p, asp, req->pid, req->flags);
 }
 
 static void
@@ -3140,10 +3138,14 @@ static void
 rde_dump_adjout_upcall(struct prefix_adjout *p, void *ptr)
 {
 	struct rde_dump_ctx	*ctx = ptr;
+	struct rde_peer		*peer;
 
+	if ((peer = peer_get(ctx->peerid)) == NULL)
+		return;
 	if (p->flags & PREFIX_ADJOUT_FLAG_WITHDRAW)
 		return;
-	rde_dump_adjout_filter(p, &ctx->req);
+
+	rde_dump_adjout_filter(peer, p, &ctx->req);
 }
 
 static int
@@ -3561,7 +3563,9 @@ rde_evaluate_all(void)
 static void
 rde_up_flush_upcall(struct prefix_adjout *p, void *ptr)
 {
-	prefix_adjout_withdraw(p);
+	struct rde_peer *peer = ptr;
+
+	prefix_adjout_withdraw(peer, p);
 }
 
 int
@@ -3928,7 +3932,7 @@ rde_reload_done(void)
 
 		if (peer->reconf_rib) {
 			if (prefix_adjout_dump_new(peer, AID_UNSPEC,
-			    RDE_RUNNER_ROUNDS, NULL, rde_up_flush_upcall,
+			    RDE_RUNNER_ROUNDS, peer, rde_up_flush_upcall,
 			    rde_softreconfig_in_done, NULL) == -1)
 				fatal("%s: prefix_dump_new", __func__);
 			log_peer_info(&peer->conf, "flushing Adj-RIB-Out");
@@ -3979,7 +3983,7 @@ rde_reload_done(void)
 						continue;
 
 					if (prefix_adjout_dump_new(peer,
-					    AID_UNSPEC, RDE_RUNNER_ROUNDS, NULL,
+					    AID_UNSPEC, RDE_RUNNER_ROUNDS, peer,
 					    rde_up_flush_upcall,
 					    rde_softreconfig_in_done,
 					    NULL) == -1)
Index: bgpd/rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
diff -u -p -r1.325 rde.h
--- bgpd/rde.h	1 Dec 2025 13:07:28 -0000	1.325
+++ bgpd/rde.h	1 Dec 2025 13:07:57 -0000
@@ -278,7 +278,7 @@ struct prefix {
 	monotime_t		 lastchange;
 	uint32_t		 path_id;
 	uint32_t		 path_id_tx;
-	uint16_t		 flags;
+	uint8_t			 flags;
 	uint8_t			 validation_state;
 	uint8_t			 nhflags;
 	int8_t			 dmetric;	/* decision metric */
@@ -302,18 +302,20 @@ struct prefix {
 #define	NEXTHOP_MASK		0x0f
 #define	NEXTHOP_VALID		0x80
 
+struct adjout_attr {
+	uint64_t		 hash;
+	struct rde_aspath	*aspath;
+	struct rde_community	*communities;
+	struct nexthop		*nexthop;
+	int			 refcnt;
+};
+
 struct prefix_adjout {
 	RB_ENTRY(prefix_adjout)		 index, update;
 	struct pt_entry			*pt;
-	struct rde_aspath		*aspath;
-	struct rde_community		*communities;
-	struct rde_peer			*peer;
-	struct nexthop			*nexthop;	/* may be NULL */
-	monotime_t			 lastchange;
-	uint32_t			 path_id;
+	struct adjout_attr		*attrs;
 	uint32_t			 path_id_tx;
 	uint8_t			 	 flags;
-	uint8_t				 validation_state;
 };
 #define	PREFIX_ADJOUT_FLAG_WITHDRAW	0x01	/* enqueued on withdraw queue */
 #define	PREFIX_ADJOUT_FLAG_UPDATE	0x02	/* enqueued on update queue */
@@ -717,6 +719,7 @@ struct nexthop	*nexthop_ref(struct nexth
 int		 nexthop_unref(struct nexthop *);
 
 /* rde_adjout.c */
+void			 adjout_init(void);
 struct prefix_adjout	*prefix_adjout_get(struct rde_peer *, uint32_t,
 			    struct pt_entry *);
 struct prefix_adjout	*prefix_adjout_first(struct rde_peer *,
@@ -731,11 +734,13 @@ struct prefix_adjout	*prefix_adjout_matc
 void		 prefix_add_eor(struct rde_peer *, uint8_t);
 void		 prefix_adjout_update(struct prefix_adjout *, struct rde_peer *,
 		    struct filterstate *, struct pt_entry *, uint32_t);
-void		 prefix_adjout_withdraw(struct prefix_adjout *);
-void		 prefix_adjout_destroy(struct prefix_adjout *);
+void		 prefix_adjout_withdraw(struct rde_peer *,
+		    struct prefix_adjout *);
+void		 prefix_adjout_destroy(struct rde_peer *,
+		    struct prefix_adjout *);
 void		 prefix_adjout_flush_pending(struct rde_peer *);
 int		 prefix_adjout_reaper(struct rde_peer *);
-void		 prefix_adjout_dump_cleanup(struct prefix_adjout *);
+void		 prefix_adjout_dump_cleanup(struct rib_context *);
 void		 prefix_adjout_dump_r(struct rib_context *);
 int		 prefix_adjout_dump_new(struct rde_peer *, uint8_t,
 		    unsigned int, void *,
@@ -746,28 +751,22 @@ int		 prefix_adjout_dump_subtree(struct 
 		    void (*)(struct prefix_adjout *, void *),
 		    void (*)(void *, uint8_t), int (*)(void *));
 
-static inline struct rde_peer *
-prefix_adjout_peer(struct prefix_adjout *p)
-{
-	return (p->peer);
-}
-
 static inline struct rde_aspath *
 prefix_adjout_aspath(struct prefix_adjout *p)
 {
-	return (p->aspath);
+	return (p->attrs->aspath);
 }
 
 static inline struct rde_community *
 prefix_adjout_communities(struct prefix_adjout *p)
 {
-	return (p->communities);
+	return (p->attrs->communities);
 }
 
 static inline struct nexthop *
 prefix_adjout_nexthop(struct prefix_adjout *p)
 {
-	return (p->nexthop);
+	return (p->attrs->nexthop);
 }
 
 /* rde_update.c */
Index: bgpd/rde_adjout.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_adjout.c,v
diff -u -p -r1.6 rde_adjout.c
--- bgpd/rde_adjout.c	20 Nov 2025 14:04:36 -0000	1.6
+++ bgpd/rde_adjout.c	25 Nov 2025 15:34:54 -0000
@@ -20,6 +20,7 @@
 #include <sys/queue.h>
 
 #include <limits.h>
+#include <siphash.h>
 #include <stdlib.h>
 #include <string.h>
 #include <time.h>
@@ -27,8 +28,140 @@
 #include "bgpd.h"
 #include "rde.h"
 #include "log.h"
+#include "chash.h"
 
 /* adj-rib-out specific functions */
+static uint64_t		attrkey;
+
+static inline uint64_t
+adjout_attr_hash(const struct adjout_attr *a)
+{
+	return a->hash;
+}
+
+static uint64_t
+adjout_attr_calc_hash(const struct adjout_attr *a)
+{
+	uint64_t h;
+	h = ch_qhash64(attrkey, (uintptr_t)a->aspath);
+	h = ch_qhash64(h, (uintptr_t)a->communities);
+	h = ch_qhash64(h, (uintptr_t)a->nexthop);
+	return h;
+}
+
+static inline int
+adjout_attr_eq(const struct adjout_attr *a, const struct adjout_attr *b)
+{
+	if (a->aspath == b->aspath &&
+	    a->communities == b->communities &&
+	    a->nexthop == b->nexthop)
+		return 1;
+	return 0;
+}
+
+CH_HEAD(adjout_attr_tree, adjout_attr);
+CH_PROTOTYPE(adjout_attr_tree, adjout_attr, adjout_attr_hash);
+
+static struct adjout_attr_tree attrtable = CH_INITIALIZER(&attrtable);
+
+void
+adjout_init(void)
+{
+	arc4random_buf(&attrkey, sizeof(attrkey));
+}
+
+/* Alloc, init and add a new entry into the hash table. May not fail. */
+static struct adjout_attr *
+adjout_attr_alloc(struct rde_aspath *asp, struct rde_community *comm,
+    struct nexthop *nexthop, uint64_t hash)
+{
+	struct adjout_attr *a;
+
+	a = calloc(1, sizeof(*a));
+	if (a == NULL)
+		fatal(__func__);
+	rdemem.adjout_attr_cnt++;
+
+	a->aspath = path_ref(asp);
+	a->communities = communities_ref(comm);
+	a->nexthop = nexthop_ref(nexthop);
+
+	a->hash = hash;
+	if (CH_INSERT(adjout_attr_tree, &attrtable, a, NULL) != 1)
+		fatalx("%s: object already in table", __func__);
+
+	return a;
+}
+
+/* Free an entry after removing it from the hash table */
+static void
+adjout_attr_free(struct adjout_attr *a)
+{
+	CH_REMOVE(adjout_attr_tree, &attrtable, a);
+
+	/* destroy all references to other objects */
+	/* remove nexthop ref ... */
+	nexthop_unref(a->nexthop);
+	a->nexthop = NULL;
+	/* ... communities ... */
+	communities_unref(a->communities);
+	a->communities = NULL;
+	/* and unlink from aspath */
+	path_unref(a->aspath);
+	a->aspath = NULL;
+
+	rdemem.adjout_attr_cnt--;
+	free(a);
+}
+
+static struct adjout_attr *
+adjout_attr_ref(struct adjout_attr *attrs, struct rde_peer *peer)
+{
+	attrs->refcnt++;
+	rdemem.adjout_attr_refs++;
+	return attrs;
+}
+
+static void
+adjout_attr_unref(struct adjout_attr *attrs, struct rde_peer *peer)
+{
+	attrs->refcnt--;
+	rdemem.adjout_attr_refs--;
+	if (attrs->refcnt > 0)
+		return;
+
+	adjout_attr_free(attrs);
+}
+
+static struct adjout_attr *
+adjout_attr_get(struct filterstate *state)
+{
+	struct adjout_attr *attr, needle = { 0 };
+	struct rde_aspath *asp;
+	struct rde_community *comm;
+	struct nexthop *nexthop;
+
+	/* lookup or insert new */
+	asp = path_getcache(&state->aspath);
+	if ((comm = communities_lookup(&state->communities)) == NULL) {
+		/* Communities not available, create and link a new one. */
+		comm = communities_link(&state->communities);
+	}
+	nexthop = state->nexthop;
+
+	needle.aspath = asp;
+	needle.communities = comm;
+	needle.nexthop = nexthop;
+	needle.hash = adjout_attr_calc_hash(&needle);
+
+	if ((attr = CH_FIND(adjout_attr_tree, &attrtable, &needle)) == NULL) {
+		attr = adjout_attr_alloc(asp, comm, nexthop, needle.hash);
+	}
+
+	return attr;
+}
+
+CH_GENERATE(adjout_attr_tree, adjout_attr, adjout_attr_eq, adjout_attr_hash);
 
 static inline struct prefix_adjout *
 prefix_adjout_lock(struct prefix_adjout *p)
@@ -60,11 +193,10 @@ prefix_is_dead(struct prefix_adjout *p)
 	return (p->flags & PREFIX_ADJOUT_FLAG_DEAD) != 0;
 }
 
-static void	 prefix_adjout_link(struct prefix_adjout *, struct pt_entry *,
-		    struct rde_peer *, uint32_t, uint32_t,
-		    struct rde_aspath *, struct rde_community *,
-		    struct nexthop *, uint8_t);
-static void	 prefix_adjout_unlink(struct prefix_adjout *);
+static void	 prefix_adjout_link(struct prefix_adjout *, struct rde_peer *,
+		    struct adjout_attr *, struct pt_entry *, uint32_t);
+static void	 prefix_adjout_unlink(struct prefix_adjout *,
+		    struct rde_peer *);
 
 static struct prefix_adjout	*prefix_adjout_alloc(void);
 static void			 prefix_adjout_free(struct prefix_adjout *);
@@ -95,12 +227,8 @@ prefix_cmp(struct prefix_adjout *a, stru
 	if (a->flags & PREFIX_ADJOUT_FLAG_EOR)
 		return 0;
 
-	if (a->aspath != b->aspath)
-		return (a->aspath > b->aspath ? 1 : -1);
-	if (a->communities != b->communities)
-		return (a->communities > b->communities ? 1 : -1);
-	if (a->nexthop != b->nexthop)
-		return (a->nexthop > b->nexthop ? 1 : -1);
+	if (a->attrs != b->attrs)
+		return (a->attrs > b->attrs ? 1 : -1);
 	return prefix_index_cmp(a, b);
 }
 
@@ -222,8 +350,7 @@ void
 prefix_adjout_update(struct prefix_adjout *p, struct rde_peer *peer,
     struct filterstate *state, struct pt_entry *pte, uint32_t path_id_tx)
 {
-	struct rde_aspath *asp;
-	struct rde_community *comm;
+	struct adjout_attr *attrs;
 
 	if (p == NULL) {
 		p = prefix_adjout_alloc();
@@ -231,7 +358,6 @@ prefix_adjout_update(struct prefix_adjou
 		p->flags |= PREFIX_ADJOUT_FLAG_DEAD;
 
 		p->pt = pt_ref(pte);
-		p->peer = peer;
 		p->path_id_tx = path_id_tx;
 
 		if (RB_INSERT(prefix_index, &peer->adj_rib_out, p) != NULL)
@@ -252,8 +378,6 @@ prefix_adjout_update(struct prefix_adjou
 		    prefix_adjout_communities(p)) &&
 		    path_equal(&state->aspath, prefix_adjout_aspath(p))) {
 			/* nothing changed */
-			p->validation_state = state->vstate;
-			p->lastchange = getmonotime();
 			p->flags &= ~PREFIX_ADJOUT_FLAG_STALE;
 			return;
 		}
@@ -265,7 +389,7 @@ prefix_adjout_update(struct prefix_adjou
 		}
 
 		/* unlink prefix so it can be relinked below */
-		prefix_adjout_unlink(p);
+		prefix_adjout_unlink(p, peer);
 		peer->stats.prefix_out_cnt--;
 	}
 	if (p->flags & PREFIX_ADJOUT_FLAG_WITHDRAW) {
@@ -285,15 +409,9 @@ prefix_adjout_update(struct prefix_adjou
 			fatalx("%s: RB index invariant violated", __func__);
 	}
 
-	asp = path_getcache(&state->aspath);
+	attrs = adjout_attr_get(state);
 
-	if ((comm = communities_lookup(&state->communities)) == NULL) {
-		/* Communities not available, create and link a new one. */
-		comm = communities_link(&state->communities);
-	}
-
-	prefix_adjout_link(p, p->pt, peer, 0, p->path_id_tx, asp, comm,
-	    state->nexthop, state->vstate);
+	prefix_adjout_link(p, peer, attrs, p->pt, p->path_id_tx);
 	peer->stats.prefix_out_cnt++;
 
 	if (p->flags & PREFIX_ADJOUT_FLAG_MASK)
@@ -311,13 +429,10 @@ prefix_adjout_update(struct prefix_adjou
  * the prefix in the RIB linked to the peer withdraw list.
  */
 void
-prefix_adjout_withdraw(struct prefix_adjout *p)
+prefix_adjout_withdraw(struct rde_peer *peer, struct prefix_adjout *p)
 {
-	struct rde_peer *peer = prefix_adjout_peer(p);
-
 	/* already a withdraw, shortcut */
 	if (p->flags & PREFIX_ADJOUT_FLAG_WITHDRAW) {
-		p->lastchange = getmonotime();
 		p->flags &= ~PREFIX_ADJOUT_FLAG_STALE;
 		return;
 	}
@@ -329,13 +444,12 @@ prefix_adjout_withdraw(struct prefix_adj
 	/* unlink prefix if it was linked (not a withdraw or dead) */
 	if ((p->flags & (PREFIX_ADJOUT_FLAG_WITHDRAW |
 	    PREFIX_ADJOUT_FLAG_DEAD)) == 0) {
-		prefix_adjout_unlink(p);
+		prefix_adjout_unlink(p, peer);
 		peer->stats.prefix_out_cnt--;
 	}
 
 	/* nothing needs to be done for PREFIX_ADJOUT_FLAG_DEAD and STALE */
 	p->flags &= ~PREFIX_ADJOUT_FLAG_MASK;
-	p->lastchange = getmonotime();
 
 	if (peer_is_up(peer)) {
 		p->flags |= PREFIX_ADJOUT_FLAG_WITHDRAW;
@@ -346,15 +460,13 @@ prefix_adjout_withdraw(struct prefix_adj
 	} else {
 		/* mark prefix dead to skip unlink on destroy */
 		p->flags |= PREFIX_ADJOUT_FLAG_DEAD;
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 	}
 }
 
 void
-prefix_adjout_destroy(struct prefix_adjout *p)
+prefix_adjout_destroy(struct rde_peer *peer, struct prefix_adjout *p)
 {
-	struct rde_peer *peer = prefix_adjout_peer(p);
-
 	if (p->flags & PREFIX_ADJOUT_FLAG_EOR) {
 		/* EOR marker is not linked in the index */
 		prefix_adjout_free(p);
@@ -372,7 +484,7 @@ prefix_adjout_destroy(struct prefix_adjo
 	/* unlink prefix if it was linked (not a withdraw or dead) */
 	if ((p->flags & (PREFIX_ADJOUT_FLAG_WITHDRAW |
 	    PREFIX_ADJOUT_FLAG_DEAD)) == 0) {
-		prefix_adjout_unlink(p);
+		prefix_adjout_unlink(p, peer);
 		peer->stats.prefix_out_cnt--;
 	}
 
@@ -398,13 +510,13 @@ prefix_adjout_flush_pending(struct rde_p
 
 	for (aid = AID_MIN; aid < AID_MAX; aid++) {
 		RB_FOREACH_SAFE(p, prefix_tree, &peer->withdraws[aid], np) {
-			prefix_adjout_destroy(p);
+			prefix_adjout_destroy(peer, p);
 		}
 		RB_FOREACH_SAFE(p, prefix_tree, &peer->updates[aid], np) {
 			p->flags &= ~PREFIX_ADJOUT_FLAG_UPDATE;
 			RB_REMOVE(prefix_tree, &peer->updates[aid], p);
 			if (p->flags & PREFIX_ADJOUT_FLAG_EOR) {
-				prefix_adjout_destroy(p);
+				prefix_adjout_destroy(peer, p);
 			} else {
 				peer->stats.pending_update--;
 			}
@@ -419,7 +531,7 @@ prefix_adjout_reaper(struct rde_peer *pe
 	int count = RDE_REAPER_ROUNDS;
 
 	RB_FOREACH_SAFE(p, prefix_index, &peer->adj_rib_out, np) {
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 		if (count-- <= 0)
 			return 0;
 	}
@@ -430,6 +542,10 @@ static struct prefix_adjout *
 prefix_restart(struct rib_context *ctx)
 {
 	struct prefix_adjout *p = NULL;
+	struct rde_peer *peer;
+
+	if ((peer = peer_get(ctx->ctx_id)) == NULL)
+		return NULL;
 
 	if (ctx->ctx_p)
 		p = prefix_adjout_unlock(ctx->ctx_p);
@@ -438,7 +554,7 @@ prefix_restart(struct rib_context *ctx)
 		struct prefix_adjout *next;
 
 		next = RB_NEXT(prefix_index, unused, p);
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 		p = next;
 	}
 	ctx->ctx_p = NULL;
@@ -446,10 +562,15 @@ prefix_restart(struct rib_context *ctx)
 }
 
 void
-prefix_adjout_dump_cleanup(struct prefix_adjout *p)
+prefix_adjout_dump_cleanup(struct rib_context *ctx)
 {
+	struct prefix_adjout *p = ctx->ctx_p;
+	struct rde_peer *peer;
+
+	if ((peer = peer_get(ctx->ctx_id)) == NULL)
+		return;
 	if (prefix_is_dead(prefix_adjout_unlock(p)))
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 }
 
 void
@@ -566,40 +687,25 @@ prefix_adjout_dump_subtree(struct rde_pe
  * Link a prefix into the different parent objects.
  */
 static void
-prefix_adjout_link(struct prefix_adjout *p, struct pt_entry *pt,
-    struct rde_peer *peer, uint32_t path_id, uint32_t path_id_tx,
-    struct rde_aspath *asp, struct rde_community *comm,
-    struct nexthop *nexthop, uint8_t vstate)
-{
-	p->aspath = path_ref(asp);
-	p->communities = communities_ref(comm);
-	p->peer = peer;
+prefix_adjout_link(struct prefix_adjout *p, struct rde_peer *peer,
+    struct adjout_attr *attrs, struct pt_entry *pt, uint32_t path_id_tx)
+{
+	p->attrs = adjout_attr_ref(attrs, peer);
 	p->pt = pt_ref(pt);
-	p->path_id = path_id;
 	p->path_id_tx = path_id_tx;
-	p->validation_state = vstate;
-	p->nexthop = nexthop_ref(nexthop);
-	p->lastchange = getmonotime();
 }
 
 /*
  * Unlink a prefix from the different parent objects.
  */
 static void
-prefix_adjout_unlink(struct prefix_adjout *p)
+prefix_adjout_unlink(struct prefix_adjout *p, struct rde_peer *peer)
 {
 	/* destroy all references to other objects */
-	/* remove nexthop ref ... */
-	nexthop_unref(p->nexthop);
-	p->nexthop = NULL;
-	/* ... communities ... */
-	communities_unref(p->communities);
-	p->communities = NULL;
-	/* and unlink from aspath */
-	path_unref(p->aspath);
-	p->aspath = NULL;
-
+	adjout_attr_unref(p->attrs, peer);
+	p->attrs = NULL;
 	pt_unref(p->pt);
+	/* must keep p->pt valid since there is an extra ref */
 }
 
 /* alloc and zero new entry. May not fail. */
@@ -610,8 +716,8 @@ prefix_adjout_alloc(void)
 
 	p = calloc(1, sizeof(*p));
 	if (p == NULL)
-		fatal("prefix_alloc");
-	rdemem.prefix_cnt++;
+		fatal(__func__);
+	rdemem.adjout_prefix_cnt++;
 	return p;
 }
 
@@ -619,6 +725,6 @@ prefix_adjout_alloc(void)
 static void
 prefix_adjout_free(struct prefix_adjout *p)
 {
-	rdemem.prefix_cnt--;
+	rdemem.adjout_prefix_cnt--;
 	free(p);
 }
Index: bgpd/rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.57 rde_peer.c
--- bgpd/rde_peer.c	1 Dec 2025 13:07:28 -0000	1.57
+++ bgpd/rde_peer.c	1 Dec 2025 13:07:57 -0000
@@ -519,10 +519,9 @@ peer_stale(struct rde_peer *peer, uint8_
 static void
 peer_blast_upcall(struct prefix_adjout *p, void *ptr)
 {
-	struct rde_peer		*peer;
+	struct rde_peer		*peer = ptr;
 
 	if ((p->flags & PREFIX_ADJOUT_FLAG_MASK) == 0) {
-		peer = prefix_adjout_peer(p);
 		/* put entries on the update queue if not already on a queue */
 		p->flags |= PREFIX_ADJOUT_FLAG_UPDATE;
 		if (RB_INSERT(prefix_tree, &peer->updates[p->pt->aid],
Index: bgpd/rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
diff -u -p -r1.282 rde_rib.c
--- bgpd/rde_rib.c	1 Dec 2025 13:07:28 -0000	1.282
+++ bgpd/rde_rib.c	1 Dec 2025 13:07:57 -0000
@@ -451,8 +451,9 @@ rib_dump_runner(void)
 }
 
 static void
-rib_dump_cleanup(struct rib_entry *re)
+rib_dump_cleanup(struct rib_context *ctx)
 {
+	struct rib_entry *re = ctx->ctx_re;
 	if (rib_empty(re_unlock(re)))
 		rib_remove(re);
 }
@@ -463,9 +464,9 @@ rib_dump_free(struct rib_context *ctx)
 	if (ctx->ctx_done)
 		ctx->ctx_done(ctx->ctx_arg, ctx->ctx_aid);
 	if (ctx->ctx_re)
-		rib_dump_cleanup(ctx->ctx_re);
+		rib_dump_cleanup(ctx);
 	if (ctx->ctx_p)
-		prefix_adjout_dump_cleanup(ctx->ctx_p);
+		prefix_adjout_dump_cleanup(ctx);
 	LIST_REMOVE(ctx, entry);
 	free(ctx);
 }
Index: bgpd/rde_update.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_update.c,v
diff -u -p -r1.184 rde_update.c
--- bgpd/rde_update.c	1 Dec 2025 13:07:28 -0000	1.184
+++ bgpd/rde_update.c	1 Dec 2025 13:08:58 -0000
@@ -244,7 +244,7 @@ up_generate_updates(struct rde_peer *pee
 done:
 	/* withdraw prefix */
 	if (p != NULL)
-		prefix_adjout_withdraw(p);
+		prefix_adjout_withdraw(peer, p);
 }
 
 /*
@@ -334,7 +334,7 @@ up_generate_addpath(struct rde_peer *pee
 	/* withdraw stale paths */
 	for (p = head; p != NULL; p = prefix_adjout_next(peer, p)) {
 		if (p->flags & PREFIX_ADJOUT_FLAG_STALE)
-			prefix_adjout_withdraw(p);
+			prefix_adjout_withdraw(peer, p);
 	}
 }
 
@@ -383,7 +383,7 @@ up_generate_addpath_all(struct rde_peer 
 		/* withdraw old path */
 		p = prefix_adjout_get(peer, old_pathid_tx, re->prefix);
 		if (p != NULL)
-			prefix_adjout_withdraw(p);
+			prefix_adjout_withdraw(peer, p);
 	}
 }
 
@@ -808,7 +808,7 @@ up_is_eor(struct rde_peer *peer, uint8_t
 		 */
 		RB_REMOVE(prefix_tree, &peer->updates[aid], p);
 		p->flags &= ~PREFIX_ADJOUT_FLAG_UPDATE;
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 		return 1;
 	}
 	return 0;
@@ -823,7 +823,7 @@ up_prefix_free(struct prefix_tree *prefi
 {
 	if (withdraw) {
 		/* prefix no longer needed, remove it */
-		prefix_adjout_destroy(p);
+		prefix_adjout_destroy(peer, p);
 		peer->stats.prefix_sent_withdraw++;
 	} else {
 		/* prefix still in Adj-RIB-Out, keep it */
@@ -856,9 +856,7 @@ up_dump_prefix(struct ibuf *buf, struct 
 
 		/* make sure we only dump prefixes which belong together */
 		if (np == NULL ||
-		    np->aspath != p->aspath ||
-		    np->communities != p->communities ||
-		    np->nexthop != p->nexthop ||
+		    np->attrs != p->attrs ||
 		    (np->flags & PREFIX_ADJOUT_FLAG_EOR))
 			done = 1;