Download raw body.
bgpd: next round of adjout shuffling
Duplicate struct prefix into struct prefix_adjout and adjust code
to work with that. This is the next step to allow me to alter
struct prefix_adjout in big ways.
Most bits are OK, but especially the dump code for the control socket is a
somewhat horrible copy-paste job. I hope that over time this can be
reduced but for now this gets us going :)
--
:wq Claudio
? obj
Index: rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
diff -u -p -r1.665 rde.c
--- rde.c 18 Nov 2025 16:39:36 -0000 1.665
+++ rde.c 19 Nov 2025 13:49:20 -0000
@@ -109,8 +109,8 @@ static void flowspec_dump_upcall(struct
static void flowspec_dump_done(void *, uint8_t);
void rde_shutdown(void);
-static int ovs_match(struct prefix *, uint32_t);
-static int avs_match(struct prefix *, uint32_t);
+static int ovs_match(uint8_t, uint32_t);
+static int avs_match(uint8_t, uint32_t);
static struct imsgbuf *ibuf_se;
static struct imsgbuf *ibuf_se_ctl;
@@ -2840,8 +2840,7 @@ rde_reflector(struct rde_peer *peer, str
* control specific functions
*/
static void
-rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags,
- int adjout)
+rde_dump_rib_as(struct prefix *p, struct rde_aspath *asp, pid_t pid, int flags)
{
struct ctl_show_rib rib;
struct ibuf *wbuf;
@@ -2880,7 +2879,7 @@ rde_dump_rib_as(struct prefix *p, struct
rib.aspa_validation_state = prefix_aspa_vstate(p);
rib.dmetric = p->dmetric;
rib.flags = 0;
- if (!adjout && prefix_eligible(p)) {
+ if (prefix_eligible(p)) {
re = prefix_re(p);
TAILQ_FOREACH(xp, &re->prefix_h, entry.list.rib) {
switch (xp->dmetric) {
@@ -2921,17 +2920,91 @@ rde_dump_rib_as(struct prefix *p, struct
if (monotime_valid(staletime) &&
monotime_cmp(p->lastchange, staletime) <= 0)
rib.flags |= F_PREF_STALE;
- if (!adjout) {
- if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_RECV)) {
- rib.path_id = p->path_id;
- rib.flags |= F_PREF_PATH_ID;
+ if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_RECV)) {
+ rib.path_id = p->path_id;
+ rib.flags |= F_PREF_PATH_ID;
+ }
+ aslen = aspath_length(asp->aspath);
+
+ if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid,
+ sizeof(rib) + aslen)) == NULL)
+ return;
+ if (imsg_add(wbuf, &rib, sizeof(rib)) == -1 ||
+ imsg_add(wbuf, aspath_dump(asp->aspath), aslen) == -1)
+ return;
+ imsg_close(ibuf_se_ctl, wbuf);
+
+ if (flags & F_CTL_DETAIL) {
+ struct rde_community *comm = prefix_communities(p);
+ size_t len = comm->nentries * sizeof(struct community);
+ if (comm->nentries > 0) {
+ if (imsg_compose(ibuf_se_ctl,
+ IMSG_CTL_SHOW_RIB_COMMUNITIES, 0, pid, -1,
+ comm->communities, len) == -1)
+ return;
}
- } else {
- if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_SEND)) {
- rib.path_id = p->path_id_tx;
- rib.flags |= F_PREF_PATH_ID;
+ for (l = 0; l < asp->others_len; l++) {
+ if ((a = asp->others[l]) == NULL)
+ break;
+ if ((wbuf = imsg_create(ibuf_se_ctl,
+ IMSG_CTL_SHOW_RIB_ATTR, 0, pid, 0)) == NULL)
+ return;
+ if (attr_writebuf(wbuf, a->flags, a->type, a->data,
+ a->len) == -1) {
+ ibuf_free(wbuf);
+ return;
+ }
+ imsg_close(ibuf_se_ctl, wbuf);
}
}
+}
+
+static void
+rde_dump_adjout_as(struct prefix_adjout *p, struct rde_aspath *asp, pid_t pid,
+ int flags)
+{
+ struct ctl_show_rib rib;
+ struct ibuf *wbuf;
+ struct attr *a;
+ struct nexthop *nexthop;
+ struct rde_peer *peer;
+ size_t aslen;
+ uint8_t l;
+
+ nexthop = prefix_adjout_nexthop(p);
+ peer = prefix_adjout_peer(p);
+ memset(&rib, 0, sizeof(rib));
+ rib.lastchange = p->lastchange;
+ rib.local_pref = asp->lpref;
+ rib.med = asp->med;
+ rib.weight = asp->weight;
+ strlcpy(rib.descr, peer->conf.descr, sizeof(rib.descr));
+ memcpy(&rib.remote_addr, &peer->remote_addr,
+ sizeof(rib.remote_addr));
+ rib.remote_id = peer->remote_bgpid;
+ if (nexthop != NULL) {
+ rib.exit_nexthop = nexthop->exit_nexthop;
+ rib.true_nexthop = nexthop->true_nexthop;
+ } else {
+ /* announced network can have a NULL nexthop */
+ rib.exit_nexthop.aid = p->pt->aid;
+ rib.true_nexthop.aid = p->pt->aid;
+ }
+ pt_getaddr(p->pt, &rib.prefix);
+ rib.prefixlen = p->pt->prefixlen;
+ rib.origin = asp->origin;
+ /* roa and aspa vstate skipped, they don't matter in adj-rib-out */
+ rib.dmetric = p->dmetric;
+ rib.flags = 0;
+ rib.flags |= F_PREF_ELIGIBLE;
+ if (!peer->conf.ebgp)
+ rib.flags |= F_PREF_INTERNAL;
+ if (asp->flags & F_PREFIX_ANNOUNCED)
+ rib.flags |= F_PREF_ANNOUNCE;
+ if (peer_has_add_path(peer, p->pt->aid, CAPA_AP_SEND)) {
+ rib.path_id = p->path_id_tx;
+ rib.flags |= F_PREF_PATH_ID;
+ }
aslen = aspath_length(asp->aspath);
if ((wbuf = imsg_create(ibuf_se_ctl, IMSG_CTL_SHOW_RIB, 0, pid,
@@ -2943,7 +3016,7 @@ rde_dump_rib_as(struct prefix *p, struct
imsg_close(ibuf_se_ctl, wbuf);
if (flags & F_CTL_DETAIL) {
- struct rde_community *comm = prefix_communities(p);
+ struct rde_community *comm = prefix_adjout_communities(p);
size_t len = comm->nentries * sizeof(struct community);
if (comm->nentries > 0) {
if (imsg_compose(ibuf_se_ctl,
@@ -2986,7 +3059,7 @@ rde_match_peer(struct rde_peer *p, struc
}
static void
-rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req, int adjout)
+rde_dump_filter(struct prefix *p, struct ctl_show_rib_request *req)
{
struct rde_aspath *asp;
@@ -3007,14 +3080,8 @@ rde_dump_filter(struct prefix *p, struct
(asp->flags & F_ATTR_OTC_LEAK) == 0)
return;
if ((req->flags & F_CTL_HAS_PATHID)) {
- /* Match against the transmit path id if adjout is used. */
- if (adjout) {
- if (req->path_id != p->path_id_tx)
- return;
- } else {
- if (req->path_id != p->path_id)
- return;
- }
+ if (req->path_id != p->path_id)
+ return;
}
if (req->as.type != AS_UNDEF &&
!aspath_match(asp->aspath, &req->as, 0))
@@ -3024,11 +3091,38 @@ rde_dump_filter(struct prefix *p, struct
NULL))
return;
}
- if (!ovs_match(p, req->flags))
+ if (!ovs_match(prefix_roa_vstate(p), req->flags))
+ return;
+ if (!avs_match(prefix_aspa_vstate(p), req->flags))
+ return;
+ rde_dump_rib_as(p, asp, req->pid, req->flags);
+}
+
+static void
+rde_dump_adjout_filter(struct prefix_adjout *p,
+ struct ctl_show_rib_request *req)
+{
+ struct rde_aspath *asp;
+
+ if (!rde_match_peer(prefix_adjout_peer(p), &req->neighbor))
return;
- if (!avs_match(p, req->flags))
+
+ asp = prefix_adjout_aspath(p);
+ if ((req->flags & F_CTL_HAS_PATHID)) {
+ /* Match against the transmit path id if adjout is used. */
+ if (req->path_id != p->path_id_tx)
+ return;
+ }
+ if (req->as.type != AS_UNDEF &&
+ !aspath_match(asp->aspath, &req->as, 0))
return;
- rde_dump_rib_as(p, asp, req->pid, req->flags, adjout);
+ if (req->community.flags != 0) {
+ if (!community_match(prefix_adjout_communities(p),
+ &req->community, NULL))
+ return;
+ }
+ /* in the adj-rib-out, skip matching against roa and aspa state */
+ rde_dump_adjout_as(p, asp, req->pid, req->flags);
}
static void
@@ -3040,11 +3134,11 @@ rde_dump_upcall(struct rib_entry *re, vo
if (re == NULL)
return;
TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
- rde_dump_filter(p, &ctx->req, 0);
+ rde_dump_filter(p, &ctx->req);
}
static void
-rde_dump_adjout_upcall(struct prefix *p, void *ptr)
+rde_dump_adjout_upcall(struct prefix_adjout *p, void *ptr)
{
struct rde_dump_ctx *ctx = ptr;
@@ -3052,7 +3146,7 @@ rde_dump_adjout_upcall(struct prefix *p,
fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
if (p->flags & PREFIX_FLAG_WITHDRAW)
return;
- rde_dump_filter(p, &ctx->req, 1);
+ rde_dump_adjout_filter(p, &ctx->req);
}
static int
@@ -3114,7 +3208,7 @@ rde_dump_ctx_new(struct ctl_show_rib_req
{
struct rde_dump_ctx *ctx;
struct rib_entry *re;
- struct prefix *p;
+ struct prefix_adjout *p;
u_int error;
uint8_t hostplen, plen;
uint16_t rid;
@@ -3468,7 +3562,7 @@ rde_evaluate_all(void)
/* flush Adj-RIB-Out by withdrawing all prefixes */
static void
-rde_up_flush_upcall(struct prefix *p, void *ptr)
+rde_up_flush_upcall(struct prefix_adjout *p, void *ptr)
{
prefix_adjout_withdraw(p);
}
@@ -4856,10 +4950,10 @@ rde_roa_validity(struct rde_prefixset *p
}
static int
-ovs_match(struct prefix *p, uint32_t flag)
+ovs_match(uint8_t roa_vstate, uint32_t flag)
{
if (flag & (F_CTL_OVS_VALID|F_CTL_OVS_INVALID|F_CTL_OVS_NOTFOUND)) {
- switch (prefix_roa_vstate(p)) {
+ switch (roa_vstate) {
case ROA_VALID:
if (!(flag & F_CTL_OVS_VALID))
return 0;
@@ -4881,10 +4975,10 @@ ovs_match(struct prefix *p, uint32_t fla
}
static int
-avs_match(struct prefix *p, uint32_t flag)
+avs_match(uint8_t aspa_vstate, uint32_t flag)
{
if (flag & (F_CTL_AVS_VALID|F_CTL_AVS_INVALID|F_CTL_AVS_UNKNOWN)) {
- switch (prefix_aspa_vstate(p) & ASPA_MASK) {
+ switch (aspa_vstate & ASPA_MASK) {
case ASPA_VALID:
if (!(flag & F_CTL_AVS_VALID))
return 0;
Index: rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
diff -u -p -r1.320 rde.h
--- rde.h 19 Nov 2025 09:49:27 -0000 1.320
+++ rde.h 19 Nov 2025 13:49:21 -0000
@@ -71,8 +71,8 @@ struct rib {
* Currently I assume that we can do that with the neighbor_ip...
*/
RB_HEAD(peer_tree, rde_peer);
-RB_HEAD(prefix_tree, prefix);
-RB_HEAD(prefix_index, prefix);
+RB_HEAD(prefix_tree, prefix_adjout);
+RB_HEAD(prefix_index, prefix_adjout);
struct rde_peer {
RB_ENTRY(rde_peer) entry;
@@ -273,9 +273,6 @@ struct prefix {
LIST_ENTRY(prefix) nexthop;
struct rib_entry *re;
} list;
- struct {
- RB_ENTRY(prefix) index, update;
- } tree;
} entry;
struct pt_entry *pt;
struct rde_aspath *aspath;
@@ -316,6 +313,26 @@ struct prefix {
#define NEXTHOP_MASK 0x0f
#define NEXTHOP_VALID 0x80
+struct prefix_adjout {
+ union {
+ struct {
+ RB_ENTRY(prefix_adjout) index, update;
+ } tree;
+ } entry;
+ struct pt_entry *pt;
+ struct rde_aspath *aspath;
+ struct rde_community *communities;
+ struct rde_peer *peer;
+ struct nexthop *nexthop; /* may be NULL */
+ monotime_t lastchange;
+ uint32_t path_id;
+ uint32_t path_id_tx;
+ uint16_t flags;
+ uint8_t validation_state;
+ uint8_t nhflags;
+ int8_t dmetric; /* decision metric */
+};
+
struct filterstate {
struct rde_aspath aspath;
struct rde_community communities;
@@ -333,10 +350,10 @@ enum eval_mode {
struct rib_context {
LIST_ENTRY(rib_context) entry;
struct rib_entry *ctx_re;
- struct prefix *ctx_p;
+ struct prefix_adjout *ctx_p;
uint32_t ctx_id;
void (*ctx_rib_call)(struct rib_entry *, void *);
- void (*ctx_prefix_call)(struct prefix *, void *);
+ void (*ctx_prefix_call)(struct prefix_adjout *, void *);
void (*ctx_done)(void *, uint8_t);
int (*ctx_throttle)(void *);
void *ctx_arg;
@@ -597,6 +614,8 @@ re_rib(struct rib_entry *re)
}
void path_init(void);
+struct rde_aspath *path_ref(struct rde_aspath *);
+void path_unref(struct rde_aspath *);
int path_equal(const struct rde_aspath *,
const struct rde_aspath *);
struct rde_aspath *path_getcache(struct rde_aspath *);
@@ -622,17 +641,11 @@ void prefix_flowspec_dump(uint8_t, voi
void (*)(struct rib_entry *, void *),
void (*)(void *, uint8_t));
-void prefix_link(struct prefix *, struct rib_entry *,
- struct pt_entry *, struct rde_peer *, uint32_t, uint32_t,
- struct rde_aspath *, struct rde_community *,
- struct nexthop *, uint8_t, uint8_t);
-void prefix_unlink(struct prefix *);
-
struct prefix *prefix_bypeer(struct rib_entry *, struct rde_peer *,
uint32_t);
void prefix_destroy(struct prefix *);
-RB_PROTOTYPE(prefix_tree, prefix, entry, prefix_cmp)
+RB_PROTOTYPE(prefix_tree, prefix_adjout, entry, prefix_cmp)
static inline struct rde_peer *
prefix_peer(struct prefix *p)
@@ -716,30 +729,64 @@ struct nexthop *nexthop_ref(struct nexth
int nexthop_unref(struct nexthop *);
/* rde_adjout.c */
-struct prefix *prefix_adjout_get(struct rde_peer *, uint32_t,
- struct pt_entry *);
-struct prefix *prefix_adjout_first(struct rde_peer *, struct pt_entry *);
-struct prefix *prefix_adjout_next(struct rde_peer *, struct prefix *);
-struct prefix *prefix_adjout_lookup(struct rde_peer *, struct bgpd_addr *,
- int);
-struct prefix *prefix_adjout_match(struct rde_peer *, struct bgpd_addr *);
+struct prefix_adjout *prefix_adjout_get(struct rde_peer *, uint32_t,
+ struct pt_entry *);
+struct prefix_adjout *prefix_adjout_first(struct rde_peer *,
+ struct pt_entry *);
+struct prefix_adjout *prefix_adjout_next(struct rde_peer *,
+ struct prefix_adjout *);
+struct prefix_adjout *prefix_adjout_lookup(struct rde_peer *,
+ struct bgpd_addr *, int);
+struct prefix_adjout *prefix_adjout_match(struct rde_peer *,
+ struct bgpd_addr *);
void prefix_add_eor(struct rde_peer *, uint8_t);
-void prefix_adjout_update(struct prefix *, struct rde_peer *,
+void prefix_adjout_update(struct prefix_adjout *, struct rde_peer *,
struct filterstate *, struct pt_entry *, uint32_t);
-void prefix_adjout_withdraw(struct prefix *);
-void prefix_adjout_destroy(struct prefix *);
+void prefix_adjout_withdraw(struct prefix_adjout *);
+void prefix_adjout_destroy(struct prefix_adjout *);
void prefix_adjout_flush_pending(struct rde_peer *);
int prefix_adjout_reaper(struct rde_peer *);
-void prefix_adjout_dump_cleanup(struct prefix *);
+void prefix_adjout_dump_cleanup(struct prefix_adjout *);
void prefix_adjout_dump_r(struct rib_context *);
int prefix_adjout_dump_new(struct rde_peer *, uint8_t,
- unsigned int, void *, void (*)(struct prefix *, void *),
+ unsigned int, void *,
+ void (*)(struct prefix_adjout *, void *),
void (*)(void *, uint8_t), int (*)(void *));
int prefix_adjout_dump_subtree(struct rde_peer *,
struct bgpd_addr *, uint8_t, unsigned int, void *,
- void (*)(struct prefix *, void *),
+ void (*)(struct prefix_adjout *, void *),
void (*)(void *, uint8_t), int (*)(void *));
+
+static inline struct rde_peer *
+prefix_adjout_peer(struct prefix_adjout *p)
+{
+ return (p->peer);
+}
+
+static inline struct rde_aspath *
+prefix_adjout_aspath(struct prefix_adjout *p)
+{
+ return (p->aspath);
+}
+
+static inline struct rde_community *
+prefix_adjout_communities(struct prefix_adjout *p)
+{
+ return (p->communities);
+}
+
+static inline struct nexthop *
+prefix_adjout_nexthop(struct prefix_adjout *p)
+{
+ return (p->nexthop);
+}
+
+static inline uint8_t
+prefix_adjout_nhflags(struct prefix_adjout *p)
+{
+ return (p->nhflags & NEXTHOP_MASK);
+}
/* rde_update.c */
void up_generate_updates(struct rde_peer *, struct rib_entry *);
Index: rde_adjout.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_adjout.c,v
diff -u -p -r1.2 rde_adjout.c
--- rde_adjout.c 19 Nov 2025 09:49:27 -0000 1.2
+++ rde_adjout.c 19 Nov 2025 13:49:21 -0000
@@ -30,8 +30,8 @@
/* adj-rib-out specific functions */
-static inline struct prefix *
-prefix_lock(struct prefix *p)
+static inline struct prefix_adjout *
+prefix_adjout_lock(struct prefix_adjout *p)
{
if (p->flags & PREFIX_FLAG_LOCKED)
fatalx("%s: locking locked prefix", __func__);
@@ -39,8 +39,8 @@ prefix_lock(struct prefix *p)
return p;
}
-static inline struct prefix *
-prefix_unlock(struct prefix *p)
+static inline struct prefix_adjout *
+prefix_adjout_unlock(struct prefix_adjout *p)
{
if ((p->flags & PREFIX_FLAG_LOCKED) == 0)
fatalx("%s: unlocking unlocked prefix", __func__);
@@ -49,23 +49,29 @@ prefix_unlock(struct prefix *p)
}
static inline int
-prefix_is_locked(struct prefix *p)
+prefix_is_locked(struct prefix_adjout *p)
{
return (p->flags & PREFIX_FLAG_LOCKED) != 0;
}
static inline int
-prefix_is_dead(struct prefix *p)
+prefix_is_dead(struct prefix_adjout *p)
{
return (p->flags & PREFIX_FLAG_DEAD) != 0;
}
-static struct prefix *prefix_alloc(void);
-static void prefix_free(struct prefix *);
+static void prefix_adjout_link(struct prefix_adjout *, struct pt_entry *,
+ struct rde_peer *, uint32_t, uint32_t,
+ struct rde_aspath *, struct rde_community *,
+ struct nexthop *, uint8_t, uint8_t);
+static void prefix_adjout_unlink(struct prefix_adjout *);
+
+static struct prefix_adjout *prefix_adjout_alloc(void);
+static void prefix_adjout_free(struct prefix_adjout *);
/* RB tree comparison function */
static inline int
-prefix_index_cmp(struct prefix *a, struct prefix *b)
+prefix_index_cmp(struct prefix_adjout *a, struct prefix_adjout *b)
{
int r;
r = pt_prefix_cmp(a->pt, b->pt);
@@ -80,7 +86,7 @@ prefix_index_cmp(struct prefix *a, struc
}
static inline int
-prefix_cmp(struct prefix *a, struct prefix *b)
+prefix_cmp(struct prefix_adjout *a, struct prefix_adjout *b)
{
if ((a->flags & PREFIX_FLAG_EOR) != (b->flags & PREFIX_FLAG_EOR))
return (a->flags & PREFIX_FLAG_EOR) ? 1 : -1;
@@ -99,18 +105,18 @@ prefix_cmp(struct prefix *a, struct pref
return prefix_index_cmp(a, b);
}
-RB_GENERATE(prefix_tree, prefix, entry.tree.update, prefix_cmp)
-RB_GENERATE_STATIC(prefix_index, prefix, entry.tree.index, prefix_index_cmp)
+RB_GENERATE(prefix_tree, prefix_adjout, entry.tree.update, prefix_cmp)
+RB_GENERATE_STATIC(prefix_index, prefix_adjout, entry.tree.index, prefix_index_cmp)
/*
* Search for specified prefix in the peer prefix_index.
* Returns NULL if not found.
*/
-struct prefix *
+struct prefix_adjout *
prefix_adjout_get(struct rde_peer *peer, uint32_t path_id_tx,
struct pt_entry *pte)
{
- struct prefix xp;
+ struct prefix_adjout xp;
memset(&xp, 0, sizeof(xp));
xp.pt = pte;
@@ -123,10 +129,10 @@ prefix_adjout_get(struct rde_peer *peer,
* Lookup a prefix without considering path_id in the peer prefix_index.
* Returns NULL if not found.
*/
-struct prefix *
+struct prefix_adjout *
prefix_adjout_first(struct rde_peer *peer, struct pt_entry *pte)
{
- struct prefix xp, *np;
+ struct prefix_adjout xp, *np;
memset(&xp, 0, sizeof(xp));
xp.pt = pte;
@@ -140,10 +146,10 @@ prefix_adjout_first(struct rde_peer *pee
/*
* Return next prefix after a lookup that is actually an update.
*/
-struct prefix *
-prefix_adjout_next(struct rde_peer *peer, struct prefix *p)
+struct prefix_adjout *
+prefix_adjout_next(struct rde_peer *peer, struct prefix_adjout *p)
{
- struct prefix *np;
+ struct prefix_adjout *np;
np = RB_NEXT(prefix_index, &peer->adj_rib_out, p);
if (np == NULL || np->pt != p->pt)
@@ -155,7 +161,7 @@ prefix_adjout_next(struct rde_peer *peer
* Lookup addr/prefixlen in the peer prefix_index. Returns first match.
* Returns NULL if not found.
*/
-struct prefix *
+struct prefix_adjout *
prefix_adjout_lookup(struct rde_peer *peer, struct bgpd_addr *addr, int plen)
{
return prefix_adjout_first(peer, pt_fill(addr, plen));
@@ -165,10 +171,10 @@ prefix_adjout_lookup(struct rde_peer *pe
* Lookup addr in the peer prefix_index. Returns first match.
* Returns NULL if not found.
*/
-struct prefix *
+struct prefix_adjout *
prefix_adjout_match(struct rde_peer *peer, struct bgpd_addr *addr)
{
- struct prefix *p;
+ struct prefix_adjout *p;
int i;
switch (addr->aid) {
@@ -200,13 +206,13 @@ prefix_adjout_match(struct rde_peer *pee
void
prefix_add_eor(struct rde_peer *peer, uint8_t aid)
{
- struct prefix *p;
+ struct prefix_adjout *p;
- p = prefix_alloc();
+ p = prefix_adjout_alloc();
p->flags = PREFIX_FLAG_ADJOUT | PREFIX_FLAG_UPDATE | PREFIX_FLAG_EOR;
if (RB_INSERT(prefix_tree, &peer->updates[aid], p) != NULL)
/* no need to add if EoR marker already present */
- prefix_free(p);
+ prefix_adjout_free(p);
/* EOR marker is not inserted into the adj_rib_out index */
}
@@ -214,14 +220,14 @@ prefix_add_eor(struct rde_peer *peer, ui
* Put a prefix from the Adj-RIB-Out onto the update queue.
*/
void
-prefix_adjout_update(struct prefix *p, struct rde_peer *peer,
+prefix_adjout_update(struct prefix_adjout *p, struct rde_peer *peer,
struct filterstate *state, struct pt_entry *pte, uint32_t path_id_tx)
{
struct rde_aspath *asp;
struct rde_community *comm;
if (p == NULL) {
- p = prefix_alloc();
+ p = prefix_adjout_alloc();
/* initially mark DEAD so code below is skipped */
p->flags |= PREFIX_FLAG_ADJOUT | PREFIX_FLAG_DEAD;
@@ -243,11 +249,11 @@ prefix_adjout_update(struct prefix *p, s
* paths.
*/
if (p->path_id_tx == path_id_tx &&
- prefix_nhflags(p) == state->nhflags &&
- prefix_nexthop(p) == state->nexthop &&
+ prefix_adjout_nhflags(p) == state->nhflags &&
+ prefix_adjout_nexthop(p) == state->nexthop &&
communities_equal(&state->communities,
- prefix_communities(p)) &&
- path_equal(&state->aspath, prefix_aspath(p))) {
+ prefix_adjout_communities(p)) &&
+ path_equal(&state->aspath, prefix_adjout_aspath(p))) {
/* nothing changed */
p->validation_state = state->vstate;
p->lastchange = getmonotime();
@@ -262,7 +268,7 @@ prefix_adjout_update(struct prefix *p, s
}
/* unlink prefix so it can be relinked below */
- prefix_unlink(p);
+ prefix_adjout_unlink(p);
peer->stats.prefix_out_cnt--;
}
if (p->flags & PREFIX_FLAG_WITHDRAW) {
@@ -289,7 +295,7 @@ prefix_adjout_update(struct prefix *p, s
comm = communities_link(&state->communities);
}
- prefix_link(p, NULL, p->pt, peer, 0, p->path_id_tx, asp, comm,
+ prefix_adjout_link(p, p->pt, peer, 0, p->path_id_tx, asp, comm,
state->nexthop, state->nhflags, state->vstate);
peer->stats.prefix_out_cnt++;
@@ -308,9 +314,9 @@ prefix_adjout_update(struct prefix *p, s
* the prefix in the RIB linked to the peer withdraw list.
*/
void
-prefix_adjout_withdraw(struct prefix *p)
+prefix_adjout_withdraw(struct prefix_adjout *p)
{
- struct rde_peer *peer = prefix_peer(p);
+ struct rde_peer *peer = prefix_adjout_peer(p);
if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
@@ -328,7 +334,7 @@ prefix_adjout_withdraw(struct prefix *p)
}
/* unlink prefix if it was linked (not a withdraw or dead) */
if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
- prefix_unlink(p);
+ prefix_adjout_unlink(p);
peer->stats.prefix_out_cnt--;
}
@@ -350,16 +356,16 @@ prefix_adjout_withdraw(struct prefix *p)
}
void
-prefix_adjout_destroy(struct prefix *p)
+prefix_adjout_destroy(struct prefix_adjout *p)
{
- struct rde_peer *peer = prefix_peer(p);
+ struct rde_peer *peer = prefix_adjout_peer(p);
if ((p->flags & PREFIX_FLAG_ADJOUT) == 0)
fatalx("%s: prefix without PREFIX_FLAG_ADJOUT hit", __func__);
if (p->flags & PREFIX_FLAG_EOR) {
/* EOR marker is not linked in the index */
- prefix_free(p);
+ prefix_adjout_free(p);
return;
}
@@ -373,7 +379,7 @@ prefix_adjout_destroy(struct prefix *p)
}
/* unlink prefix if it was linked (not a withdraw or dead) */
if ((p->flags & (PREFIX_FLAG_WITHDRAW | PREFIX_FLAG_DEAD)) == 0) {
- prefix_unlink(p);
+ prefix_adjout_unlink(p);
peer->stats.prefix_out_cnt--;
}
@@ -387,14 +393,14 @@ prefix_adjout_destroy(struct prefix *p)
RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
/* remove the last prefix reference before free */
pt_unref(p->pt);
- prefix_free(p);
+ prefix_adjout_free(p);
}
}
void
prefix_adjout_flush_pending(struct rde_peer *peer)
{
- struct prefix *p, *np;
+ struct prefix_adjout *p, *np;
uint8_t aid;
for (aid = AID_MIN; aid < AID_MAX; aid++) {
@@ -416,7 +422,7 @@ prefix_adjout_flush_pending(struct rde_p
int
prefix_adjout_reaper(struct rde_peer *peer)
{
- struct prefix *p, *np;
+ struct prefix_adjout *p, *np;
int count = RDE_REAPER_ROUNDS;
RB_FOREACH_SAFE(p, prefix_index, &peer->adj_rib_out, np) {
@@ -427,16 +433,16 @@ prefix_adjout_reaper(struct rde_peer *pe
return 1;
}
-static struct prefix *
+static struct prefix_adjout *
prefix_restart(struct rib_context *ctx)
{
- struct prefix *p = NULL;
+ struct prefix_adjout *p = NULL;
if (ctx->ctx_p)
- p = prefix_unlock(ctx->ctx_p);
+ p = prefix_adjout_unlock(ctx->ctx_p);
- if (p && prefix_is_dead(p)) {
- struct prefix *next;
+ while (p && prefix_is_dead(p)) {
+ struct prefix_adjout *next;
next = RB_NEXT(prefix_index, unused, p);
prefix_adjout_destroy(p);
@@ -447,16 +453,16 @@ prefix_restart(struct rib_context *ctx)
}
void
-prefix_adjout_dump_cleanup(struct prefix *p)
+prefix_adjout_dump_cleanup(struct prefix_adjout *p)
{
- if (prefix_is_dead(prefix_unlock(p)))
+ if (prefix_is_dead(prefix_adjout_unlock(p)))
prefix_adjout_destroy(p);
}
void
prefix_adjout_dump_r(struct rib_context *ctx)
{
- struct prefix *p, *next;
+ struct prefix_adjout *p, *next;
struct rde_peer *peer;
unsigned int i;
@@ -486,7 +492,7 @@ prefix_adjout_dump_r(struct rib_context
if (ctx->ctx_count && i++ >= ctx->ctx_count &&
!prefix_is_locked(p)) {
/* store and lock last element */
- ctx->ctx_p = prefix_lock(p);
+ ctx->ctx_p = prefix_adjout_lock(p);
return;
}
ctx->ctx_prefix_call(p, ctx->ctx_arg);
@@ -501,7 +507,7 @@ done:
int
prefix_adjout_dump_new(struct rde_peer *peer, uint8_t aid, unsigned int count,
- void *arg, void (*upcall)(struct prefix *, void *),
+ void *arg, void (*upcall)(struct prefix_adjout *, void *),
void (*done)(void *, uint8_t), int (*throttle)(void *))
{
struct rib_context *ctx;
@@ -528,11 +534,12 @@ prefix_adjout_dump_new(struct rde_peer *
int
prefix_adjout_dump_subtree(struct rde_peer *peer, struct bgpd_addr *subtree,
uint8_t subtreelen, unsigned int count, void *arg,
- void (*upcall)(struct prefix *, void *), void (*done)(void *, uint8_t),
+ void (*upcall)(struct prefix_adjout *, void *),
+ void (*done)(void *, uint8_t),
int (*throttle)(void *))
{
struct rib_context *ctx;
- struct prefix xp;
+ struct prefix_adjout xp;
if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
return -1;
@@ -551,7 +558,7 @@ prefix_adjout_dump_subtree(struct rde_pe
xp.pt = pt_fill(subtree, subtreelen);
ctx->ctx_p = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
if (ctx->ctx_p)
- prefix_lock(ctx->ctx_p);
+ prefix_adjout_lock(ctx->ctx_p);
rib_dump_insert(ctx);
@@ -562,11 +569,55 @@ prefix_adjout_dump_subtree(struct rde_pe
return 0;
}
+/*
+ * Link a prefix into the different parent objects.
+ */
+static void
+prefix_adjout_link(struct prefix_adjout *p, struct pt_entry *pt,
+ struct rde_peer *peer, uint32_t path_id, uint32_t path_id_tx,
+ struct rde_aspath *asp, struct rde_community *comm,
+ struct nexthop *nexthop, uint8_t nhflags, uint8_t vstate)
+{
+ p->aspath = path_ref(asp);
+ p->communities = communities_ref(comm);
+ p->peer = peer;
+ p->pt = pt_ref(pt);
+ p->path_id = path_id;
+ p->path_id_tx = path_id_tx;
+ p->validation_state = vstate;
+ p->nexthop = nexthop_ref(nexthop);
+ p->nhflags = nhflags;
+ /* All nexthops are valid in Adj-RIB-Out */
+ p->nhflags |= NEXTHOP_VALID;
+ p->lastchange = getmonotime();
+}
+
+/*
+ * Unlink a prefix from the different parent objects.
+ */
+static void
+prefix_adjout_unlink(struct prefix_adjout *p)
+{
+ /* destroy all references to other objects */
+ /* remove nexthop ref ... */
+ nexthop_unref(p->nexthop);
+ p->nexthop = NULL;
+ p->nhflags = 0;
+ /* ... communities ... */
+ communities_unref(p->communities);
+ p->communities = NULL;
+ /* and unlink from aspath */
+ path_unref(p->aspath);
+ p->aspath = NULL;
+
+ pt_unref(p->pt);
+}
+
/* alloc and zero new entry. May not fail. */
-static struct prefix *
-prefix_alloc(void)
+static struct prefix_adjout *
+prefix_adjout_alloc(void)
{
- struct prefix *p;
+ struct prefix_adjout *p;
p = calloc(1, sizeof(*p));
if (p == NULL)
@@ -577,7 +628,7 @@ prefix_alloc(void)
/* free a unlinked entry */
static void
-prefix_free(struct prefix *p)
+prefix_adjout_free(struct prefix_adjout *p)
{
rdemem.prefix_cnt--;
free(p);
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.53 rde_peer.c
--- rde_peer.c 18 Nov 2025 16:39:36 -0000 1.53
+++ rde_peer.c 19 Nov 2025 13:49:21 -0000
@@ -516,12 +516,12 @@ peer_stale(struct rde_peer *peer, uint8_
* Enqueue a prefix onto the update queue so it can be sent out.
*/
static void
-peer_blast_upcall(struct prefix *p, void *ptr)
+peer_blast_upcall(struct prefix_adjout *p, void *ptr)
{
struct rde_peer *peer;
if ((p->flags & PREFIX_FLAG_MASK) == 0) {
- peer = prefix_peer(p);
+ peer = prefix_adjout_peer(p);
/* put entries on the update queue if not already on a queue */
p->flags |= PREFIX_FLAG_UPDATE;
if (RB_INSERT(prefix_tree, &peer->updates[p->pt->aid],
Index: rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
diff -u -p -r1.278 rde_rib.c
--- rde_rib.c 19 Nov 2025 13:30:59 -0000 1.278
+++ rde_rib.c 19 Nov 2025 13:49:21 -0000
@@ -624,7 +624,7 @@ CH_PROTOTYPE(path_tree, rde_aspath, path
static struct path_tree pathtable = CH_INITIALIZER(&pathtable);
-static inline struct rde_aspath *
+struct rde_aspath *
path_ref(struct rde_aspath *asp)
{
if ((asp->flags & F_ATTR_LINKED) == 0)
@@ -635,7 +635,7 @@ path_ref(struct rde_aspath *asp)
return asp;
}
-static inline void
+void
path_unref(struct rde_aspath *asp)
{
if (asp == NULL)
@@ -788,6 +788,12 @@ static int prefix_move(struct prefix *,
struct rde_aspath *, struct rde_community *,
struct nexthop *, uint8_t, uint8_t, int);
+static void prefix_link(struct prefix *, struct rib_entry *,
+ struct pt_entry *, struct rde_peer *, uint32_t, uint32_t,
+ struct rde_aspath *, struct rde_community *,
+ struct nexthop *, uint8_t, uint8_t);
+static void prefix_unlink(struct prefix *);
+
static struct prefix *prefix_alloc(void);
static void prefix_free(struct prefix *);
@@ -1083,7 +1089,7 @@ prefix_destroy(struct prefix *p)
/*
* Link a prefix into the different parent objects.
*/
-void
+static void
prefix_link(struct prefix *p, struct rib_entry *re, struct pt_entry *pt,
struct rde_peer *peer, uint32_t path_id, uint32_t path_id_tx,
struct rde_aspath *asp, struct rde_community *comm,
@@ -1107,7 +1113,7 @@ prefix_link(struct prefix *p, struct rib
/*
* Unlink a prefix from the different parent objects.
*/
-void
+static void
prefix_unlink(struct prefix *p)
{
struct rib_entry *re = prefix_re(p);
Index: rde_update.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_update.c,v
diff -u -p -r1.179 rde_update.c
--- rde_update.c 13 Nov 2025 21:43:12 -0000 1.179
+++ rde_update.c 19 Nov 2025 13:49:21 -0000
@@ -158,7 +158,8 @@ up_enforce_open_policy(struct rde_peer *
* - UP_EXCLUDED if prefix was excluded because of up_test_update()
*/
static enum up_state
-up_process_prefix(struct rde_peer *peer, struct prefix *new, struct prefix *p)
+up_process_prefix(struct rde_peer *peer, struct prefix *new,
+ struct prefix_adjout *p)
{
struct filterstate state;
struct bgpd_addr addr;
@@ -217,7 +218,8 @@ up_process_prefix(struct rde_peer *peer,
void
up_generate_updates(struct rde_peer *peer, struct rib_entry *re)
{
- struct prefix *new, *p;
+ struct prefix *new;
+ struct prefix_adjout *p;
p = prefix_adjout_first(peer, re->prefix);
@@ -255,7 +257,8 @@ done:
void
up_generate_addpath(struct rde_peer *peer, struct rib_entry *re)
{
- struct prefix *head, *new, *p;
+ struct prefix *new;
+ struct prefix_adjout *head, *p;
int maxpaths = 0, extrapaths = 0, extra;
int checkmode = 1;
@@ -343,7 +346,7 @@ void
up_generate_addpath_all(struct rde_peer *peer, struct rib_entry *re,
struct prefix *new, struct prefix *old)
{
- struct prefix *p;
+ struct prefix_adjout *p;
/*
* If old and new are NULL then re-insert all prefixes from re,
@@ -387,7 +390,7 @@ up_generate_default(struct rde_peer *pee
extern struct rde_peer *peerself;
struct filterstate state;
struct rde_aspath *asp;
- struct prefix *p;
+ struct prefix_adjout *p;
struct pt_entry *pte;
struct bgpd_addr addr;
@@ -791,7 +794,7 @@ up_generate_attr(struct ibuf *buf, struc
int
up_is_eor(struct rde_peer *peer, uint8_t aid)
{
- struct prefix *p;
+ struct prefix_adjout *p;
p = RB_MIN(prefix_tree, &peer->updates[aid]);
if (p != NULL && (p->flags & PREFIX_FLAG_EOR)) {
@@ -811,7 +814,7 @@ up_is_eor(struct rde_peer *peer, uint8_t
#define MIN_UPDATE_LEN 16
static void
-up_prefix_free(struct prefix_tree *prefix_head, struct prefix *p,
+up_prefix_free(struct prefix_tree *prefix_head, struct prefix_adjout *p,
struct rde_peer *peer, int withdraw)
{
if (withdraw) {
@@ -836,8 +839,8 @@ static int
up_dump_prefix(struct ibuf *buf, struct prefix_tree *prefix_head,
struct rde_peer *peer, int withdraw)
{
- struct prefix *p, *np;
- int done = 0, has_ap = -1, rv = -1;
+ struct prefix_adjout *p, *np;
+ int done = 0, has_ap = -1, rv = -1;
RB_FOREACH_SAFE(p, prefix_tree, prefix_head, np) {
if (has_ap == -1)
@@ -1079,7 +1082,8 @@ up_dump_withdraws(struct imsgbuf *imsg,
* Withdraw a single prefix after an error.
*/
static int
-up_dump_withdraw_one(struct rde_peer *peer, struct prefix *p, struct ibuf *buf)
+up_dump_withdraw_one(struct rde_peer *peer, struct prefix_adjout *p,
+ struct ibuf *buf)
{
size_t off;
int has_ap;
@@ -1152,7 +1156,7 @@ up_dump_update(struct imsgbuf *imsg, str
{
struct ibuf *buf;
struct bgpd_addr addr;
- struct prefix *p;
+ struct prefix_adjout *p;
size_t off, pkgsize = MAX_PKTSIZE;
uint16_t len;
int force_ip4mp = 0;
@@ -1162,7 +1166,7 @@ up_dump_update(struct imsgbuf *imsg, str
return;
if (aid == AID_INET && peer_has_ext_nexthop(peer, AID_INET)) {
- struct nexthop *nh = prefix_nexthop(p);
+ struct nexthop *nh = prefix_adjout_nexthop(p);
if (nh != NULL && nh->exit_nexthop.aid == AID_INET6)
force_ip4mp = 1;
}
@@ -1185,8 +1189,8 @@ up_dump_update(struct imsgbuf *imsg, str
if (ibuf_add_zero(buf, sizeof(len)) == -1)
goto fail;
- if (up_generate_attr(buf, peer, prefix_aspath(p),
- prefix_communities(p), prefix_nexthop(p), aid) == -1)
+ if (up_generate_attr(buf, peer, prefix_adjout_aspath(p),
+ prefix_adjout_communities(p), prefix_adjout_nexthop(p), aid) == -1)
goto drop;
if (aid != AID_INET || force_ip4mp) {
@@ -1198,8 +1202,8 @@ up_dump_update(struct imsgbuf *imsg, str
* merge the attributes together in reverse order of
* creation.
*/
- if (up_generate_mp_reach(buf, peer, prefix_nexthop(p), aid) ==
- -1)
+ if (up_generate_mp_reach(buf, peer, prefix_adjout_nexthop(p),
+ aid) == -1)
goto drop;
}
bgpd: next round of adjout shuffling