bgpd: more churn, cleanup struct prefix and prefix_adjout
The union/struct wrapping around the list entries in struct prefix and the
tree entries in struct prefix_adjout is no longer needed. Just remove the
wrapping. The compiler agrees with all the churn in this diff.
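For illustration, a minimal stand-alone sketch of what callers look like after
the flattening. Only rib_l and the old entry.list.rib name are taken from the
diff below; the surrounding struct fields, the prefix_queue head and the
dump_prefixes() function are simplified stand-ins, not bgpd code.

	#include <sys/queue.h>
	#include <stdio.h>

	struct prefix {
		TAILQ_ENTRY(prefix)	 rib_l;	/* was entry.list.rib */
		int			 dmetric;
	};
	TAILQ_HEAD(prefix_queue, prefix);

	static void
	dump_prefixes(struct prefix_queue *prefix_h)
	{
		struct prefix	*p;

		/* old: TAILQ_FOREACH(p, prefix_h, entry.list.rib) */
		TAILQ_FOREACH(p, prefix_h, rib_l)
			printf("dmetric %d\n", p->dmetric);
	}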
--
:wq Claudio
Index: mrt.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/mrt.c,v
diff -u -p -r1.131 mrt.c
--- mrt.c 6 Nov 2025 15:33:48 -0000 1.131
+++ mrt.c 20 Nov 2025 10:16:14 -0000
@@ -620,7 +620,7 @@ mrt_dump_entry_v2_rib(struct rib_entry *
*np = 0;
*app = 0;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
struct nexthop *nexthop;
struct bgpd_addr *nh;
@@ -917,7 +917,7 @@ mrt_dump_upcall(struct rib_entry *re, vo
* dumps the table so we do the same. If only the active route should
* be dumped p should be set to p = pt->active.
*/
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
if (mrtbuf->type == MRT_TABLE_DUMP)
mrt_dump_entry(mrtbuf, p, mrtbuf->seqnum++,
prefix_peer(p));
Index: rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
diff -u -p -r1.666 rde.c
--- rde.c 20 Nov 2025 10:10:36 -0000 1.666
+++ rde.c 20 Nov 2025 10:16:14 -0000
@@ -1874,7 +1874,7 @@ pathid_conflict(struct rib_entry *re, ui
if (re == NULL)
return 0;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l)
if (p->path_id_tx == pathid)
return 1;
return 0;
@@ -2881,7 +2881,7 @@ rde_dump_rib_as(struct prefix *p, struct
rib.flags = 0;
if (prefix_eligible(p)) {
re = prefix_re(p);
- TAILQ_FOREACH(xp, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(xp, &re->prefix_h, rib_l) {
switch (xp->dmetric) {
case PREFIX_DMETRIC_BEST:
if (xp == p)
@@ -3133,7 +3133,7 @@ rde_dump_upcall(struct rib_entry *re, vo
if (re == NULL)
return;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l)
rde_dump_filter(p, &ctx->req);
}
@@ -4171,7 +4171,7 @@ rde_softreconfig_in(struct rib_entry *re
pt = re->prefix;
pt_getaddr(pt, &prefix);
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
asp = prefix_aspath(p);
peer = prefix_peer(p);
@@ -4242,7 +4242,7 @@ rde_softreconfig_sync_reeval(struct rib_
* all dependent adj-rib-out were already flushed
* unlink nexthop if it was linked
*/
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
if (p->flags & PREFIX_NEXTHOP_LINKED)
nexthop_unlink(p);
p->dmetric = PREFIX_DMETRIC_INVALID;
@@ -4251,7 +4251,7 @@ rde_softreconfig_sync_reeval(struct rib_
}
/* evaluation process is turned on, so evaluate all prefixes again */
- TAILQ_CONCAT(&prefixes, &re->prefix_h, entry.list.rib);
+ TAILQ_CONCAT(&prefixes, &re->prefix_h, rib_l);
/*
* TODO: this code works but is not optimal. prefix_evaluate()
@@ -4259,9 +4259,9 @@ rde_softreconfig_sync_reeval(struct rib_
* to resort the list once and then call rde_generate_updates()
* and rde_send_kroute() once.
*/
- TAILQ_FOREACH_SAFE(p, &prefixes, entry.list.rib, next) {
+ TAILQ_FOREACH_SAFE(p, &prefixes, rib_l, next) {
/* need to re-link the nexthop if not already linked */
- TAILQ_REMOVE(&prefixes, p, entry.list.rib);
+ TAILQ_REMOVE(&prefixes, p, rib_l);
if ((p->flags & PREFIX_NEXTHOP_LINKED) == 0)
nexthop_link(p);
prefix_evaluate(re, p, NULL);
@@ -4314,7 +4314,7 @@ rde_rpki_softreload(struct rib_entry *re
pt = re->prefix;
pt_getaddr(pt, &prefix);
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
asp = prefix_aspath(p);
peer = prefix_peer(p);
@@ -4729,7 +4729,7 @@ network_dump_upcall(struct rib_entry *re
struct bgpd_addr addr;
struct rde_dump_ctx *ctx = ptr;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
asp = prefix_aspath(p);
if (!(asp->flags & F_PREFIX_ANNOUNCED))
continue;
@@ -4842,7 +4842,7 @@ flowspec_dump_upcall(struct rib_entry *r
uint8_t *flow;
int len;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib) {
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l) {
asp = prefix_aspath(p);
if (!(asp->flags & F_PREFIX_ANNOUNCED))
continue;
Index: rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
diff -u -p -r1.321 rde.h
--- rde.h 20 Nov 2025 10:10:36 -0000 1.321
+++ rde.h 20 Nov 2025 10:16:15 -0000
@@ -267,25 +267,21 @@ struct pt_entry {
};
struct prefix {
- union {
- struct {
- TAILQ_ENTRY(prefix) rib;
- LIST_ENTRY(prefix) nexthop;
- struct rib_entry *re;
- } list;
- } entry;
- struct pt_entry *pt;
- struct rde_aspath *aspath;
- struct rde_community *communities;
- struct rde_peer *peer;
- struct nexthop *nexthop; /* may be NULL */
- monotime_t lastchange;
- uint32_t path_id;
- uint32_t path_id_tx;
- uint16_t flags;
- uint8_t validation_state;
- uint8_t nhflags;
- int8_t dmetric; /* decision metric */
+ TAILQ_ENTRY(prefix) rib_l;
+ LIST_ENTRY(prefix) nexthop_l;
+ struct rib_entry *re;
+ struct pt_entry *pt;
+ struct rde_aspath *aspath;
+ struct rde_community *communities;
+ struct rde_peer *peer;
+ struct nexthop *nexthop; /* may be NULL */
+ monotime_t lastchange;
+ uint32_t path_id;
+ uint32_t path_id_tx;
+ uint16_t flags;
+ uint8_t validation_state;
+ uint8_t nhflags;
+ int8_t dmetric; /* decision metric */
};
#define PREFIX_FLAG_WITHDRAW 0x0001 /* enqueued on withdraw queue */
#define PREFIX_FLAG_UPDATE 0x0002 /* enqueued on update queue */
@@ -314,11 +310,7 @@ struct prefix {
#define NEXTHOP_VALID 0x80
struct prefix_adjout {
- union {
- struct {
- RB_ENTRY(prefix_adjout) index, update;
- } tree;
- } entry;
+ RB_ENTRY(prefix_adjout) index, update;
struct pt_entry *pt;
struct rde_aspath *aspath;
struct rde_community *communities;
@@ -705,9 +697,7 @@ prefix_set_vstate(struct prefix *p, uint
static inline struct rib_entry *
prefix_re(struct prefix *p)
{
- if (p->flags & PREFIX_FLAG_ADJOUT)
- return NULL;
- return (p->entry.list.re);
+ return (p->re);
}
static inline int
Index: rde_adjout.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_adjout.c,v
diff -u -p -r1.3 rde_adjout.c
--- rde_adjout.c 20 Nov 2025 10:10:36 -0000 1.3
+++ rde_adjout.c 20 Nov 2025 10:16:15 -0000
@@ -105,8 +105,8 @@ prefix_cmp(struct prefix_adjout *a, stru
return prefix_index_cmp(a, b);
}
-RB_GENERATE(prefix_tree, prefix_adjout, entry.tree.update, prefix_cmp)
-RB_GENERATE_STATIC(prefix_index, prefix_adjout, entry.tree.index, prefix_index_cmp)
+RB_GENERATE(prefix_tree, prefix_adjout, update, prefix_cmp)
+RB_GENERATE_STATIC(prefix_index, prefix_adjout, index, prefix_index_cmp)
/*
* Search for specified prefix in the peer prefix_index.
Index: rde_decide.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_decide.c,v
diff -u -p -r1.104 rde_decide.c
--- rde_decide.c 20 Feb 2025 19:47:31 -0000 1.104
+++ rde_decide.c 20 Nov 2025 10:16:15 -0000
@@ -343,7 +343,7 @@ prefix_insert(struct prefix *new, struct
ep = TAILQ_FIRST(&re->prefix_h);
for (xp = ep; xp != NULL; xp = np) {
- np = TAILQ_NEXT(xp, entry.list.rib);
+ np = TAILQ_NEXT(xp, rib_l);
if ((preferred = (prefix_cmp(new, xp, &testall) > 0))) {
/* new is preferred over xp */
@@ -352,8 +352,8 @@ prefix_insert(struct prefix *new, struct
* MED inversion, take out prefix and
* put it onto redo queue.
*/
- TAILQ_REMOVE(&re->prefix_h, xp, entry.list.rib);
- TAILQ_INSERT_TAIL(&redo, xp, entry.list.rib);
+ TAILQ_REMOVE(&re->prefix_h, xp, rib_l);
+ TAILQ_INSERT_TAIL(&redo, xp, rib_l);
removed = 1;
continue;
}
@@ -388,7 +388,7 @@ prefix_insert(struct prefix *new, struct
*/
if (removed) {
prefix_set_dmetric(TAILQ_PREV(xp, prefix_queue,
- entry.list.rib), xp);
+ rib_l), xp);
removed = 0;
}
@@ -397,18 +397,18 @@ prefix_insert(struct prefix *new, struct
}
if (insertp == NULL) {
- TAILQ_INSERT_HEAD(&re->prefix_h, new, entry.list.rib);
+ TAILQ_INSERT_HEAD(&re->prefix_h, new, rib_l);
} else {
- TAILQ_INSERT_AFTER(&re->prefix_h, insertp, new, entry.list.rib);
+ TAILQ_INSERT_AFTER(&re->prefix_h, insertp, new, rib_l);
}
prefix_set_dmetric(insertp, new);
- prefix_set_dmetric(new, TAILQ_NEXT(new, entry.list.rib));
+ prefix_set_dmetric(new, TAILQ_NEXT(new, rib_l));
/* Fixup MED order again. All elements are < new */
while (!TAILQ_EMPTY(&redo)) {
xp = TAILQ_FIRST(&redo);
- TAILQ_REMOVE(&redo, xp, entry.list.rib);
+ TAILQ_REMOVE(&redo, xp, rib_l);
prefix_insert(xp, new, re);
}
@@ -430,16 +430,16 @@ prefix_remove(struct prefix *old, struct
struct prefix *xp, *np, *pp;
int testall, removed = 0;
- xp = TAILQ_NEXT(old, entry.list.rib);
- pp = TAILQ_PREV(old, prefix_queue, entry.list.rib);
- TAILQ_REMOVE(&re->prefix_h, old, entry.list.rib);
+ xp = TAILQ_NEXT(old, rib_l);
+ pp = TAILQ_PREV(old, prefix_queue, rib_l);
+ TAILQ_REMOVE(&re->prefix_h, old, rib_l);
/* check if a MED inversion could be possible */
prefix_cmp(old, xp, &testall);
if (testall > 0) {
/* maybe MED route, scan tail for other possible routes */
for (; xp != NULL; xp = np) {
- np = TAILQ_NEXT(xp, entry.list.rib);
+ np = TAILQ_NEXT(xp, rib_l);
/* only interested in the testall result */
prefix_cmp(old, xp, &testall);
@@ -448,8 +448,8 @@ prefix_remove(struct prefix *old, struct
* possible MED inversion, take out prefix and
* put it onto redo queue.
*/
- TAILQ_REMOVE(&re->prefix_h, xp, entry.list.rib);
- TAILQ_INSERT_TAIL(&redo, xp, entry.list.rib);
+ TAILQ_REMOVE(&re->prefix_h, xp, rib_l);
+ TAILQ_INSERT_TAIL(&redo, xp, rib_l);
removed = 1;
continue;
}
@@ -460,7 +460,7 @@ prefix_remove(struct prefix *old, struct
*/
if (removed) {
prefix_set_dmetric(TAILQ_PREV(xp, prefix_queue,
- entry.list.rib), xp);
+ rib_l), xp);
removed = 0;
}
if (testall == 0)
@@ -469,14 +469,14 @@ prefix_remove(struct prefix *old, struct
}
if (pp)
- prefix_set_dmetric(pp, TAILQ_NEXT(pp, entry.list.rib));
+ prefix_set_dmetric(pp, TAILQ_NEXT(pp, rib_l));
else
prefix_set_dmetric(NULL, TAILQ_FIRST(&re->prefix_h));
/* Fixup MED order again, reinsert prefixes from the start */
while (!TAILQ_EMPTY(&redo)) {
xp = TAILQ_FIRST(&redo);
- TAILQ_REMOVE(&redo, xp, entry.list.rib);
+ TAILQ_REMOVE(&redo, xp, rib_l);
prefix_insert(xp, NULL, re);
}
@@ -538,9 +538,9 @@ prefix_evaluate(struct rib_entry *re, st
if (rib->flags & F_RIB_NOEVALUATE) {
/* decision process is turned off */
if (old != NULL)
- TAILQ_REMOVE(&re->prefix_h, old, entry.list.rib);
+ TAILQ_REMOVE(&re->prefix_h, old, rib_l);
if (new != NULL) {
- TAILQ_INSERT_HEAD(&re->prefix_h, new, entry.list.rib);
+ TAILQ_INSERT_HEAD(&re->prefix_h, new, rib_l);
new->dmetric = PREFIX_DMETRIC_INVALID;
}
return;
Index: rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.54 rde_peer.c
--- rde_peer.c 20 Nov 2025 10:10:36 -0000 1.54
+++ rde_peer.c 20 Nov 2025 10:16:15 -0000
@@ -314,7 +314,7 @@ peer_flush_upcall(struct rib_entry *re,
pt_getaddr(re->prefix, &addr);
prefixlen = re->prefix->prefixlen;
- TAILQ_FOREACH_SAFE(p, &re->prefix_h, entry.list.rib, np) {
+ TAILQ_FOREACH_SAFE(p, &re->prefix_h, rib_l, np) {
if (peer != prefix_peer(p))
continue;
if (monotime_valid(staletime) &&
Index: rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
diff -u -p -r1.279 rde_rib.c
--- rde_rib.c 20 Nov 2025 10:10:36 -0000 1.279
+++ rde_rib.c 20 Nov 2025 10:16:15 -0000
@@ -1007,12 +1007,12 @@ prefix_flowspec_update(struct rde_peer *
prefix_link(new, re, re->prefix, peer, 0, path_id_tx, asp, comm,
NULL, 0, 0);
- TAILQ_INSERT_HEAD(&re->prefix_h, new, entry.list.rib);
+ TAILQ_INSERT_HEAD(&re->prefix_h, new, rib_l);
rde_generate_updates(re, new, old, EVAL_DEFAULT);
if (old != NULL) {
- TAILQ_REMOVE(&re->prefix_h, old, entry.list.rib);
+ TAILQ_REMOVE(&re->prefix_h, old, rib_l);
prefix_unlink(old);
prefix_free(old);
return 0;
@@ -1036,7 +1036,7 @@ prefix_flowspec_withdraw(struct rde_peer
if (p == NULL)
return 0;
rde_generate_updates(re, NULL, p, EVAL_DEFAULT);
- TAILQ_REMOVE(&re->prefix_h, p, entry.list.rib);
+ TAILQ_REMOVE(&re->prefix_h, p, rib_l);
prefix_unlink(p);
prefix_free(p);
return 1;
@@ -1069,7 +1069,7 @@ prefix_bypeer(struct rib_entry *re, stru
{
struct prefix *p;
- TAILQ_FOREACH(p, &re->prefix_h, entry.list.rib)
+ TAILQ_FOREACH(p, &re->prefix_h, rib_l)
if (prefix_peer(p) == peer && p->path_id == path_id)
return (p);
return (NULL);
@@ -1096,7 +1096,7 @@ prefix_link(struct prefix *p, struct rib
struct nexthop *nexthop, uint8_t nhflags, uint8_t vstate)
{
if (re)
- p->entry.list.re = re;
+ p->re = re;
p->aspath = path_ref(asp);
p->communities = communities_ref(comm);
p->peer = peer;
@@ -1222,7 +1222,7 @@ nexthop_runner(void)
p = nh->next_prefix;
for (j = 0; p != NULL && j < RDE_RUNNER_ROUNDS; j++) {
prefix_evaluate_nexthop(p, nh->state, nh->oldstate);
- p = LIST_NEXT(p, entry.list.nexthop);
+ p = LIST_NEXT(p, nexthop_l);
}
/* prep for next run, if not finished readd to tail of queue */
@@ -1337,7 +1337,7 @@ nexthop_link(struct prefix *p)
if (p->nexthop == NULL)
return;
p->flags |= PREFIX_NEXTHOP_LINKED;
- LIST_INSERT_HEAD(&p->nexthop->prefix_h, p, entry.list.nexthop);
+ LIST_INSERT_HEAD(&p->nexthop->prefix_h, p, nexthop_l);
}
void
@@ -1349,7 +1349,7 @@ nexthop_unlink(struct prefix *p)
return;
if (p == p->nexthop->next_prefix) {
- p->nexthop->next_prefix = LIST_NEXT(p, entry.list.nexthop);
+ p->nexthop->next_prefix = LIST_NEXT(p, nexthop_l);
/* remove nexthop from list if no prefixes left to update */
if (p->nexthop->next_prefix == NULL) {
TAILQ_REMOVE(&nexthop_runners, p->nexthop, runner_l);
@@ -1359,7 +1359,7 @@ nexthop_unlink(struct prefix *p)
}
p->flags &= ~PREFIX_NEXTHOP_LINKED;
- LIST_REMOVE(p, entry.list.nexthop);
+ LIST_REMOVE(p, nexthop_l);
}
struct nexthop *
Index: rde_update.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_update.c,v
diff -u -p -r1.180 rde_update.c
--- rde_update.c 20 Nov 2025 10:10:36 -0000 1.180
+++ rde_update.c 20 Nov 2025 10:16:15 -0000
@@ -231,7 +231,7 @@ up_generate_updates(struct rde_peer *pee
return;
case UP_FILTERED:
if (peer->flags & PEERFLAG_EVALUATE_ALL) {
- new = TAILQ_NEXT(new, entry.list.rib);
+ new = TAILQ_NEXT(new, rib_l);
if (new != NULL && prefix_eligible(new))
continue;
}
@@ -326,7 +326,7 @@ up_generate_addpath(struct rde_peer *pee
}
/* only allow valid prefixes */
- new = TAILQ_NEXT(new, entry.list.rib);
+ new = TAILQ_NEXT(new, rib_l);
if (new == NULL || !prefix_eligible(new))
break;
}