Download raw body.
bgpd: rename prefix_adjout to adjout_prefix
This is a mechanical rename of prefix_adjout to adjout_prefix, both for
the struct and the various functions. This makes it clearer what belongs
to the adjout code.
Everything belonging to the Adj-RIB-Out handling should use adjout_ as its prefix.
--
:wq Claudio
Index: bgpctl/output.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output.c,v
diff -u -p -r1.63 output.c
--- bgpctl/output.c 2 Dec 2025 10:51:20 -0000 1.63
+++ bgpctl/output.c 2 Dec 2025 12:46:09 -0000
@@ -1071,7 +1071,7 @@ show_rib_mem(struct rde_memstats *stats)
sizeof(struct prefix)));
printf("%10lld adjout_prefix entries using %s of memory\n",
stats->adjout_prefix_cnt, fmt_mem(stats->adjout_prefix_cnt *
- sizeof(struct prefix_adjout)));
+ sizeof(struct adjout_prefix)));
printf("%10lld adjout attribute entries using %s of memory\n",
stats->adjout_attr_cnt, fmt_mem(stats->adjout_attr_cnt *
sizeof(struct adjout_attr)));
Index: bgpctl/output_json.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output_json.c,v
diff -u -p -r1.54 output_json.c
--- bgpctl/output_json.c 2 Dec 2025 10:51:20 -0000 1.54
+++ bgpctl/output_json.c 2 Dec 2025 12:46:09 -0000
@@ -904,7 +904,7 @@ json_rib_mem(struct rde_memstats *stats)
json_rib_mem_element("prefix", stats->prefix_cnt,
stats->prefix_cnt * sizeof(struct prefix), UINT64_MAX);
json_rib_mem_element("adjout_prefix", stats->adjout_prefix_cnt,
- stats->adjout_prefix_cnt * sizeof(struct prefix_adjout),
+ stats->adjout_prefix_cnt * sizeof(struct adjout_prefix),
UINT64_MAX);
json_rib_mem_element("adjout_attr", stats->adjout_attr_cnt,
stats->adjout_attr_cnt * sizeof(struct adjout_attr),
Index: bgpctl/output_ometric.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpctl/output_ometric.c,v
diff -u -p -r1.18 output_ometric.c
--- bgpctl/output_ometric.c 2 Dec 2025 10:51:20 -0000 1.18
+++ bgpctl/output_ometric.c 2 Dec 2025 12:46:09 -0000
@@ -294,7 +294,7 @@ ometric_rib_mem(struct rde_memstats *sta
ometric_rib_mem_element("prefix", stats->prefix_cnt,
stats->prefix_cnt * sizeof(struct prefix), UINT64_MAX);
ometric_rib_mem_element("adjout_prefix", stats->adjout_prefix_cnt,
- stats->adjout_prefix_cnt * sizeof(struct prefix_adjout),
+ stats->adjout_prefix_cnt * sizeof(struct adjout_prefix),
UINT64_MAX);
ometric_rib_mem_element("adjout_attr", stats->adjout_attr_cnt,
stats->adjout_attr_cnt * sizeof(struct adjout_attr),
Index: bgpd/rde.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.c,v
diff -u -p -r1.671 rde.c
--- bgpd/rde.c 2 Dec 2025 10:50:19 -0000 1.671
+++ bgpd/rde.c 2 Dec 2025 12:46:09 -0000
@@ -2961,7 +2961,7 @@ rde_dump_rib_as(struct prefix *p, struct
}
static void
-rde_dump_adjout_as(struct rde_peer *peer, struct prefix_adjout *p,
+rde_dump_adjout_as(struct rde_peer *peer, struct adjout_prefix *p,
struct rde_aspath *asp, pid_t pid, int flags)
{
struct ctl_show_rib rib;
@@ -2971,7 +2971,7 @@ rde_dump_adjout_as(struct rde_peer *peer
size_t aslen;
uint8_t l;
- nexthop = prefix_adjout_nexthop(p);
+ nexthop = adjout_prefix_nexthop(p);
memset(&rib, 0, sizeof(rib));
rib.local_pref = asp->lpref;
rib.med = asp->med;
@@ -3013,7 +3013,7 @@ rde_dump_adjout_as(struct rde_peer *peer
imsg_close(ibuf_se_ctl, wbuf);
if (flags & F_CTL_DETAIL) {
- struct rde_community *comm = prefix_adjout_communities(p);
+ struct rde_community *comm = adjout_prefix_communities(p);
size_t len = comm->nentries * sizeof(struct community);
if (comm->nentries > 0) {
if (imsg_compose(ibuf_se_ctl,
@@ -3096,7 +3096,7 @@ rde_dump_filter(struct prefix *p, struct
}
static void
-rde_dump_adjout_filter(struct rde_peer *peer, struct prefix_adjout *p,
+rde_dump_adjout_filter(struct rde_peer *peer, struct adjout_prefix *p,
struct ctl_show_rib_request *req)
{
struct rde_aspath *asp;
@@ -3104,7 +3104,7 @@ rde_dump_adjout_filter(struct rde_peer *
if (!rde_match_peer(peer, &req->neighbor))
return;
- asp = prefix_adjout_aspath(p);
+ asp = adjout_prefix_aspath(p);
if ((req->flags & F_CTL_HAS_PATHID)) {
/* Match against the transmit path id if adjout is used. */
if (req->path_id != p->path_id_tx)
@@ -3114,7 +3114,7 @@ rde_dump_adjout_filter(struct rde_peer *
!aspath_match(asp->aspath, &req->as, 0))
return;
if (req->community.flags != 0) {
- if (!community_match(prefix_adjout_communities(p),
+ if (!community_match(adjout_prefix_communities(p),
&req->community, NULL))
return;
}
@@ -3135,7 +3135,7 @@ rde_dump_upcall(struct rib_entry *re, vo
}
static void
-rde_dump_adjout_upcall(struct prefix_adjout *p, void *ptr)
+rde_dump_adjout_upcall(struct adjout_prefix *p, void *ptr)
{
struct rde_dump_ctx *ctx = ptr;
struct rde_peer *peer;
@@ -3170,13 +3170,13 @@ rde_dump_done(void *arg, uint8_t aid)
ctx->peerid = peer->conf.id;
switch (ctx->req.type) {
case IMSG_CTL_SHOW_RIB:
- if (prefix_adjout_dump_new(peer, ctx->req.aid,
+ if (adjout_prefix_dump_new(peer, ctx->req.aid,
CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
rde_dump_done, rde_dump_throttled) == -1)
goto nomem;
break;
case IMSG_CTL_SHOW_RIB_PREFIX:
- if (prefix_adjout_dump_subtree(peer, &ctx->req.prefix,
+ if (adjout_prefix_dump_subtree(peer, &ctx->req.prefix,
ctx->req.prefixlen, CTL_MSG_HIGH_MARK, ctx,
rde_dump_adjout_upcall, rde_dump_done,
rde_dump_throttled) == -1)
@@ -3207,7 +3207,7 @@ rde_dump_ctx_new(struct ctl_show_rib_req
{
struct rde_dump_ctx *ctx;
struct rib_entry *re;
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
u_int error;
uint8_t hostplen, plen;
uint16_t rid;
@@ -3245,14 +3245,14 @@ rde_dump_ctx_new(struct ctl_show_rib_req
ctx->peerid = peer->conf.id;
switch (ctx->req.type) {
case IMSG_CTL_SHOW_RIB:
- if (prefix_adjout_dump_new(peer, ctx->req.aid,
+ if (adjout_prefix_dump_new(peer, ctx->req.aid,
CTL_MSG_HIGH_MARK, ctx, rde_dump_adjout_upcall,
rde_dump_done, rde_dump_throttled) == -1)
goto nomem;
break;
case IMSG_CTL_SHOW_RIB_PREFIX:
if (req->flags & F_LONGER) {
- if (prefix_adjout_dump_subtree(peer,
+ if (adjout_prefix_dump_subtree(peer,
&req->prefix, req->prefixlen,
CTL_MSG_HIGH_MARK, ctx,
rde_dump_adjout_upcall,
@@ -3277,28 +3277,28 @@ rde_dump_ctx_new(struct ctl_show_rib_req
if (req->flags & F_SHORTER) {
for (plen = 0; plen <= req->prefixlen;
plen++) {
- p = prefix_adjout_lookup(peer,
+ p = adjout_prefix_lookup(peer,
&req->prefix, plen);
/* dump all matching paths */
while (p != NULL) {
rde_dump_adjout_upcall(
p, ctx);
- p = prefix_adjout_next(
+ p = adjout_prefix_next(
peer, p);
}
}
p = NULL;
} else if (req->prefixlen == hostplen) {
- p = prefix_adjout_match(peer,
+ p = adjout_prefix_match(peer,
&req->prefix);
} else {
- p = prefix_adjout_lookup(peer,
+ p = adjout_prefix_lookup(peer,
&req->prefix, req->prefixlen);
}
/* dump all matching paths */
while (p != NULL) {
rde_dump_adjout_upcall(p, ctx);
- p = prefix_adjout_next(peer, p);
+ p = adjout_prefix_next(peer, p);
}
} while ((peer = peer_match(&req->neighbor,
peer->conf.id)));
@@ -3561,11 +3561,11 @@ rde_evaluate_all(void)
/* flush Adj-RIB-Out by withdrawing all prefixes */
static void
-rde_up_flush_upcall(struct prefix_adjout *p, void *ptr)
+rde_up_flush_upcall(struct adjout_prefix *p, void *ptr)
{
struct rde_peer *peer = ptr;
- prefix_adjout_withdraw(peer, p);
+ adjout_prefix_withdraw(peer, p);
}
int
@@ -3931,7 +3931,7 @@ rde_reload_done(void)
rde_eval_all = 1;
if (peer->reconf_rib) {
- if (prefix_adjout_dump_new(peer, AID_UNSPEC,
+ if (adjout_prefix_dump_new(peer, AID_UNSPEC,
RDE_RUNNER_ROUNDS, peer, rde_up_flush_upcall,
rde_softreconfig_in_done, NULL) == -1)
fatal("%s: prefix_dump_new", __func__);
@@ -3982,7 +3982,7 @@ rde_reload_done(void)
if (peer->reconf_rib)
continue;
- if (prefix_adjout_dump_new(peer,
+ if (adjout_prefix_dump_new(peer,
AID_UNSPEC, RDE_RUNNER_ROUNDS, peer,
rde_up_flush_upcall,
rde_softreconfig_in_done,
Index: bgpd/rde.h
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde.h,v
diff -u -p -r1.326 rde.h
--- bgpd/rde.h 2 Dec 2025 10:50:19 -0000 1.326
+++ bgpd/rde.h 2 Dec 2025 12:46:09 -0000
@@ -71,8 +71,8 @@ struct rib {
* Currently I assume that we can do that with the neighbor_ip...
*/
RB_HEAD(peer_tree, rde_peer);
-RB_HEAD(prefix_tree, prefix_adjout);
-RB_HEAD(prefix_index, prefix_adjout);
+RB_HEAD(prefix_tree, adjout_prefix);
+RB_HEAD(prefix_index, adjout_prefix);
struct rde_peer {
RB_ENTRY(rde_peer) entry;
@@ -310,8 +310,8 @@ struct adjout_attr {
int refcnt;
};
-struct prefix_adjout {
- RB_ENTRY(prefix_adjout) index, update;
+struct adjout_prefix {
+ RB_ENTRY(adjout_prefix) index, update;
struct pt_entry *pt;
struct adjout_attr *attrs;
uint32_t path_id_tx;
@@ -342,10 +342,10 @@ enum eval_mode {
struct rib_context {
LIST_ENTRY(rib_context) entry;
struct rib_entry *ctx_re;
- struct prefix_adjout *ctx_p;
+ struct adjout_prefix *ctx_p;
uint32_t ctx_id;
void (*ctx_rib_call)(struct rib_entry *, void *);
- void (*ctx_prefix_call)(struct prefix_adjout *, void *);
+ void (*ctx_prefix_call)(struct adjout_prefix *, void *);
void (*ctx_done)(void *, uint8_t);
int (*ctx_throttle)(void *);
void *ctx_arg;
@@ -637,7 +637,7 @@ struct prefix *prefix_bypeer(struct rib_
uint32_t);
void prefix_destroy(struct prefix *);
-RB_PROTOTYPE(prefix_tree, prefix_adjout, entry, prefix_cmp)
+RB_PROTOTYPE(prefix_tree, adjout_prefix, entry, prefix_cmp)
static inline struct rde_peer *
prefix_peer(struct prefix *p)
@@ -720,51 +720,51 @@ int nexthop_unref(struct nexthop *);
/* rde_adjout.c */
void adjout_init(void);
-struct prefix_adjout *prefix_adjout_get(struct rde_peer *, uint32_t,
+struct adjout_prefix *adjout_prefix_get(struct rde_peer *, uint32_t,
struct pt_entry *);
-struct prefix_adjout *prefix_adjout_first(struct rde_peer *,
+struct adjout_prefix *adjout_prefix_first(struct rde_peer *,
struct pt_entry *);
-struct prefix_adjout *prefix_adjout_next(struct rde_peer *,
- struct prefix_adjout *);
-struct prefix_adjout *prefix_adjout_lookup(struct rde_peer *,
+struct adjout_prefix *adjout_prefix_next(struct rde_peer *,
+ struct adjout_prefix *);
+struct adjout_prefix *adjout_prefix_lookup(struct rde_peer *,
struct bgpd_addr *, int);
-struct prefix_adjout *prefix_adjout_match(struct rde_peer *,
+struct adjout_prefix *adjout_prefix_match(struct rde_peer *,
struct bgpd_addr *);
void prefix_add_eor(struct rde_peer *, uint8_t);
-void prefix_adjout_update(struct prefix_adjout *, struct rde_peer *,
+void adjout_prefix_update(struct adjout_prefix *, struct rde_peer *,
struct filterstate *, struct pt_entry *, uint32_t);
-void prefix_adjout_withdraw(struct rde_peer *,
- struct prefix_adjout *);
-void prefix_adjout_destroy(struct rde_peer *,
- struct prefix_adjout *);
-void prefix_adjout_flush_pending(struct rde_peer *);
-int prefix_adjout_reaper(struct rde_peer *);
-void prefix_adjout_dump_cleanup(struct rib_context *);
-void prefix_adjout_dump_r(struct rib_context *);
-int prefix_adjout_dump_new(struct rde_peer *, uint8_t,
+void adjout_prefix_withdraw(struct rde_peer *,
+ struct adjout_prefix *);
+void adjout_prefix_destroy(struct rde_peer *,
+ struct adjout_prefix *);
+void adjout_prefix_flush_pending(struct rde_peer *);
+int adjout_prefix_reaper(struct rde_peer *);
+void adjout_prefix_dump_cleanup(struct rib_context *);
+void adjout_prefix_dump_r(struct rib_context *);
+int adjout_prefix_dump_new(struct rde_peer *, uint8_t,
unsigned int, void *,
- void (*)(struct prefix_adjout *, void *),
+ void (*)(struct adjout_prefix *, void *),
void (*)(void *, uint8_t), int (*)(void *));
-int prefix_adjout_dump_subtree(struct rde_peer *,
+int adjout_prefix_dump_subtree(struct rde_peer *,
struct bgpd_addr *, uint8_t, unsigned int, void *,
- void (*)(struct prefix_adjout *, void *),
+ void (*)(struct adjout_prefix *, void *),
void (*)(void *, uint8_t), int (*)(void *));
static inline struct rde_aspath *
-prefix_adjout_aspath(struct prefix_adjout *p)
+adjout_prefix_aspath(struct adjout_prefix *p)
{
return (p->attrs->aspath);
}
static inline struct rde_community *
-prefix_adjout_communities(struct prefix_adjout *p)
+adjout_prefix_communities(struct adjout_prefix *p)
{
return (p->attrs->communities);
}
static inline struct nexthop *
-prefix_adjout_nexthop(struct prefix_adjout *p)
+adjout_prefix_nexthop(struct adjout_prefix *p)
{
return (p->attrs->nexthop);
}
Index: bgpd/rde_adjout.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_adjout.c,v
diff -u -p -r1.7 rde_adjout.c
--- bgpd/rde_adjout.c 2 Dec 2025 10:50:19 -0000 1.7
+++ bgpd/rde_adjout.c 2 Dec 2025 12:46:09 -0000
@@ -163,8 +163,8 @@ adjout_attr_get(struct filterstate *stat
CH_GENERATE(adjout_attr_tree, adjout_attr, adjout_attr_eq, adjout_attr_hash);
-static inline struct prefix_adjout *
-prefix_adjout_lock(struct prefix_adjout *p)
+static inline struct adjout_prefix *
+adjout_prefix_lock(struct adjout_prefix *p)
{
if (p->flags & PREFIX_ADJOUT_FLAG_LOCKED)
fatalx("%s: locking locked prefix", __func__);
@@ -172,8 +172,8 @@ prefix_adjout_lock(struct prefix_adjout
return p;
}
-static inline struct prefix_adjout *
-prefix_adjout_unlock(struct prefix_adjout *p)
+static inline struct adjout_prefix *
+adjout_prefix_unlock(struct adjout_prefix *p)
{
if ((p->flags & PREFIX_ADJOUT_FLAG_LOCKED) == 0)
fatalx("%s: unlocking unlocked prefix", __func__);
@@ -182,28 +182,28 @@ prefix_adjout_unlock(struct prefix_adjou
}
static inline int
-prefix_is_locked(struct prefix_adjout *p)
+prefix_is_locked(struct adjout_prefix *p)
{
return (p->flags & PREFIX_ADJOUT_FLAG_LOCKED) != 0;
}
static inline int
-prefix_is_dead(struct prefix_adjout *p)
+prefix_is_dead(struct adjout_prefix *p)
{
return (p->flags & PREFIX_ADJOUT_FLAG_DEAD) != 0;
}
-static void prefix_adjout_link(struct prefix_adjout *, struct rde_peer *,
+static void adjout_prefix_link(struct adjout_prefix *, struct rde_peer *,
struct adjout_attr *, struct pt_entry *, uint32_t);
-static void prefix_adjout_unlink(struct prefix_adjout *,
+static void adjout_prefix_unlink(struct adjout_prefix *,
struct rde_peer *);
-static struct prefix_adjout *prefix_adjout_alloc(void);
-static void prefix_adjout_free(struct prefix_adjout *);
+static struct adjout_prefix *adjout_prefix_alloc(void);
+static void adjout_prefix_free(struct adjout_prefix *);
/* RB tree comparison function */
static inline int
-prefix_index_cmp(struct prefix_adjout *a, struct prefix_adjout *b)
+prefix_index_cmp(struct adjout_prefix *a, struct adjout_prefix *b)
{
int r;
r = pt_prefix_cmp(a->pt, b->pt);
@@ -218,7 +218,7 @@ prefix_index_cmp(struct prefix_adjout *a
}
static inline int
-prefix_cmp(struct prefix_adjout *a, struct prefix_adjout *b)
+prefix_cmp(struct adjout_prefix *a, struct adjout_prefix *b)
{
if ((a->flags & PREFIX_ADJOUT_FLAG_EOR) !=
(b->flags & PREFIX_ADJOUT_FLAG_EOR))
@@ -232,18 +232,18 @@ prefix_cmp(struct prefix_adjout *a, stru
return prefix_index_cmp(a, b);
}
-RB_GENERATE(prefix_tree, prefix_adjout, update, prefix_cmp)
-RB_GENERATE_STATIC(prefix_index, prefix_adjout, index, prefix_index_cmp)
+RB_GENERATE(prefix_tree, adjout_prefix, update, prefix_cmp)
+RB_GENERATE_STATIC(prefix_index, adjout_prefix, index, prefix_index_cmp)
/*
* Search for specified prefix in the peer prefix_index.
* Returns NULL if not found.
*/
-struct prefix_adjout *
-prefix_adjout_get(struct rde_peer *peer, uint32_t path_id_tx,
+struct adjout_prefix *
+adjout_prefix_get(struct rde_peer *peer, uint32_t path_id_tx,
struct pt_entry *pte)
{
- struct prefix_adjout xp;
+ struct adjout_prefix xp;
memset(&xp, 0, sizeof(xp));
xp.pt = pte;
@@ -256,10 +256,10 @@ prefix_adjout_get(struct rde_peer *peer,
* Lookup a prefix without considering path_id in the peer prefix_index.
* Returns NULL if not found.
*/
-struct prefix_adjout *
-prefix_adjout_first(struct rde_peer *peer, struct pt_entry *pte)
+struct adjout_prefix *
+adjout_prefix_first(struct rde_peer *peer, struct pt_entry *pte)
{
- struct prefix_adjout xp, *np;
+ struct adjout_prefix xp, *np;
memset(&xp, 0, sizeof(xp));
xp.pt = pte;
@@ -273,10 +273,10 @@ prefix_adjout_first(struct rde_peer *pee
/*
* Return next prefix after a lookup that is actually an update.
*/
-struct prefix_adjout *
-prefix_adjout_next(struct rde_peer *peer, struct prefix_adjout *p)
+struct adjout_prefix *
+adjout_prefix_next(struct rde_peer *peer, struct adjout_prefix *p)
{
- struct prefix_adjout *np;
+ struct adjout_prefix *np;
np = RB_NEXT(prefix_index, &peer->adj_rib_out, p);
if (np == NULL || np->pt != p->pt)
@@ -288,27 +288,27 @@ prefix_adjout_next(struct rde_peer *peer
* Lookup addr/prefixlen in the peer prefix_index. Returns first match.
* Returns NULL if not found.
*/
-struct prefix_adjout *
-prefix_adjout_lookup(struct rde_peer *peer, struct bgpd_addr *addr, int plen)
+struct adjout_prefix *
+adjout_prefix_lookup(struct rde_peer *peer, struct bgpd_addr *addr, int plen)
{
- return prefix_adjout_first(peer, pt_fill(addr, plen));
+ return adjout_prefix_first(peer, pt_fill(addr, plen));
}
/*
* Lookup addr in the peer prefix_index. Returns first match.
* Returns NULL if not found.
*/
-struct prefix_adjout *
-prefix_adjout_match(struct rde_peer *peer, struct bgpd_addr *addr)
+struct adjout_prefix *
+adjout_prefix_match(struct rde_peer *peer, struct bgpd_addr *addr)
{
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
int i;
switch (addr->aid) {
case AID_INET:
case AID_VPN_IPv4:
for (i = 32; i >= 0; i--) {
- p = prefix_adjout_lookup(peer, addr, i);
+ p = adjout_prefix_lookup(peer, addr, i);
if (p != NULL)
return p;
}
@@ -316,7 +316,7 @@ prefix_adjout_match(struct rde_peer *pee
case AID_INET6:
case AID_VPN_IPv6:
for (i = 128; i >= 0; i--) {
- p = prefix_adjout_lookup(peer, addr, i);
+ p = adjout_prefix_lookup(peer, addr, i);
if (p != NULL)
return p;
}
@@ -333,13 +333,13 @@ prefix_adjout_match(struct rde_peer *pee
void
prefix_add_eor(struct rde_peer *peer, uint8_t aid)
{
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
- p = prefix_adjout_alloc();
+ p = adjout_prefix_alloc();
p->flags = PREFIX_ADJOUT_FLAG_UPDATE | PREFIX_ADJOUT_FLAG_EOR;
if (RB_INSERT(prefix_tree, &peer->updates[aid], p) != NULL)
/* no need to add if EoR marker already present */
- prefix_adjout_free(p);
+ adjout_prefix_free(p);
/* EOR marker is not inserted into the adj_rib_out index */
}
@@ -347,13 +347,13 @@ prefix_add_eor(struct rde_peer *peer, ui
* Put a prefix from the Adj-RIB-Out onto the update queue.
*/
void
-prefix_adjout_update(struct prefix_adjout *p, struct rde_peer *peer,
+adjout_prefix_update(struct adjout_prefix *p, struct rde_peer *peer,
struct filterstate *state, struct pt_entry *pte, uint32_t path_id_tx)
{
struct adjout_attr *attrs;
if (p == NULL) {
- p = prefix_adjout_alloc();
+ p = adjout_prefix_alloc();
/* initially mark DEAD so code below is skipped */
p->flags |= PREFIX_ADJOUT_FLAG_DEAD;
@@ -373,10 +373,10 @@ prefix_adjout_update(struct prefix_adjou
* paths.
*/
if (p->path_id_tx == path_id_tx &&
- prefix_adjout_nexthop(p) == state->nexthop &&
+ adjout_prefix_nexthop(p) == state->nexthop &&
communities_equal(&state->communities,
- prefix_adjout_communities(p)) &&
- path_equal(&state->aspath, prefix_adjout_aspath(p))) {
+ adjout_prefix_communities(p)) &&
+ path_equal(&state->aspath, adjout_prefix_aspath(p))) {
/* nothing changed */
p->flags &= ~PREFIX_ADJOUT_FLAG_STALE;
return;
@@ -389,7 +389,7 @@ prefix_adjout_update(struct prefix_adjou
}
/* unlink prefix so it can be relinked below */
- prefix_adjout_unlink(p, peer);
+ adjout_prefix_unlink(p, peer);
peer->stats.prefix_out_cnt--;
}
if (p->flags & PREFIX_ADJOUT_FLAG_WITHDRAW) {
@@ -411,7 +411,7 @@ prefix_adjout_update(struct prefix_adjou
attrs = adjout_attr_get(state);
- prefix_adjout_link(p, peer, attrs, p->pt, p->path_id_tx);
+ adjout_prefix_link(p, peer, attrs, p->pt, p->path_id_tx);
peer->stats.prefix_out_cnt++;
if (p->flags & PREFIX_ADJOUT_FLAG_MASK)
@@ -429,7 +429,7 @@ prefix_adjout_update(struct prefix_adjou
* the prefix in the RIB linked to the peer withdraw list.
*/
void
-prefix_adjout_withdraw(struct rde_peer *peer, struct prefix_adjout *p)
+adjout_prefix_withdraw(struct rde_peer *peer, struct adjout_prefix *p)
{
/* already a withdraw, shortcut */
if (p->flags & PREFIX_ADJOUT_FLAG_WITHDRAW) {
@@ -444,7 +444,7 @@ prefix_adjout_withdraw(struct rde_peer *
/* unlink prefix if it was linked (not a withdraw or dead) */
if ((p->flags & (PREFIX_ADJOUT_FLAG_WITHDRAW |
PREFIX_ADJOUT_FLAG_DEAD)) == 0) {
- prefix_adjout_unlink(p, peer);
+ adjout_prefix_unlink(p, peer);
peer->stats.prefix_out_cnt--;
}
@@ -460,16 +460,16 @@ prefix_adjout_withdraw(struct rde_peer *
} else {
/* mark prefix dead to skip unlink on destroy */
p->flags |= PREFIX_ADJOUT_FLAG_DEAD;
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
}
}
void
-prefix_adjout_destroy(struct rde_peer *peer, struct prefix_adjout *p)
+adjout_prefix_destroy(struct rde_peer *peer, struct adjout_prefix *p)
{
if (p->flags & PREFIX_ADJOUT_FLAG_EOR) {
/* EOR marker is not linked in the index */
- prefix_adjout_free(p);
+ adjout_prefix_free(p);
return;
}
@@ -484,7 +484,7 @@ prefix_adjout_destroy(struct rde_peer *p
/* unlink prefix if it was linked (not a withdraw or dead) */
if ((p->flags & (PREFIX_ADJOUT_FLAG_WITHDRAW |
PREFIX_ADJOUT_FLAG_DEAD)) == 0) {
- prefix_adjout_unlink(p, peer);
+ adjout_prefix_unlink(p, peer);
peer->stats.prefix_out_cnt--;
}
@@ -498,25 +498,25 @@ prefix_adjout_destroy(struct rde_peer *p
RB_REMOVE(prefix_index, &peer->adj_rib_out, p);
/* remove the last prefix reference before free */
pt_unref(p->pt);
- prefix_adjout_free(p);
+ adjout_prefix_free(p);
}
}
void
-prefix_adjout_flush_pending(struct rde_peer *peer)
+adjout_prefix_flush_pending(struct rde_peer *peer)
{
- struct prefix_adjout *p, *np;
+ struct adjout_prefix *p, *np;
uint8_t aid;
for (aid = AID_MIN; aid < AID_MAX; aid++) {
RB_FOREACH_SAFE(p, prefix_tree, &peer->withdraws[aid], np) {
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
}
RB_FOREACH_SAFE(p, prefix_tree, &peer->updates[aid], np) {
p->flags &= ~PREFIX_ADJOUT_FLAG_UPDATE;
RB_REMOVE(prefix_tree, &peer->updates[aid], p);
if (p->flags & PREFIX_ADJOUT_FLAG_EOR) {
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
} else {
peer->stats.pending_update--;
}
@@ -525,36 +525,36 @@ prefix_adjout_flush_pending(struct rde_p
}
int
-prefix_adjout_reaper(struct rde_peer *peer)
+adjout_prefix_reaper(struct rde_peer *peer)
{
- struct prefix_adjout *p, *np;
+ struct adjout_prefix *p, *np;
int count = RDE_REAPER_ROUNDS;
RB_FOREACH_SAFE(p, prefix_index, &peer->adj_rib_out, np) {
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
if (count-- <= 0)
return 0;
}
return 1;
}
-static struct prefix_adjout *
+static struct adjout_prefix *
prefix_restart(struct rib_context *ctx)
{
- struct prefix_adjout *p = NULL;
+ struct adjout_prefix *p = NULL;
struct rde_peer *peer;
if ((peer = peer_get(ctx->ctx_id)) == NULL)
return NULL;
if (ctx->ctx_p)
- p = prefix_adjout_unlock(ctx->ctx_p);
+ p = adjout_prefix_unlock(ctx->ctx_p);
while (p && prefix_is_dead(p)) {
- struct prefix_adjout *next;
+ struct adjout_prefix *next;
next = RB_NEXT(prefix_index, unused, p);
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
p = next;
}
ctx->ctx_p = NULL;
@@ -562,21 +562,21 @@ prefix_restart(struct rib_context *ctx)
}
void
-prefix_adjout_dump_cleanup(struct rib_context *ctx)
+adjout_prefix_dump_cleanup(struct rib_context *ctx)
{
- struct prefix_adjout *p = ctx->ctx_p;
+ struct adjout_prefix *p = ctx->ctx_p;
struct rde_peer *peer;
if ((peer = peer_get(ctx->ctx_id)) == NULL)
return;
- if (prefix_is_dead(prefix_adjout_unlock(p)))
- prefix_adjout_destroy(peer, p);
+ if (prefix_is_dead(adjout_prefix_unlock(p)))
+ adjout_prefix_destroy(peer, p);
}
void
-prefix_adjout_dump_r(struct rib_context *ctx)
+adjout_prefix_dump_r(struct rib_context *ctx)
{
- struct prefix_adjout *p, *next;
+ struct adjout_prefix *p, *next;
struct rde_peer *peer;
unsigned int i;
@@ -606,7 +606,7 @@ prefix_adjout_dump_r(struct rib_context
if (ctx->ctx_count && i++ >= ctx->ctx_count &&
!prefix_is_locked(p)) {
/* store and lock last element */
- ctx->ctx_p = prefix_adjout_lock(p);
+ ctx->ctx_p = adjout_prefix_lock(p);
return;
}
ctx->ctx_prefix_call(p, ctx->ctx_arg);
@@ -620,8 +620,8 @@ done:
}
int
-prefix_adjout_dump_new(struct rde_peer *peer, uint8_t aid, unsigned int count,
- void *arg, void (*upcall)(struct prefix_adjout *, void *),
+adjout_prefix_dump_new(struct rde_peer *peer, uint8_t aid, unsigned int count,
+ void *arg, void (*upcall)(struct adjout_prefix *, void *),
void (*done)(void *, uint8_t), int (*throttle)(void *))
{
struct rib_context *ctx;
@@ -640,20 +640,20 @@ prefix_adjout_dump_new(struct rde_peer *
/* requested a sync traversal */
if (count == 0)
- prefix_adjout_dump_r(ctx);
+ adjout_prefix_dump_r(ctx);
return 0;
}
int
-prefix_adjout_dump_subtree(struct rde_peer *peer, struct bgpd_addr *subtree,
+adjout_prefix_dump_subtree(struct rde_peer *peer, struct bgpd_addr *subtree,
uint8_t subtreelen, unsigned int count, void *arg,
- void (*upcall)(struct prefix_adjout *, void *),
+ void (*upcall)(struct adjout_prefix *, void *),
void (*done)(void *, uint8_t),
int (*throttle)(void *))
{
struct rib_context *ctx;
- struct prefix_adjout xp;
+ struct adjout_prefix xp;
if ((ctx = calloc(1, sizeof(*ctx))) == NULL)
return -1;
@@ -672,13 +672,13 @@ prefix_adjout_dump_subtree(struct rde_pe
xp.pt = pt_fill(subtree, subtreelen);
ctx->ctx_p = RB_NFIND(prefix_index, &peer->adj_rib_out, &xp);
if (ctx->ctx_p)
- prefix_adjout_lock(ctx->ctx_p);
+ adjout_prefix_lock(ctx->ctx_p);
rib_dump_insert(ctx);
/* requested a sync traversal */
if (count == 0)
- prefix_adjout_dump_r(ctx);
+ adjout_prefix_dump_r(ctx);
return 0;
}
@@ -687,7 +687,7 @@ prefix_adjout_dump_subtree(struct rde_pe
* Link a prefix into the different parent objects.
*/
static void
-prefix_adjout_link(struct prefix_adjout *p, struct rde_peer *peer,
+adjout_prefix_link(struct adjout_prefix *p, struct rde_peer *peer,
struct adjout_attr *attrs, struct pt_entry *pt, uint32_t path_id_tx)
{
p->attrs = adjout_attr_ref(attrs, peer);
@@ -699,7 +699,7 @@ prefix_adjout_link(struct prefix_adjout
* Unlink a prefix from the different parent objects.
*/
static void
-prefix_adjout_unlink(struct prefix_adjout *p, struct rde_peer *peer)
+adjout_prefix_unlink(struct adjout_prefix *p, struct rde_peer *peer)
{
/* destroy all references to other objects */
adjout_attr_unref(p->attrs, peer);
@@ -709,10 +709,10 @@ prefix_adjout_unlink(struct prefix_adjou
}
/* alloc and zero new entry. May not fail. */
-static struct prefix_adjout *
-prefix_adjout_alloc(void)
+static struct adjout_prefix *
+adjout_prefix_alloc(void)
{
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
p = calloc(1, sizeof(*p));
if (p == NULL)
@@ -723,7 +723,7 @@ prefix_adjout_alloc(void)
/* free a unlinked entry */
static void
-prefix_adjout_free(struct prefix_adjout *p)
+adjout_prefix_free(struct adjout_prefix *p)
{
rdemem.adjout_prefix_cnt--;
free(p);
Index: bgpd/rde_peer.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_peer.c,v
diff -u -p -r1.58 rde_peer.c
--- bgpd/rde_peer.c 2 Dec 2025 10:50:19 -0000 1.58
+++ bgpd/rde_peer.c 2 Dec 2025 12:46:09 -0000
@@ -426,7 +426,7 @@ peer_down(struct rde_peer *peer)
* and flush all pending imsg from the SE.
*/
rib_dump_terminate(peer);
- prefix_adjout_flush_pending(peer);
+ adjout_prefix_flush_pending(peer);
peer_imsg_flush(peer);
/* flush Adj-RIB-In */
@@ -499,7 +499,7 @@ peer_stale(struct rde_peer *peer, uint8_
* and flush all pending imsg from the SE.
*/
rib_dump_terminate(peer);
- prefix_adjout_flush_pending(peer);
+ adjout_prefix_flush_pending(peer);
peer_imsg_flush(peer);
if (flushall)
@@ -517,7 +517,7 @@ peer_stale(struct rde_peer *peer, uint8_
* Enqueue a prefix onto the update queue so it can be sent out.
*/
static void
-peer_blast_upcall(struct prefix_adjout *p, void *ptr)
+peer_blast_upcall(struct adjout_prefix *p, void *ptr)
{
struct rde_peer *peer = ptr;
@@ -557,9 +557,9 @@ peer_blast(struct rde_peer *peer, uint8_
rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR);
/* force out all updates from the Adj-RIB-Out */
- if (prefix_adjout_dump_new(peer, aid, 0, peer, peer_blast_upcall,
+ if (adjout_prefix_dump_new(peer, aid, 0, peer, peer_blast_upcall,
peer_blast_done, NULL) == -1)
- fatal("%s: prefix_adjout_dump_new", __func__);
+ fatal("%s: adjout_prefix_dump_new", __func__);
}
/* RIB walker callbacks for peer_dump. */
@@ -642,7 +642,7 @@ peer_reaper(struct rde_peer *peer)
if (peer == NULL)
return;
- if (!prefix_adjout_reaper(peer))
+ if (!adjout_prefix_reaper(peer))
return;
ibufq_free(peer->ibufq);
Index: bgpd/rde_rib.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_rib.c,v
diff -u -p -r1.283 rde_rib.c
--- bgpd/rde_rib.c 2 Dec 2025 10:50:19 -0000 1.283
+++ bgpd/rde_rib.c 2 Dec 2025 12:47:21 -0000
@@ -446,7 +446,7 @@ rib_dump_runner(void)
if (ctx->ctx_rib_call != NULL)
rib_dump_r(ctx);
else
- prefix_adjout_dump_r(ctx);
+ adjout_prefix_dump_r(ctx);
}
}
@@ -466,7 +466,7 @@ rib_dump_free(struct rib_context *ctx)
if (ctx->ctx_re)
rib_dump_cleanup(ctx);
if (ctx->ctx_p)
- prefix_adjout_dump_cleanup(ctx);
+ adjout_prefix_dump_cleanup(ctx);
LIST_REMOVE(ctx, entry);
free(ctx);
}
Index: bgpd/rde_update.c
===================================================================
RCS file: /cvs/src/usr.sbin/bgpd/rde_update.c,v
diff -u -p -r1.185 rde_update.c
--- bgpd/rde_update.c 2 Dec 2025 10:50:19 -0000 1.185
+++ bgpd/rde_update.c 2 Dec 2025 12:46:09 -0000
@@ -159,7 +159,7 @@ up_enforce_open_policy(struct rde_peer *
*/
static enum up_state
up_process_prefix(struct rde_peer *peer, struct prefix *new,
- struct prefix_adjout *p)
+ struct adjout_prefix *p)
{
struct filterstate state;
struct bgpd_addr addr;
@@ -195,10 +195,10 @@ up_process_prefix(struct rde_peer *peer,
/* from here on we know this is an update */
if (p == (void *)-1)
- p = prefix_adjout_get(peer, new->path_id_tx, new->pt);
+ p = adjout_prefix_get(peer, new->path_id_tx, new->pt);
up_prep_adjout(peer, &state, new->pt->aid);
- prefix_adjout_update(p, peer, &state, new->pt, new->path_id_tx);
+ adjout_prefix_update(p, peer, &state, new->pt, new->path_id_tx);
rde_filterstate_clean(&state);
/* max prefix checker outbound */
@@ -219,9 +219,9 @@ void
up_generate_updates(struct rde_peer *peer, struct rib_entry *re)
{
struct prefix *new;
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
- p = prefix_adjout_first(peer, re->prefix);
+ p = adjout_prefix_first(peer, re->prefix);
new = prefix_best(re);
while (new != NULL) {
@@ -244,7 +244,7 @@ up_generate_updates(struct rde_peer *pee
done:
/* withdraw prefix */
if (p != NULL)
- prefix_adjout_withdraw(peer, p);
+ adjout_prefix_withdraw(peer, p);
}
/*
@@ -258,14 +258,14 @@ void
up_generate_addpath(struct rde_peer *peer, struct rib_entry *re)
{
struct prefix *new;
- struct prefix_adjout *head, *p;
+ struct adjout_prefix *head, *p;
int maxpaths = 0, extrapaths = 0, extra;
int checkmode = 1;
- head = prefix_adjout_first(peer, re->prefix);
+ head = adjout_prefix_first(peer, re->prefix);
/* mark all paths as stale */
- for (p = head; p != NULL; p = prefix_adjout_next(peer, p))
+ for (p = head; p != NULL; p = adjout_prefix_next(peer, p))
p->flags |= PREFIX_ADJOUT_FLAG_STALE;
/* update paths */
@@ -332,9 +332,9 @@ up_generate_addpath(struct rde_peer *pee
}
/* withdraw stale paths */
- for (p = head; p != NULL; p = prefix_adjout_next(peer, p)) {
+ for (p = head; p != NULL; p = adjout_prefix_next(peer, p)) {
if (p->flags & PREFIX_ADJOUT_FLAG_STALE)
- prefix_adjout_withdraw(peer, p);
+ adjout_prefix_withdraw(peer, p);
}
}
@@ -346,7 +346,7 @@ void
up_generate_addpath_all(struct rde_peer *peer, struct rib_entry *re,
struct prefix *new, uint32_t old_pathid_tx)
{
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
/*
* If old and new are NULL then re-insert all prefixes from re,
@@ -381,9 +381,9 @@ up_generate_addpath_all(struct rde_peer
if (old_pathid_tx != 0) {
/* withdraw old path */
- p = prefix_adjout_get(peer, old_pathid_tx, re->prefix);
+ p = adjout_prefix_get(peer, old_pathid_tx, re->prefix);
if (p != NULL)
- prefix_adjout_withdraw(peer, p);
+ adjout_prefix_withdraw(peer, p);
}
}
@@ -394,7 +394,7 @@ up_generate_default(struct rde_peer *pee
extern struct rde_peer *peerself;
struct filterstate state;
struct rde_aspath *asp;
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
struct pt_entry *pte;
struct bgpd_addr addr;
@@ -416,7 +416,7 @@ up_generate_default(struct rde_peer *pee
memset(&addr, 0, sizeof(addr));
addr.aid = aid;
- p = prefix_adjout_lookup(peer, &addr, 0);
+ p = adjout_prefix_lookup(peer, &addr, 0);
/* outbound filter as usual */
if (rde_filter(peer->out_rules, peer, peerself, &addr, 0, &state) ==
@@ -426,11 +426,11 @@ up_generate_default(struct rde_peer *pee
}
up_prep_adjout(peer, &state, addr.aid);
- /* can't use pt_fill here since prefix_adjout_update keeps a ref */
+ /* can't use pt_fill here since adjout_prefix_update keeps a ref */
pte = pt_get(&addr, 0);
if (pte == NULL)
pte = pt_add(&addr, 0);
- prefix_adjout_update(p, peer, &state, pte, 0);
+ adjout_prefix_update(p, peer, &state, pte, 0);
rde_filterstate_clean(&state);
/* max prefix checker outbound */
@@ -798,17 +798,17 @@ up_generate_attr(struct ibuf *buf, struc
int
up_is_eor(struct rde_peer *peer, uint8_t aid)
{
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
p = RB_MIN(prefix_tree, &peer->updates[aid]);
if (p != NULL && (p->flags & PREFIX_ADJOUT_FLAG_EOR)) {
/*
* Need to remove eor from update tree because
- * prefix_adjout_destroy() can't handle that.
+ * adjout_prefix_destroy() can't handle that.
*/
RB_REMOVE(prefix_tree, &peer->updates[aid], p);
p->flags &= ~PREFIX_ADJOUT_FLAG_UPDATE;
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
return 1;
}
return 0;
@@ -818,12 +818,12 @@ up_is_eor(struct rde_peer *peer, uint8_t
#define MIN_UPDATE_LEN 16
static void
-up_prefix_free(struct prefix_tree *prefix_head, struct prefix_adjout *p,
+up_prefix_free(struct prefix_tree *prefix_head, struct adjout_prefix *p,
struct rde_peer *peer, int withdraw)
{
if (withdraw) {
/* prefix no longer needed, remove it */
- prefix_adjout_destroy(peer, p);
+ adjout_prefix_destroy(peer, p);
peer->stats.prefix_sent_withdraw++;
} else {
/* prefix still in Adj-RIB-Out, keep it */
@@ -843,7 +843,7 @@ static int
up_dump_prefix(struct ibuf *buf, struct prefix_tree *prefix_head,
struct rde_peer *peer, int withdraw)
{
- struct prefix_adjout *p, *np;
+ struct adjout_prefix *p, *np;
int done = 0, has_ap = -1, rv = -1;
RB_FOREACH_SAFE(p, prefix_tree, prefix_head, np) {
@@ -1083,7 +1083,7 @@ up_dump_withdraws(struct imsgbuf *imsg,
* Withdraw a single prefix after an error.
*/
static int
-up_dump_withdraw_one(struct rde_peer *peer, struct prefix_adjout *p,
+up_dump_withdraw_one(struct rde_peer *peer, struct adjout_prefix *p,
struct ibuf *buf)
{
size_t off;
@@ -1157,7 +1157,7 @@ up_dump_update(struct imsgbuf *imsg, str
{
struct ibuf *buf;
struct bgpd_addr addr;
- struct prefix_adjout *p;
+ struct adjout_prefix *p;
size_t off, pkgsize = MAX_PKTSIZE;
uint16_t len;
int force_ip4mp = 0;
@@ -1167,7 +1167,7 @@ up_dump_update(struct imsgbuf *imsg, str
return;
if (aid == AID_INET && peer_has_ext_nexthop(peer, AID_INET)) {
- struct nexthop *nh = prefix_adjout_nexthop(p);
+ struct nexthop *nh = adjout_prefix_nexthop(p);
if (nh != NULL && nh->exit_nexthop.aid == AID_INET6)
force_ip4mp = 1;
}
@@ -1190,8 +1190,8 @@ up_dump_update(struct imsgbuf *imsg, str
if (ibuf_add_zero(buf, sizeof(len)) == -1)
goto fail;
- if (up_generate_attr(buf, peer, prefix_adjout_aspath(p),
- prefix_adjout_communities(p), prefix_adjout_nexthop(p), aid) == -1)
+ if (up_generate_attr(buf, peer, adjout_prefix_aspath(p),
+ adjout_prefix_communities(p), adjout_prefix_nexthop(p), aid) == -1)
goto drop;
if (aid != AID_INET || force_ip4mp) {
@@ -1203,7 +1203,7 @@ up_dump_update(struct imsgbuf *imsg, str
* merge the attributes together in reverse order of
* creation.
*/
- if (up_generate_mp_reach(buf, peer, prefix_adjout_nexthop(p),
+ if (up_generate_mp_reach(buf, peer, adjout_prefix_nexthop(p),
aid) == -1)
goto drop;
}