fq_codel: align with RFC 8289 and RFC 8290 (2/2)
This diff implements the RFC 8290 batch-drop mechanism for overload
protection.
RFC 8290 section 4.1 changes the overload handling behaviour: instead of
dropping a single packet from the longest queue, drop up to half the
packets from that queue (minimum 1, maximum 64). This amortizes the cost
of finding the longest queue.
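For illustration, here is a minimal userland sketch of that batch-size
rule (not part of the diff; batch_drop_size and the sample queue
lengths are invented for this example):

#include <stdio.h>

/*
 * Illustrative only: the RFC 8290 batch-drop size for a flow with
 * qlen queued packets is half the queue, at least 1, at most 64,
 * i.e. MIN(MAX(qlen / 2, 1), 64) as used in the diff below.
 */
static unsigned int
batch_drop_size(unsigned int qlen)
{
	unsigned int ndrop = qlen / 2;

	if (ndrop < 1)
		ndrop = 1;
	if (ndrop > 64)
		ndrop = 64;
	return (ndrop);
}

int
main(void)
{
	unsigned int qlens[] = { 1, 3, 40, 500 };
	size_t i;

	for (i = 0; i < sizeof(qlens) / sizeof(qlens[0]); i++)
		printf("qlen %4u -> drop %2u\n", qlens[i],
		    batch_drop_size(qlens[i]));
	return (0);
}

So a 3-packet queue loses a single packet, a 40-packet queue loses 20,
and queues of 128 packets or more are trimmed by the 64-packet maximum.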
The draft specification (draft-ietf-aqm-fq-codel-06) dropped one packet
at a time, while the final RFC specifies batch dropping for better
performance under load.
Implementation details:
- Add pending_drops mbuf_list to struct fqcodel to queue dropped packets
- When the queue limit is exceeded, drop MIN(MAX(queue_length/2, 1), 64)
packets from the flow with the largest backlog
- Return one dropped packet per enqueue call to maintain the interface
contract with PF, allowing individual packet accounting (see the sketch
below)
- Initialize and clean up pending_drops list in alloc/free functions
This is a functional change that improves performance under overload
conditions while maintaining RFC 8290 compliance.
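As a toy userland model of the one-drop-per-call contract (not kernel
code; the pending array and the park_drop/next_parked_drop helpers are
stand-ins for the mbuf_list and its ml_* operations used in the diff):

#include <stdio.h>

/*
 * Toy model: a batch drop parks several packet ids on a pending list,
 * and each later "enqueue" call hands back at most one of them, so the
 * caller can account for drops one packet at a time.
 */
#define NPENDING	64

static int pending[NPENDING];
static int phead, ptail;

/* what the batch-drop loop would do for each dropped packet */
static void
park_drop(int id)
{
	pending[ptail++ % NPENDING] = id;
}

/* what each enqueue call checks first: one parked drop, if any */
static int
next_parked_drop(void)
{
	if (phead == ptail)
		return (-1);
	return (pending[phead++ % NPENDING]);
}

int
main(void)
{
	int i, id;

	for (i = 0; i < 3; i++)		/* a batch drop parked 3 packets */
		park_drop(100 + i);
	while ((id = next_parked_drop()) != -1)
		printf("enqueue call returned dropped packet %d\n", id);
	return (0);
}

Three consecutive calls each return one parked packet, mirroring how PF
gets to account every dropped mbuf individually.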
diff --git sys/net/fq_codel.c sys/net/fq_codel.c
index ad8ed7022c4..af2ea41e86d 100644
--- sys/net/fq_codel.c
+++ sys/net/fq_codel.c
@@ -116,6 +116,8 @@ struct fqcodel {
/* stats */
struct fqcodel_pktcntr xmit_cnt;
struct fqcodel_pktcntr drop_cnt;
+
+ struct mbuf_list pending_drops;
};
unsigned int fqcodel_idx(unsigned int, const struct mbuf *);
@@ -529,7 +531,7 @@ fqcodel_enq(struct fqcodel *fqc, struct mbuf *m)
struct flow *flow;
unsigned int backlog = 0;
int64_t now;
- int i;
+ int i, ndrop;
flow = classify_flow(fqc, m);
if (flow == NULL)
@@ -548,8 +550,20 @@ fqcodel_enq(struct fqcodel *fqc, struct mbuf *m)
}
/*
- * Check the limit for all queues and remove a packet
- * from the longest one.
+ * Flush pending_drops first to let PF account them individually.
+ * When batch dropping (below), we queue multiple packets but can
+ * only return one per enqueue call to maintain the interface
+ * contract.
+ */
+ if (!ml_empty(&fqc->pending_drops))
+ return (ml_dequeue(&fqc->pending_drops));
+
+ /*
+ * If total queue length exceeds the limit, find the flow with the
+ * largest backlog and drop up to half of its packets, with a
+ * maximum of 64, from the head. Implements RFC 8290, section 4.1
+ * batch drop to handle overload efficiently. Dropped packets are
+ * queued in pending_drops.
*/
if (fqc->qlength > fqc->qlimit) {
for (i = 0; i < fqc->nflows; i++) {
@@ -560,16 +574,23 @@ fqcodel_enq(struct fqcodel *fqc, struct mbuf *m)
}
KASSERT(flow != NULL);
- m = codel_commit(&flow->cd, NULL);
- fqc->drop_cnt.packets++;
- fqc->drop_cnt.bytes += m->m_pkthdr.len;
- fqc->qlength--;
+ ndrop = MIN(MAX(codel_qlength(&flow->cd) / 2, 1), 64);
+ for (i = 0; i < ndrop; i++) {
+ m = codel_commit(&flow->cd, NULL);
+ if (m == NULL)
+ break;
+ fqc->drop_cnt.packets++;
+ fqc->drop_cnt.bytes += m->m_pkthdr.len;
+ fqc->qlength--;
+ ml_enqueue(&fqc->pending_drops, m);
+ }
- DPRINTF("%s: dropping from flow %u\n", __func__,
- flow->id);
- return (m);
+ DPRINTF("%s: batch-dropped %d/%d pkts from flow %u\n", __func__,
+ i, ndrop, flow->id);
+
+ return (ml_dequeue(&fqc->pending_drops));
}
return (NULL);
@@ -728,6 +749,7 @@ fqcodel_pf_alloc(struct ifnet *ifp)
SIMPLEQ_INIT(&fqc->newq);
SIMPLEQ_INIT(&fqc->oldq);
+ ml_init(&fqc->pending_drops);
return (fqc);
}
@@ -782,6 +804,7 @@ fqcodel_pf_free(void *arg)
{
struct fqcodel *fqc = arg;
+ ml_purge(&fqc->pending_drops);
codel_freeparams(&fqc->cparams);
free(fqc->flows, M_DEVBUF, fqc->nflows * sizeof(struct flow));
free(fqc, M_DEVBUF, sizeof(struct fqcodel));