Download raw body.
ix(4): simplify rx mbuf size handling
On Sat, Sep 07, 2024 at 10:51:44AM +1000, David Gwynne wrote:
> ix(4) handles 2048-byte rx buffers, so consistently use 2048 everywhere
>
> ... except when we allocate those buffers with some ETHER_ALIGN extra
> bytes so we can properly align the packet payloads.
>
> this is the same as if_igc.c r1.27.
>
> ok?
Tested on amd64:
ix0 at pci1 dev 0 function 0 "Intel X550T" rev 0x01, msix, 4 queues, address a0:36:9f:e0:52:54
OK bluhm@
> Index: if_ix.c
> ===================================================================
> RCS file: /cvs/src/sys/dev/pci/if_ix.c,v
> diff -u -p -r1.217 if_ix.c
> --- if_ix.c 4 Sep 2024 07:54:52 -0000 1.217
> +++ if_ix.c 6 Sep 2024 23:03:14 -0000
> @@ -764,7 +764,7 @@ ixgbe_init(void *arg)
> ixgbe_initialize_transmit_units(sc);
>
> /* Use 2k clusters, even for jumbo frames */
> - sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
> + sc->rx_mbuf_sz = MCLBYTES;
>
> /* Prepare receive descriptors and buffers */
> if (ixgbe_setup_receive_structures(sc)) {
> @@ -2697,12 +2694,11 @@ ixgbe_get_buf(struct ix_rxring *rxr, int
> return (ENOBUFS);
> }
>
> - /* needed in any case so preallocate since this one will fail for sure */
> - mp = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz);
> + mp = MCLGETL(NULL, M_DONTWAIT, sc->rx_mbuf_sz + ETHER_ALIGN);
> if (!mp)
> return (ENOBUFS);
>
> - mp->m_data += (mp->m_ext.ext_size - sc->rx_mbuf_sz);
> + mp->m_data += ETHER_ALIGN;
> mp->m_len = mp->m_pkthdr.len = sc->rx_mbuf_sz;
>
> error = bus_dmamap_load_mbuf(rxr->rxdma.dma_tag, rxbuf->map,
> @@ -2747,8 +2743,9 @@ ixgbe_allocate_receive_buffers(struct ix
>
> rxbuf = rxr->rx_buffers;
> for (i = 0; i < sc->num_rx_desc; i++, rxbuf++) {
> - error = bus_dmamap_create(rxr->rxdma.dma_tag, 16 * 1024, 1,
> - 16 * 1024, 0, BUS_DMA_NOWAIT, &rxbuf->map);
> + error = bus_dmamap_create(rxr->rxdma.dma_tag,
> + sc->rx_mbuf_sz, 1, sc->rx_mbuf_sz, 0,
> + BUS_DMA_NOWAIT, &rxbuf->map);
> if (error) {
> printf("%s: Unable to create Pack DMA map\n",
> ifp->if_xname);
> @@ -2789,7 +2786,8 @@ ixgbe_setup_receive_ring(struct ix_rxrin
> rxr->next_to_check = 0;
> rxr->last_desc_filled = sc->num_rx_desc - 1;
>
> - if_rxr_init(&rxr->rx_ring, 2 * ((ifp->if_hardmtu / MCLBYTES) + 1),
> + if_rxr_init(&rxr->rx_ring,
> + 2 * howmany(ifp->if_hardmtu, MCLBYTES) + 1,
> sc->num_rx_desc - 1);
>
> ixgbe_rxfill(rxr);
> @@ -2924,7 +2922,7 @@ ixgbe_initialize_receive_units(struct ix
> IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, rdrxctl);
> }
>
> - bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
> + bufsz = sc->rx_mbuf_sz >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
>
> for (i = 0; i < sc->num_queues; i++, rxr++) {
> uint64_t rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
ix(4): simplify rx mbuf size handling