From: Yuichiro NAITO
Subject: Re: iavf(4): multi-queue support
To: tech@openbsd.org
Date: Wed, 04 Sep 2024 17:22:21 +0900

Hi. If you are interested in iavf(4) multi-queue support, try the following
complete patch, which enables multi-queue, checksum offloads, and TSO.
I confirmed it works on my ESXi 8.0 and Linux qemu/kvm hosts. With an MTU of
1500 on ESXi 8.0, iperf3 shows 9.41 Gbps transmit and 6.87 Gbps receive for
my OpenBSD guest.

From: Yuichiro NAITO
Subject: iavf(4): multiqueues support
Date: Fri, 26 Jul 2024 18:30:04 +0900 (JST)

> Hi, when I use an iavf interface on an OpenBSD virtual machine with ESXi,
> iavf(4) fails to attach and doesn't work for me.
>
> ```
> iavf0 at pci11 dev 0 function 0 "Intel XL710/X710 VF" rev 0x01, VF version 1.1, VF 0 VSI 19config irq map failed: -5
> , msix, address 00:0c:29:a2:be:07
> ```
>
> The message shows that the IRQ mapping request fails with error code -5
> (IAVF_VC_RC_ERR_PARAM). The 'iavf_config_irq_map' function in the driver
> requests a single IRQ covering the admin queue and the tx/rx queues, which
> differs from the number of MSI-X interrupts. The PF (Physical Function)
> driver on ESXi doesn't allow such a single-IRQ request. With the Linux PF
> driver it isn't a problem, since a single IRQ request is allowed.

diff --git a/sys/dev/pci/if_iavf.c b/sys/dev/pci/if_iavf.c index d573d6725f4..6b5b7df0d5d 100644 --- a/sys/dev/pci/if_iavf.c +++ b/sys/dev/pci/if_iavf.c @@ -62,6 +62,7 @@ #include #include #include +#include #include #include @@ -75,11 +76,19 @@ #include #include +#include #include #include #include +#ifndef CACHE_LINE_SIZE +#define CACHE_LINE_SIZE 64 +#endif + +#define IAVF_MAX_VECTORS 4 +#define IAVF_MAX_DMA_SEG_SIZE ((16 * 1024) - 1) + #define I40E_MASK(mask, shift) ((mask) << (shift)) #define I40E_AQ_LARGE_BUF 512 @@ -386,6 +395,10 @@ struct iavf_tx_desc { #define IAVF_TX_DESC_BSIZE_MASK \ (IAVF_TX_DESC_BSIZE_MAX << IAVF_TX_DESC_BSIZE_SHIFT) +#define IAVF_TX_CTX_DESC_CMD_TSO 0x10 +#define IAVF_TX_CTX_DESC_TLEN_SHIFT 30 +#define IAVF_TX_CTX_DESC_MSS_SHIFT 50 + #define IAVF_TX_DESC_L2TAG1_SHIFT 48 #define IAVF_TX_DESC_L2TAG1_MASK (0xffff << IAVF_TX_DESC_L2TAG1_SHIFT) } __packed __aligned(16); @@ -460,6 +473,7 @@ struct iavf_rx_wb_desc_32 { #define IAVF_TX_PKT_DESCS 8 #define IAVF_TX_QUEUE_ALIGN 128 #define IAVF_RX_QUEUE_ALIGN 128 +#define IAVF_TX_PKT_MAXSIZE (MCLBYTES * IAVF_TX_PKT_DESCS) #define IAVF_HARDMTU 9712 /* 9726 - ETHER_HDR_LEN */ @@ -526,6 +540,7 @@ struct iavf_tx_map { struct iavf_tx_ring { unsigned int txr_prod; unsigned int txr_cons; + struct ifqueue *txr_ifq; struct iavf_tx_map *txr_maps; struct iavf_dmamem txr_mem; @@ -541,6 +556,7 @@ struct iavf_rx_map { struct iavf_rx_ring { struct iavf_softc *rxr_sc; + struct ifiqueue *rxr_ifiq; struct if_rxring rxr_acct; struct timeout rxr_refill; @@ -558,6 +574,15 @@ struct iavf_rx_ring { unsigned int rxr_qid; }; +struct iavf_vector { + struct iavf_softc *iv_sc; + struct iavf_rx_ring *iv_rxr; + struct iavf_tx_ring *iv_txr; + int iv_qid; + void *iv_ihc; + char iv_name[16]; +} __aligned(CACHE_LINE_SIZE); + struct iavf_softc { struct device sc_dev; struct arpcom sc_ac; @@ -569,6 +594,7 @@ struct iavf_softc { pci_intr_handle_t sc_ih; void *sc_ihc; pcitag_t sc_tag; + struct intrmap *sc_intrmap; bus_dma_tag_t sc_dmat; bus_space_tag_t sc_memt; @@ -612,6 +638,9 @@ struct iavf_softc { unsigned int sc_tx_ring_ndescs; unsigned int sc_rx_ring_ndescs; unsigned int sc_nqueues; /* 1 << sc_nqueues */ + unsigned int sc_nintrs; + + struct iavf_vector *sc_vectors; struct rwlock
sc_cfg_lock; unsigned int sc_dead; @@ -644,6 +673,7 @@ static int iavf_add_del_addr(struct iavf_softc *, uint8_t *, int); static int iavf_process_arq(struct iavf_softc *, int); static int iavf_match(struct device *, void *, void *); +static int iavf_setup_interrupts(struct iavf_softc *, struct pci_attach_args *); static void iavf_attach(struct device *, struct device *, void *); static int iavf_media_change(struct ifnet *); @@ -652,6 +682,7 @@ static void iavf_watchdog(struct ifnet *); static int iavf_ioctl(struct ifnet *, u_long, caddr_t); static void iavf_start(struct ifqueue *); static int iavf_intr(void *); +static int iavf_intr_vector(void *); static int iavf_up(struct iavf_softc *); static int iavf_down(struct iavf_softc *); static int iavf_iff(struct iavf_softc *); @@ -715,9 +746,17 @@ static const struct iavf_aq_regs iavf_aq_regs = { I40E_VFINT_DYN_CTL0_CLEARPBA_MASK | \ (IAVF_NOITR << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)); \ iavf_wr((_s), I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK) +#define iavf_queue_intr_enable(_s, _q) \ + iavf_wr((_s), I40E_VFINT_DYN_CTLN1((_q)), \ + I40E_VFINT_DYN_CTLN1_INTENA_MASK | \ + I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | \ + (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)) +#define iavf_queue_intr_disable(_s, _q) \ + iavf_wr((_s), I40E_VFINT_DYN_CTLN1((_q)), \ + (IAVF_NOITR << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)) #define iavf_nqueues(_sc) (1 << (_sc)->sc_nqueues) -#define iavf_allqueues(_sc) ((1 << ((_sc)->sc_nqueues+1)) - 1) +#define iavf_allqueues(_sc) ((1 << (iavf_nqueues(_sc))) - 1) #ifdef __LP64__ #define iavf_dmamem_hi(_ixm) (uint32_t)(IAVF_DMA_DVA(_ixm) >> 32) @@ -757,6 +796,92 @@ iavf_match(struct device *parent, void *match, void *aux) return (pci_matchbyid(aux, iavf_devices, nitems(iavf_devices))); } +static int +iavf_intr_vector(void *v) +{ + struct iavf_vector *iv = v; + struct iavf_softc *sc = iv->iv_sc; + + struct ifnet *ifp = &sc->sc_ac.ac_if; + int rv = 0; + + if (ISSET(ifp->if_flags, IFF_RUNNING)) { + rv |= iavf_rxeof(sc, iv->iv_rxr->rxr_ifiq); + rv |= iavf_txeof(sc, iv->iv_txr->txr_ifq); + } + + iavf_queue_intr_enable(sc, iv->iv_qid); + + return rv; +} + +static int +iavf_setup_interrupts(struct iavf_softc *sc, struct pci_attach_args *pa) +{ + unsigned int i, v, nqueues = iavf_nqueues(sc); + struct iavf_vector *iv; + pci_intr_handle_t ih; + + sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih, + IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc)); + if (sc->sc_ihc == NULL) { + printf("%s: unable to establish interrupt handler\n", + DEVNAME(sc)); + return -1; + } + + sc->sc_vectors = mallocarray(sizeof(*sc->sc_vectors), nqueues, + M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); + if (sc->sc_vectors == NULL) { + printf("%s: unable to allocate vectors\n", DEVNAME(sc)); + return -1; + } + + for (i = 0; i < nqueues; i++) { + iv = &sc->sc_vectors[i]; + iv->iv_sc = sc; + iv->iv_qid = i; + snprintf(iv->iv_name, sizeof(iv->iv_name), "%s:%u", + DEVNAME(sc), i); + } + + if (sc->sc_intrmap) { + for (i = 0; i < nqueues; i++) { + iv = &sc->sc_vectors[i]; + v = i + 1; /* 0 is used for adminq */ + + if (pci_intr_map_msix(pa, v, &ih)) { + printf("%s: unable to map msi-x vector %d\n", + DEVNAME(sc), v); + goto free_vectors; + } + + iv->iv_ihc = pci_intr_establish_cpu(sc->sc_pc, ih, + IPL_NET | IPL_MPSAFE, + intrmap_cpu(sc->sc_intrmap, i), + iavf_intr_vector, iv, iv->iv_name); + if (iv->iv_ihc == NULL) { + printf("%s: unable to establish interrupt %d\n", + DEVNAME(sc), v); + goto free_vectors; + } + } + } + + sc->sc_nintrs = nqueues + 1; + return 0; 
+free_vectors: + if (sc->sc_intrmap != NULL) { + for (i = 0; i < nqueues; i++) { + struct iavf_vector *iv = &sc->sc_vectors[i]; + if (iv->iv_ihc != NULL) + pci_intr_disestablish(sc->sc_pc, iv->iv_ihc); + } + } + free(sc->sc_vectors, M_DEVBUF, nqueues * sizeof(*sc->sc_vectors)); + return -1; +} + void iavf_attach(struct device *parent, struct device *self, void *aux) { @@ -764,7 +889,8 @@ iavf_attach(struct device *parent, struct device *self, void *aux) struct ifnet *ifp = &sc->sc_ac.ac_if; struct pci_attach_args *pa = aux; pcireg_t memtype; - int tries; + int nmsix, tries; + unsigned int nqueues; rw_init(&sc->sc_cfg_lock, "iavfcfg"); @@ -852,13 +978,20 @@ iavf_attach(struct device *parent, struct device *self, void *aux) goto free_scratch; } - if (iavf_config_irq_map(sc) != 0) { - printf(", timeout waiting for IRQ map response"); - goto free_scratch; - } - /* msix only? */ - if (pci_intr_map_msix(pa, 0, &sc->sc_ih) != 0) { + if (pci_intr_map_msix(pa, 0, &sc->sc_ih) == 0) { + nmsix = pci_intr_msix_count(pa); + if (nmsix > 1) { /* we used 1 (the 0th) for the adminq */ + nmsix--; + + sc->sc_intrmap = intrmap_create(&sc->sc_dev, + nmsix, IAVF_MAX_VECTORS, INTRMAP_POWEROF2); + nqueues = intrmap_count(sc->sc_intrmap); + KASSERT(nqueues > 0); + KASSERT(powerof2(nqueues)); + sc->sc_nqueues = fls(nqueues) - 1; + } + } else { printf(", unable to map interrupt\n"); goto free_scratch; } @@ -868,17 +1001,23 @@ iavf_attach(struct device *parent, struct device *self, void *aux) if (memcmp(sc->sc_ac.ac_enaddr, etheranyaddr, ETHER_ADDR_LEN) == 0) ether_fakeaddr(ifp); - printf(", %s, address %s\n", pci_intr_string(sc->sc_pc, sc->sc_ih), - ether_sprintf(sc->sc_ac.ac_enaddr)); + nqueues = iavf_nqueues(sc); + printf(", %s, %d queue%s, address %s\n", + pci_intr_string(sc->sc_pc, sc->sc_ih), + nqueues, (nqueues > 1 ? 
"s" : ""), + ether_sprintf(sc->sc_ac.ac_enaddr)); - sc->sc_ihc = pci_intr_establish(sc->sc_pc, sc->sc_ih, - IPL_NET | IPL_MPSAFE, iavf_intr, sc, DEVNAME(sc)); - if (sc->sc_ihc == NULL) { + if (iavf_setup_interrupts(sc, pa) != 0) { printf("%s: unable to establish interrupt handler\n", DEVNAME(sc)); goto free_scratch; } + if (iavf_config_irq_map(sc) != 0) { + printf(", timeout waiting for IRQ map response"); + goto free_scratch; + } + ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_xflags = IFXF_MPSAFE; @@ -891,10 +1030,9 @@ iavf_attach(struct device *parent, struct device *self, void *aux) ifq_init_maxlen(&ifp->if_snd, sc->sc_tx_ring_ndescs); ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; -#if 0 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | - IFCAP_CSUM_UDPv4; -#endif + IFCAP_CSUM_UDPv4 | IFCAP_CSUM_UDPv6 | IFCAP_CSUM_TCPv6; + ifp->if_capabilities |= IFCAP_TSOv4 | IFCAP_TSOv6; ifmedia_init(&sc->sc_media, 0, iavf_media_change, iavf_media_status); @@ -1170,13 +1308,13 @@ static int iavf_up(struct iavf_softc *sc) { struct ifnet *ifp = &sc->sc_ac.ac_if; + struct iavf_vector *iv; struct iavf_rx_ring *rxr; struct iavf_tx_ring *txr; unsigned int nqueues, i; int rv = ENOMEM; nqueues = iavf_nqueues(sc); - KASSERT(nqueues == 1); /* XXX */ rw_enter_write(&sc->sc_cfg_lock); if (sc->sc_dead) { @@ -1195,8 +1333,11 @@ iavf_up(struct iavf_softc *sc) goto free; } - ifp->if_iqs[i]->ifiq_softc = rxr; - ifp->if_ifqs[i]->ifq_softc = txr; + iv = &sc->sc_vectors[i]; + iv->iv_rxr = ifp->if_iqs[i]->ifiq_softc = rxr; + iv->iv_txr = ifp->if_ifqs[i]->ifq_softc = txr; + rxr->rxr_ifiq = ifp->if_iqs[i]; + txr->txr_ifq = ifp->if_ifqs[i]; iavf_rxfill(sc, rxr); } @@ -1210,6 +1351,9 @@ iavf_up(struct iavf_softc *sc) if (iavf_queue_select(sc, IAVF_VC_OP_ENABLE_QUEUES) != 0) goto down; + for (i = 0; i < nqueues; i++) + iavf_queue_intr_enable(sc, i); + SET(ifp->if_flags, IFF_RUNNING); iavf_wr(sc, I40E_VFINT_ITR01(0), 0x7a); @@ -1235,6 +1379,9 @@ free: iavf_txr_free(sc, txr); iavf_rxr_free(sc, rxr); + iv = &sc->sc_vectors[i]; + iv->iv_rxr = ifp->if_iqs[i]->ifiq_softc = NULL; + iv->iv_txr = ifp->if_ifqs[i]->ifq_softc = NULL; } rw_exit_write(&sc->sc_cfg_lock); return (rv); @@ -1273,7 +1420,7 @@ iavf_config_promisc_mode(struct iavf_softc *sc, int unicast, int multicast) BUS_DMASYNC_PREREAD); iavf_atq_post(sc, &iaq); - rv = iavf_arq_wait(sc, 250); + rv = iavf_arq_wait(sc, 3000); if (rv != IAVF_VC_RC_SUCCESS) { printf("%s: CONFIG_PROMISC_MODE failed: %d\n", DEVNAME(sc), rv); return (1); @@ -1357,6 +1504,7 @@ static int iavf_down(struct iavf_softc *sc) { struct ifnet *ifp = &sc->sc_ac.ac_if; + struct iavf_vector *iv; struct iavf_rx_ring *rxr; struct iavf_tx_ring *txr; unsigned int nqueues, i; @@ -1386,6 +1534,8 @@ iavf_down(struct iavf_softc *sc) /* make sure no hw generated work is still in flight */ intr_barrier(sc->sc_ihc); for (i = 0; i < nqueues; i++) { + iavf_queue_intr_disable(sc, i); + rxr = ifp->if_iqs[i]->ifiq_softc; txr = ifp->if_ifqs[i]->ifq_softc; @@ -1395,8 +1545,9 @@ iavf_down(struct iavf_softc *sc) } for (i = 0; i < nqueues; i++) { - rxr = ifp->if_iqs[i]->ifiq_softc; - txr = ifp->if_ifqs[i]->ifq_softc; + iv = &sc->sc_vectors[i]; + txr = iv->iv_txr; + rxr = iv->iv_rxr; iavf_txr_clean(sc, txr); iavf_rxr_clean(sc, rxr); @@ -1404,8 +1555,8 @@ iavf_down(struct iavf_softc *sc) iavf_txr_free(sc, txr); iavf_rxr_free(sc, rxr); - ifp->if_iqs[i]->ifiq_softc = NULL; - ifp->if_ifqs[i]->ifq_softc = NULL; + iv->iv_rxr = ifp->if_iqs[i]->ifiq_softc = NULL; + 
iv->iv_txr = ifp->if_ifqs[i]->ifq_softc = NULL; } /* unmask */ @@ -1561,7 +1712,7 @@ iavf_txr_alloc(struct iavf_softc *sc, unsigned int qid) txm = &maps[i]; if (bus_dmamap_create(sc->sc_dmat, - IAVF_HARDMTU, IAVF_TX_PKT_DESCS, IAVF_HARDMTU, 0, + MAXMCLBYTES, IAVF_TX_PKT_DESCS, IAVF_MAX_DMA_SEG_SIZE, 0, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT, &txm->txm_map) != 0) goto uncreate; @@ -1656,6 +1807,83 @@ iavf_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m) BUS_DMA_STREAMING | BUS_DMA_NOWAIT)); } +static uint64_t +iavf_tx_setup_offload(struct mbuf *m0, struct iavf_tx_ring *txr, + unsigned int prod) +{ + struct ether_extracted ext; + uint64_t hlen; + uint64_t offload = 0; + + + if (ISSET(m0->m_flags, M_VLANTAG)) { + uint64_t vtag = m0->m_pkthdr.ether_vtag; + offload |= IAVF_TX_DESC_CMD_IL2TAG1; + offload |= vtag << IAVF_TX_DESC_L2TAG1_SHIFT; + } + + if (!ISSET(m0->m_pkthdr.csum_flags, + M_IPV4_CSUM_OUT|M_TCP_CSUM_OUT|M_UDP_CSUM_OUT|M_TCP_TSO)) + return (offload); + + ether_extract_headers(m0, &ext); + + if (ext.ip4) { + offload |= ISSET(m0->m_pkthdr.csum_flags, M_IPV4_CSUM_OUT) ? + IAVF_TX_DESC_CMD_IIPT_IPV4_CSUM : + IAVF_TX_DESC_CMD_IIPT_IPV4; +#ifdef INET6 + } else if (ext.ip6) { + offload |= IAVF_TX_DESC_CMD_IIPT_IPV6; +#endif + } else { + panic("CSUM_OUT set for non-IP packet"); + /* NOTREACHED */ + } + hlen = ext.iphlen; + + offload |= (ETHER_HDR_LEN >> 1) << IAVF_TX_DESC_MACLEN_SHIFT; + offload |= (hlen >> 2) << IAVF_TX_DESC_IPLEN_SHIFT; + + if (ext.tcp && ISSET(m0->m_pkthdr.csum_flags, M_TCP_CSUM_OUT)) { + offload |= IAVF_TX_DESC_CMD_L4T_EOFT_TCP; + offload |= (uint64_t)(ext.tcphlen >> 2) + << IAVF_TX_DESC_L4LEN_SHIFT; + } else if (ext.udp && ISSET(m0->m_pkthdr.csum_flags, M_UDP_CSUM_OUT)) { + offload |= IAVF_TX_DESC_CMD_L4T_EOFT_UDP; + offload |= (uint64_t)(sizeof(*ext.udp) >> 2) + << IAVF_TX_DESC_L4LEN_SHIFT; + } + + if (ISSET(m0->m_pkthdr.csum_flags, M_TCP_TSO)) { + if (ext.tcp && m0->m_pkthdr.ph_mss > 0) { + struct iavf_tx_desc *ring, *txd; + uint64_t cmd = 0, paylen, outlen; + + hlen += ext.tcphlen; + + /* + * The MSS should not be set to a lower value than 64 + * or larger than 9668 bytes. + */ + outlen = MIN(9668, MAX(64, m0->m_pkthdr.ph_mss)); + paylen = m0->m_pkthdr.len - ETHER_HDR_LEN - hlen; + ring = IAVF_DMA_KVA(&txr->txr_mem); + txd = &ring[prod]; + + cmd |= IAVF_TX_DESC_DTYPE_CONTEXT; + cmd |= IAVF_TX_CTX_DESC_CMD_TSO; + cmd |= paylen << IAVF_TX_CTX_DESC_TLEN_SHIFT; + cmd |= outlen << IAVF_TX_CTX_DESC_MSS_SHIFT; + + htolem64(&txd->addr, 0); + htolem64(&txd->cmd, cmd); + } + } + + return (offload); +} + static void iavf_start(struct ifqueue *ifq) { @@ -1668,6 +1896,7 @@ iavf_start(struct ifqueue *ifq) struct mbuf *m; uint64_t cmd; uint64_t vlan_cmd; + uint64_t offload; unsigned int prod, free, last, i; unsigned int mask; int post = 0; @@ -1693,7 +1922,8 @@ iavf_start(struct ifqueue *ifq) mask = sc->sc_tx_ring_ndescs - 1; for (;;) { - if (free <= IAVF_TX_PKT_DESCS) { + /* We need one extra descriptor for TSO packets. 
*/ + if (free <= (IAVF_TX_PKT_DESCS + 1)) { ifq_set_oactive(ifq); break; } @@ -1702,9 +1932,17 @@ iavf_start(struct ifqueue *ifq) if (m == NULL) break; + offload = iavf_tx_setup_offload(m, txr, prod); + txm = &txr->txr_maps[prod]; map = txm->txm_map; + if (ISSET(m->m_pkthdr.csum_flags, M_TCP_TSO)) { + prod++; + prod &= mask; + free--; + } + if (iavf_load_mbuf(sc->sc_dmat, map, m) != 0) { ifq->ifq_errors++; m_freem(m); @@ -1727,7 +1965,7 @@ iavf_start(struct ifqueue *ifq) cmd = (uint64_t)map->dm_segs[i].ds_len << IAVF_TX_DESC_BSIZE_SHIFT; cmd |= IAVF_TX_DESC_DTYPE_DATA | IAVF_TX_DESC_CMD_ICRC | - vlan_cmd; + vlan_cmd | offload; htolem64(&txd->addr, map->dm_segs[i].ds_addr); htolem64(&txd->cmd, cmd); @@ -1938,6 +2176,24 @@ iavf_rxr_free(struct iavf_softc *sc, struct iavf_rx_ring *rxr) free(rxr, M_DEVBUF, sizeof(*rxr)); } +static void +iavf_rx_checksum(struct mbuf *m, uint64_t word) +{ + if (!ISSET(word, IAVF_RX_DESC_L3L4P)) + return; + + if (ISSET(word, IAVF_RX_DESC_IPE)) + return; + + m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK; + + if (ISSET(word, IAVF_RX_DESC_L4E)) + return; + + m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK | M_UDP_CSUM_IN_OK; +} + + static int iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq) { @@ -2011,6 +2267,7 @@ iavf_rxeof(struct iavf_softc *sc, struct ifiqueue *ifiq) } if (!ISSET(word, IAVF_RX_DESC_RXE | IAVF_RX_DESC_OVERSIZE)) { + iavf_rx_checksum(m, word); ml_enqueue(&ml, m); } else { ifp->if_ierrors++; /* XXX */ @@ -2555,25 +2812,45 @@ iavf_config_irq_map(struct iavf_softc *sc) struct iavf_aq_desc iaq; struct iavf_vc_vector_map *vec; struct iavf_vc_irq_map_info *map; + struct iavf_vector *iv; + unsigned int num_vec = 0; int tries; memset(&iaq, 0, sizeof(iaq)); iaq.iaq_flags = htole16(IAVF_AQ_BUF | IAVF_AQ_RD); iaq.iaq_opcode = htole16(IAVF_AQ_OP_SEND_TO_PF); iaq.iaq_vc_opcode = htole32(IAVF_VC_OP_CONFIG_IRQ_MAP); - iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec)); + iaq.iaq_datalen = htole16(sizeof(*map) + sizeof(*vec) * sc->sc_nintrs); iavf_aq_dva(&iaq, IAVF_DMA_DVA(&sc->sc_scratch)); map = IAVF_DMA_KVA(&sc->sc_scratch); - map->num_vectors = htole16(1); vec = map->vecmap; - vec[0].vsi_id = htole16(sc->sc_vsi_id); - vec[0].vector_id = 0; - vec[0].rxq_map = htole16(iavf_allqueues(sc)); - vec[0].txq_map = htole16(iavf_allqueues(sc)); - vec[0].rxitr_idx = htole16(IAVF_NOITR); - vec[0].txitr_idx = htole16(IAVF_NOITR); + if (sc->sc_nintrs == 1) { + vec[num_vec].vsi_id = htole16(sc->sc_vsi_id); + vec[num_vec].vector_id = htole16(num_vec); + vec[num_vec].rxq_map = htole16(iavf_allqueues(sc)); + vec[num_vec].txq_map = htole16(iavf_allqueues(sc)); + vec[num_vec].rxitr_idx = htole16(IAVF_NOITR); + vec[num_vec].txitr_idx = htole16(IAVF_NOITR); + num_vec++; + } else if (sc->sc_nintrs > 1) { + for (; num_vec < sc->sc_nintrs - 1; num_vec++) { + iv = &sc->sc_vectors[num_vec]; + vec[num_vec].vsi_id = htole16(sc->sc_vsi_id); + vec[num_vec].vector_id = htole16(num_vec + 1); + vec[num_vec].rxq_map = htole16(1 << iv->iv_qid); + vec[num_vec].txq_map = htole16(1 << iv->iv_qid); + vec[num_vec].rxitr_idx = htole16(IAVF_ITR0); + vec[num_vec].txitr_idx = htole16(IAVF_ITR1); + } + vec[num_vec].vsi_id = htole16(sc->sc_vsi_id); + vec[num_vec].vector_id = htole16(0); + vec[num_vec].rxq_map = htole16(0); + vec[num_vec].txq_map = htole16(0); + num_vec++; + } + map->num_vectors = htole16(num_vec); bus_dmamap_sync(sc->sc_dmat, IAVF_DMA_MAP(&sc->sc_scratch), 0, IAVF_DMA_LEN(&sc->sc_scratch), BUS_DMASYNC_PREREAD); -- Yuichiro NAITO (naito.yuichiro@gmail.com)
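A quick standalone check of the iavf_allqueues() change (not part of the patch; it only assumes that sc_nqueues stores log2 of the queue count, as the struct comment "/* 1 << sc_nqueues */" says) can be compiled in userland to compare the old and new mask formulas:

```c
/*
 * Standalone sketch (not from the patch): compare the old and new
 * iavf_allqueues() bitmasks.  Assumes sc_nqueues holds log2 of the
 * queue count, i.e. iavf_nqueues() == 1 << sc_nqueues.
 */
#include <stdio.h>

#define OLD_ALLQUEUES(n)	((1 << ((n) + 1)) - 1)	/* previous formula */
#define NQUEUES(n)		(1 << (n))		/* iavf_nqueues() */
#define NEW_ALLQUEUES(n)	((1 << NQUEUES(n)) - 1)	/* patched formula */

int
main(void)
{
	unsigned int n;

	for (n = 0; n <= 2; n++)
		printf("sc_nqueues=%u queues=%u old=0x%x new=0x%x\n",
		    n, NQUEUES(n), OLD_ALLQUEUES(n), NEW_ALLQUEUES(n));

	/*
	 * For sc_nqueues=0 and 1 (1 and 2 queues) both formulas agree,
	 * but for sc_nqueues=2 (4 queues) the old mask is 0x7, covering
	 * only queues 0-2, while the new mask is 0xf.
	 */
	return 0;
}
```

That difference only shows up once more than two queues are configured, which is presumably why the patch redefines the macro in terms of iavf_nqueues().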