ix: preparing vf support
On Thu, 24 Oct 2024 15:06:54 +0900 (JST)
YASUOKA Masahiko <yasuoka@openbsd.org> wrote:
> On Mon, 02 Sep 2024 13:27:04 +0900 (JST)
> YASUOKA Masahiko <yasuoka@openbsd.org> wrote:
>> I'm committing ixv(4) from NAITO Yuichiro. Most of the changes can
>> be done separately from ix(4), but the diff below is the part which
>> actually affects ix(4).
>
> This is the second step. This diff will not affect the existing ix(4)
> because it only changes "mailbox" behavior. The "mailbox" is used only
> by the primary function or a virtual function, and our ix(4) doesn't
> support either yet.
This is the final step. ixv(4) is still disabled in the config.
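
To test it, the driver would be enabled in the kernel config along these
lines (a sketch for illustration; the exact config change is not part of
this diff):

	ixv*	at pci?			# Intel 8259x virtual function
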
Index: sys/dev/pci/files.pci
===================================================================
RCS file: /cvs/src/sys/dev/pci/files.pci,v
diff -u -p -u -p -r1.367 files.pci
--- sys/dev/pci/files.pci 24 Oct 2024 18:52:59 -0000 1.367
+++ sys/dev/pci/files.pci 27 Oct 2024 05:43:10 -0000
@@ -350,13 +350,19 @@ file dev/pci/ixgb_hw.c ixgb
# Intel 82598 10GbE
device ix: ether, ifnet, ifmedia, intrmap, stoeplitz
attach ix at pci
-file dev/pci/if_ix.c ix
-file dev/pci/ixgbe.c ix
-file dev/pci/ixgbe_82598.c ix
-file dev/pci/ixgbe_82599.c ix
-file dev/pci/ixgbe_x540.c ix
-file dev/pci/ixgbe_x550.c ix
-file dev/pci/ixgbe_phy.c ix
+file dev/pci/if_ix.c ix | ixv
+file dev/pci/ixgbe.c ix | ixv
+file dev/pci/ixgbe_82598.c ix | ixv
+file dev/pci/ixgbe_82599.c ix | ixv
+file dev/pci/ixgbe_x540.c ix | ixv
+file dev/pci/ixgbe_x550.c ix | ixv
+file dev/pci/ixgbe_phy.c ix | ixv
+
+# Intel 82599/X540/X550 Virtual Functions
+device ixv: ether, ifnet, ifmedia, intrmap, stoeplitz
+attach ixv at pci
+file dev/pci/if_ixv.c ixv
+file dev/pci/ixgbe_vf.c ixv
# Intel Ethernet 700 Series
device ixl: ether, ifnet, ifmedia, intrmap, stoeplitz
Index: sys/dev/pci/if_ixv.c
===================================================================
RCS file: sys/dev/pci/if_ixv.c
diff -N sys/dev/pci/if_ixv.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/dev/pci/if_ixv.c 27 Oct 2024 05:43:10 -0000
@@ -0,0 +1,1573 @@
+/* $OpenBSD$ */
+
+/******************************************************************************
+
+ Copyright (c) 2001-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <dev/pci/if_ix.h>
+#include <dev/pci/ixgbe_type.h>
+#include <dev/pci/ixgbe.h>
+
+/************************************************************************
+ * Driver version
+ ************************************************************************/
+char ixv_driver_version[] = "1.5.32";
+
+/************************************************************************
+ * PCI Device ID Table
+ *
+ * Used by probe to select which devices to attach
+ *
+ * { Vendor ID, Device ID }
+ ************************************************************************/
+const struct pci_matchid ixv_devices[] = {
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_82599VF},
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X540_VF},
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550_VF},
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_X_VF},
+ {PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_X550EM_A_VF}
+};
+
+/************************************************************************
+ * Function prototypes
+ ************************************************************************/
+static int ixv_probe(struct device *, void *, void *);
+static void ixv_identify_hardware(struct ix_softc *sc);
+static void ixv_attach(struct device *, struct device *, void *);
+static int ixv_detach(struct device *, int);
+static int ixv_ioctl(struct ifnet *, u_long, caddr_t);
+static void ixv_watchdog(struct ifnet *);
+static void ixv_init(struct ix_softc *);
+static void ixv_stop(void *);
+static int ixv_allocate_msix(struct ix_softc *);
+static void ixv_setup_interface(struct device *, struct ix_softc *);
+static int ixv_negotiate_api(struct ix_softc *);
+
+static void ixv_initialize_transmit_units(struct ix_softc *);
+static void ixv_initialize_receive_units(struct ix_softc *);
+static void ixv_initialize_rss_mapping(struct ix_softc *);
+
+static void ixv_enable_intr(struct ix_softc *);
+static void ixv_disable_intr(struct ix_softc *);
+static void ixv_iff(struct ix_softc *);
+static void ixv_set_ivar(struct ix_softc *, uint8_t, uint8_t, int8_t);
+static void ixv_configure_ivars(struct ix_softc *);
+static uint8_t *ixv_mc_array_itr(struct ixgbe_hw *, uint8_t **, uint32_t *);
+
+static void ixv_setup_vlan_support(struct ix_softc *);
+
+/* The MSI-X Interrupt handlers */
+static int ixv_msix_que(void *);
+static int ixv_msix_mbx(void *);
+
+/* Functions shared between ixv and ix. */
+void ixgbe_start(struct ifqueue *ifq);
+int ixgbe_activate(struct device *, int);
+int ixgbe_allocate_queues(struct ix_softc *);
+int ixgbe_setup_transmit_structures(struct ix_softc *);
+int ixgbe_setup_receive_structures(struct ix_softc *);
+void ixgbe_free_transmit_structures(struct ix_softc *);
+void ixgbe_free_receive_structures(struct ix_softc *);
+int ixgbe_txeof(struct ix_txring *);
+int ixgbe_rxeof(struct ix_rxring *);
+void ixgbe_rxrefill(void *);
+void ixgbe_update_link_status(struct ix_softc *);
+int ixgbe_allocate_pci_resources(struct ix_softc *);
+void ixgbe_free_pci_resources(struct ix_softc *);
+void ixgbe_media_status(struct ifnet *, struct ifmediareq *);
+int ixgbe_media_change(struct ifnet *);
+void ixgbe_add_media_types(struct ix_softc *);
+int ixgbe_get_sffpage(struct ix_softc *, struct if_sffpage *);
+int ixgbe_rxrinfo(struct ix_softc *, struct if_rxrinfo *);
+
+#if NKSTAT > 0
+static void ixv_kstats(struct ix_softc *);
+static void ixv_rxq_kstats(struct ix_softc *, struct ix_rxring *);
+static void ixv_txq_kstats(struct ix_softc *, struct ix_txring *);
+static void ixv_kstats_tick(void *);
+#endif
+
+/************************************************************************
+ * Value Definitions
+ ************************************************************************/
+/*
+ * Default value for the Extended Interrupt Throttling Register.
+ * 128 * 2.048 usec is the minimum interrupt interval on a 10GbE link.
+ * The minimum interrupt interval can be set from 0 to 2044 in
+ * increments of 4.
+ */
+#define IXGBE_EITR_DEFAULT 128
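+/*
+ * Worked example, using the granularity quoted above: 128 * 2.048 usec
+ * =~ 262 usec between interrupts, i.e. at most roughly 3800 interrupts
+ * per second per vector.
+ */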
+
+/*********************************************************************
+ * OpenBSD Device Interface Entry Points
+ *********************************************************************/
+
+struct cfdriver ixv_cd = {
+ NULL, "ixv", DV_IFNET
+};
+
+const struct cfattach ixv_ca = {
+ sizeof(struct ix_softc), ixv_probe, ixv_attach, ixv_detach,
+ ixgbe_activate
+};
+
+/************************************************************************
+ * ixv_probe - Device identification routine
+ *
+ * Determines if the driver should be loaded on
+ * adapter based on its PCI vendor/device ID.
+ *
+ * return nonzero on match, 0 otherwise
+ ************************************************************************/
+static int
+ixv_probe(struct device *parent, void *match, void *aux)
+{
+ INIT_DEBUGOUT("ixv_probe: begin");
+
+ return (pci_matchbyid((struct pci_attach_args *)aux, ixv_devices,
+ nitems(ixv_devices)));
+}
+
+/*********************************************************************
+ *
+ * Determine hardware revision.
+ *
+ **********************************************************************/
+static void
+ixv_identify_hardware(struct ix_softc *sc)
+{
+ struct ixgbe_osdep *os = &sc->osdep;
+ struct pci_attach_args *pa = &os->os_pa;
+ uint32_t reg;
+
+ /* Save off the information about this board */
+ sc->hw.vendor_id = PCI_VENDOR(pa->pa_id);
+ sc->hw.device_id = PCI_PRODUCT(pa->pa_id);
+
+ reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_CLASS_REG);
+ sc->hw.revision_id = PCI_REVISION(reg);
+
+ reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
+ sc->hw.subsystem_vendor_id = PCI_VENDOR(reg);
+ sc->hw.subsystem_device_id = PCI_PRODUCT(reg);
+
+ sc->num_segs = IXGBE_82599_SCATTER;
+}
+
+/************************************************************************
+ * ixv_attach - Device initialization routine
+ *
+ * Called when the driver is being loaded.
+ * Identifies the type of hardware, allocates all resources
+ * and initializes the hardware.
+ ************************************************************************/
+static void
+ixv_attach(struct device *parent, struct device *self, void *aux)
+{
+ struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+ struct ix_softc *sc = (struct ix_softc *)self;
+ struct ixgbe_hw *hw;
+ int error;
+
+ INIT_DEBUGOUT("ixv_attach: begin");
+
+ sc->osdep.os_sc = sc;
+ sc->osdep.os_pa = *pa;
+
+ rw_init(&sc->sfflock, "ixvsff");
+
+ /* Allocate, clear, and link in our adapter structure */
+ sc->dev = *self;
+ sc->hw.back = sc;
+ hw = &sc->hw;
+
+ /* Use the default number of transmit and receive descriptors */
+ sc->num_tx_desc = DEFAULT_TXD;
+ sc->num_rx_desc = DEFAULT_RXD;
+
+ ixv_identify_hardware(sc);
+
+#if NKSTAT > 0
+ ixv_kstats(sc);
+#endif
+
+ /* Allocate multicast array memory */
+ sc->mta = mallocarray(IXGBE_ETH_LENGTH_OF_ADDRESS,
+ IXGBE_MAX_MULTICAST_ADDRESSES_VF, M_DEVBUF, M_NOWAIT);
+ if (sc->mta == NULL) {
+ printf("Can not allocate multicast setup array\n");
+ return;
+ }
+
+ /* Do base PCI setup - map BAR0 */
+ if (ixgbe_allocate_pci_resources(sc)) {
+ printf("ixgbe_allocate_pci_resources() failed!\n");
+ goto err_out;
+ }
+
+ /* Allocate our TX/RX Queues */
+ if (ixgbe_allocate_queues(sc)) {
+ printf("ixgbe_allocate_queues() failed!\n");
+ goto err_out;
+ }
+
+ /* A subset of set_mac_type */
+ switch (hw->device_id) {
+ case IXGBE_DEV_ID_82599_VF:
+ hw->mac.type = ixgbe_mac_82599_vf;
+ break;
+ case IXGBE_DEV_ID_X540_VF:
+ hw->mac.type = ixgbe_mac_X540_vf;
+ break;
+ case IXGBE_DEV_ID_X550_VF:
+ hw->mac.type = ixgbe_mac_X550_vf;
+ break;
+ case IXGBE_DEV_ID_X550EM_X_VF:
+ hw->mac.type = ixgbe_mac_X550EM_x_vf;
+ break;
+ case IXGBE_DEV_ID_X550EM_A_VF:
+ hw->mac.type = ixgbe_mac_X550EM_a_vf;
+ break;
+ default:
+ /* Shouldn't get here since probe succeeded */
+ printf("Unknown device ID!\n");
+ goto err_out;
+ }
+
+ /* Initialize the shared code */
+ if (ixgbe_init_ops_vf(hw)) {
+ printf("ixgbe_init_ops_vf() failed!\n");
+ goto err_out;
+ }
+
+ /* Setup the mailbox */
+ ixgbe_init_mbx_params_vf(hw);
+
+ /* Set the right number of segments */
+ sc->num_segs = IXGBE_82599_SCATTER;
+
+ error = hw->mac.ops.reset_hw(hw);
+ switch (error) {
+ case 0:
+ break;
+ case IXGBE_ERR_RESET_FAILED:
+ printf("...reset_hw() failure: Reset Failed!\n");
+ goto err_out;
+ default:
+ printf("...reset_hw() failed with error %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = hw->mac.ops.init_hw(hw);
+ if (error) {
+ printf("...init_hw() failed with error %d\n",
+ error);
+ goto err_out;
+ }
+
+ /* Negotiate mailbox API version */
+ if (ixv_negotiate_api(sc)) {
+ printf("Mailbox API negotiation failed during attach!\n");
+ goto err_out;
+ }
+
+ /* If no mac address was assigned, make a random one */
+ if (memcmp(hw->mac.addr, etheranyaddr, ETHER_ADDR_LEN) == 0) {
+ ether_fakeaddr(&sc->arpcom.ac_if);
+ bcopy(sc->arpcom.ac_enaddr, hw->mac.addr, ETHER_ADDR_LEN);
+ bcopy(sc->arpcom.ac_enaddr, hw->mac.perm_addr, ETHER_ADDR_LEN);
+ } else
+ bcopy(hw->mac.addr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
+
+ /* Setup OS specific network interface */
+ ixv_setup_interface(self, sc);
+
+ /* Setup MSI-X */
+ if (ixv_allocate_msix(sc)) {
+ printf("ixv_allocate_msix() failed!\n");
+ goto err_late;
+ }
+
+ /* Check if VF was disabled by PF */
+ if (hw->mac.ops.get_link_state(hw, &sc->link_enabled)) {
+ /* PF is not capable of controlling VF state. Enable the link. */
+ sc->link_enabled = TRUE;
+ }
+
+ /* Set an initial default flow control value */
+ sc->fc = ixgbe_fc_full;
+
+ INIT_DEBUGOUT("ixv_attach: end");
+
+ return;
+
+err_late:
+ ixgbe_free_transmit_structures(sc);
+ ixgbe_free_receive_structures(sc);
+err_out:
+ ixgbe_free_pci_resources(sc);
+ free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
+ IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+} /* ixv_attach */
+
+/************************************************************************
+ * ixv_detach - Device removal routine
+ *
+ * Called when the driver is being removed.
+ * Stops the adapter and deallocates all the resources
+ * that were allocated for driver operation.
+ *
+ * return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixv_detach(struct device *self, int flags)
+{
+ struct ix_softc *sc = (struct ix_softc *)self;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+
+ INIT_DEBUGOUT("ixv_detach: begin");
+
+ ixv_stop(sc);
+ ether_ifdetach(ifp);
+ if_detach(ifp);
+
+ free(sc->mta, M_DEVBUF, IXGBE_ETH_LENGTH_OF_ADDRESS *
+ IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+
+ ixgbe_free_pci_resources(sc);
+
+ ixgbe_free_transmit_structures(sc);
+ ixgbe_free_receive_structures(sc);
+
+ return (0);
+} /* ixv_detach */
+
+/*********************************************************************
+ * Watchdog entry point
+ *
+ **********************************************************************/
+static void
+ixv_watchdog(struct ifnet *ifp)
+{
+ struct ix_softc *sc = (struct ix_softc *)ifp->if_softc;
+ struct ix_txring *txr = sc->tx_rings;
+ struct ixgbe_hw *hw = &sc->hw;
+ int tx_hang = FALSE;
+ int i;
+
+ /*
+ * The timer is set to 5 every time ixgbe_start() queues a packet.
+ * Anytime all descriptors are clean the timer is set to 0.
+ */
+ for (i = 0; i < sc->num_queues; i++, txr++) {
+ if (txr->watchdog_timer == 0 || --txr->watchdog_timer)
+ continue;
+ tx_hang = TRUE;
+ break;
+ }
+ if (tx_hang == FALSE)
+ return;
+
+ printf("%s: Watchdog timeout -- resetting\n", ifp->if_xname);
+ txr = sc->tx_rings;
+ for (i = 0; i < sc->num_queues; i++, txr++) {
+ printf("%s: Queue(%d) tdh = %d, hw tdt = %d\n", ifp->if_xname, i,
+ IXGBE_READ_REG(hw, IXGBE_VFTDH(i)),
+ IXGBE_READ_REG(hw, txr->tail));
+ printf("%s: TX(%d) Next TX to Clean = %d\n", ifp->if_xname,
+ i, txr->next_to_clean);
+ }
+ ifp->if_flags &= ~IFF_RUNNING;
+
+ ixv_init(sc);
+}
+
+/************************************************************************
+ * ixv_init - Init entry point
+ *
+ * Used in two ways: It is used by the stack as an init entry
+ * point in network interface structure. It is also used
+ * by the driver as a hw/sw initialization routine to get
+ * to a consistent state.
+ ************************************************************************/
+static void
+ixv_init(struct ix_softc *sc)
+{
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ix_queue *que = sc->queues;
+ uint32_t mask;
+ int i, s, error = 0;
+
+ INIT_DEBUGOUT("ixv_init: begin");
+
+ s = splnet();
+
+ hw->adapter_stopped = FALSE;
+ hw->mac.ops.stop_adapter(hw);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ /* Get the latest mac address, User can use a LAA */
+ bcopy(sc->arpcom.ac_enaddr, sc->hw.mac.addr,
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ sc->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, 1);
+
+ /* Prepare transmit descriptors and buffers */
+ if (ixgbe_setup_transmit_structures(sc)) {
+ printf("Could not setup transmit structures\n");
+ ixv_stop(sc);
+ splx(s);
+ return;
+ }
+
+ /* Reset VF and renegotiate mailbox API version */
+ hw->mac.ops.reset_hw(hw);
+ error = ixv_negotiate_api(sc);
+ if (error) {
+ printf("Mailbox API negotiation failed in init!\n");
+ splx(s);
+ return;
+ }
+
+ ixv_initialize_transmit_units(sc);
+
+ /* Setup Multicast table */
+ ixv_iff(sc);
+
+ /* Use 2k clusters, even for jumbo frames */
+ sc->rx_mbuf_sz = MCLBYTES + ETHER_ALIGN;
+
+ /* Prepare receive descriptors and buffers */
+ if (ixgbe_setup_receive_structures(sc)) {
+ printf("Could not setup receive structures\n");
+ ixv_stop(sc);
+ splx(s);
+ return;
+ }
+
+ /* Configure RX settings */
+ ixv_initialize_receive_units(sc);
+
+ /* Set up VLAN offload and filter */
+ ixv_setup_vlan_support(sc);
+
+ /* Set up MSI-X routing */
+ ixv_configure_ivars(sc);
+
+ /* Set up auto-mask */
+ mask = (1 << sc->linkvec);
+ for (i = 0; i < sc->num_queues; i++, que++)
+ mask |= (1 << que->msix);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAM, mask);
+
+ /* Set moderation on the Link interrupt */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(sc->linkvec),
+ IXGBE_LINK_ITR);
+
+ /* Config/Enable Link */
+ error = hw->mac.ops.get_link_state(hw, &sc->link_enabled);
+ if (error) {
+ /* PF is not capable of controlling VF state. Enable the link. */
+ sc->link_enabled = TRUE;
+ } else if (sc->link_enabled == FALSE)
+ printf("VF is disabled by PF\n");
+
+ hw->mac.ops.check_link(hw, &sc->link_speed, &sc->link_up,
+ FALSE);
+
+ /* And now turn on interrupts */
+ ixv_enable_intr(sc);
+
+ /* Now inform the stack we're ready */
+ ifp->if_flags |= IFF_RUNNING;
+ for (i = 0; i < sc->num_queues; i++)
+ ifq_clr_oactive(ifp->if_ifqs[i]);
+
+ splx(s);
+} /* ixv_init */
+
+/*
+ * MSI-X Interrupt Handlers
+ */
+
+static inline void
+ixv_enable_queue(struct ix_softc *sc, uint32_t vector)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t queue = 1 << vector;
+ uint32_t mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+} /* ixv_enable_queue */
+
+static inline void
+ixv_disable_queue(struct ix_softc *sc, uint32_t vector)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint64_t queue = (1ULL << vector);
+ uint32_t mask;
+
+ mask = (IXGBE_EIMS_RTX_QUEUE & queue);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, mask);
+} /* ixv_disable_queue */
+
+/************************************************************************
+ * ixv_msix_que - MSI-X Queue Interrupt Service routine
+ ************************************************************************/
+static int
+ixv_msix_que(void *arg)
+{
+ struct ix_queue *que = arg;
+ struct ix_softc *sc = que->sc;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ix_txring *txr = que->txr;
+ struct ix_rxring *rxr = que->rxr;
+
+ if ((ifp->if_flags & IFF_RUNNING) == 0)
+ return 1;
+
+ ixv_disable_queue(sc, que->msix);
+
+ ixgbe_rxeof(rxr);
+ ixgbe_txeof(txr);
+ ixgbe_rxrefill(rxr);
+
+ /* Reenable this interrupt */
+ ixv_enable_queue(sc, que->msix);
+
+ return 1;
+} /* ixv_msix_que */
+
+
+/************************************************************************
+ * ixv_msix_mbx
+ ************************************************************************/
+static int
+ixv_msix_mbx(void *arg)
+{
+ struct ix_softc *sc = arg;
+ struct ixgbe_hw *hw = &sc->hw;
+
+ sc->hw.mac.get_link_status = TRUE;
+ KERNEL_LOCK();
+ ixgbe_update_link_status(sc);
+ KERNEL_UNLOCK();
+
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
+
+ return 1;
+} /* ixv_msix_mbx */
+
+/************************************************************************
+ * ixv_negotiate_api
+ *
+ * Negotiate the Mailbox API with the PF;
+ * start with the most featured API first.
+ ************************************************************************/
+static int
+ixv_negotiate_api(struct ix_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ int mbx_api[] = { ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown };
+ int i = 0;
+
+ while (mbx_api[i] != ixgbe_mbox_api_unknown) {
+ if (ixgbevf_negotiate_api_version(hw, mbx_api[i]) == 0)
+ return (0);
+ i++;
+ }
+
+ return (EINVAL);
+} /* ixv_negotiate_api */
+
+
+/************************************************************************
+ * ixv_iff - Multicast Update
+ *
+ * Called whenever multicast address list is updated.
+ ************************************************************************/
+static void
+ixv_iff(struct ix_softc *sc)
+{
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ixgbe_hw *hw = &sc->hw;
+ struct arpcom *ac = &sc->arpcom;
+ uint8_t *mta, *update_ptr;
+ struct ether_multi *enm;
+ struct ether_multistep step;
+ int xcast_mode, mcnt = 0;
+
+ IOCTL_DEBUGOUT("ixv_iff: begin");
+
+ mta = sc->mta;
+ bzero(mta, sizeof(uint8_t) * IXGBE_ETH_LENGTH_OF_ADDRESS *
+ IXGBE_MAX_MULTICAST_ADDRESSES_VF);
+
+ ifp->if_flags &= ~IFF_ALLMULTI;
+ if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0 ||
+ ac->ac_multicnt > IXGBE_MAX_MULTICAST_ADDRESSES_VF) {
+ ifp->if_flags |= IFF_ALLMULTI;
+ } else {
+ ETHER_FIRST_MULTI(step, &sc->arpcom, enm);
+ while (enm != NULL) {
+ bcopy(enm->enm_addrlo,
+ &mta[mcnt * IXGBE_ETH_LENGTH_OF_ADDRESS],
+ IXGBE_ETH_LENGTH_OF_ADDRESS);
+ mcnt++;
+
+ ETHER_NEXT_MULTI(step, enm);
+ }
+
+ update_ptr = mta;
+ hw->mac.ops.update_mc_addr_list(hw, update_ptr, mcnt,
+ ixv_mc_array_itr, TRUE);
+ }
+
+ /* request the most inclusive mode we need */
+ if (ISSET(ifp->if_flags, IFF_PROMISC))
+ xcast_mode = IXGBEVF_XCAST_MODE_PROMISC;
+ else if (ISSET(ifp->if_flags, IFF_ALLMULTI))
+ xcast_mode = IXGBEVF_XCAST_MODE_ALLMULTI;
+ else if (ISSET(ifp->if_flags, (IFF_BROADCAST | IFF_MULTICAST)))
+ xcast_mode = IXGBEVF_XCAST_MODE_MULTI;
+ else
+ xcast_mode = IXGBEVF_XCAST_MODE_NONE;
+
+ hw->mac.ops.update_xcast_mode(hw, xcast_mode);
+} /* ixv_iff */
+
+/************************************************************************
+ * ixv_mc_array_itr
+ *
+ * An iterator function needed by the multicast shared code.
+ * It feeds the shared code routine the addresses in the
+ * array of ixv_iff() one by one.
+ ************************************************************************/
+static uint8_t *
+ixv_mc_array_itr(struct ixgbe_hw *hw, uint8_t **update_ptr, uint32_t *vmdq)
+{
+ uint8_t *mta = *update_ptr;
+
+ *vmdq = 0;
+ *update_ptr = mta + IXGBE_ETH_LENGTH_OF_ADDRESS;
+
+ return (mta);
+} /* ixv_mc_array_itr */
+
+/************************************************************************
+ * ixv_stop - Stop the hardware
+ *
+ * Disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ ************************************************************************/
+static void
+ixv_stop(void *arg)
+{
+ struct ix_softc *sc = arg;
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ixgbe_hw *hw = &sc->hw;
+ int i;
+
+ INIT_DEBUGOUT("ixv_stop: begin\n");
+#if NKSTAT > 0
+ timeout_del(&sc->sc_kstat_tmo);
+#endif
+ ixv_disable_intr(sc);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_flags &= ~IFF_RUNNING;
+
+ hw->mac.ops.reset_hw(hw);
+ sc->hw.adapter_stopped = FALSE;
+ hw->mac.ops.stop_adapter(hw);
+
+ /* reprogram the RAR[0] in case user changed it. */
+ hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
+
+ intr_barrier(sc->tag);
+ for (i = 0; i < sc->num_queues; i++) {
+ struct ifqueue *ifq = ifp->if_ifqs[i];
+ ifq_barrier(ifq);
+ ifq_clr_oactive(ifq);
+
+ if (sc->queues[i].tag != NULL)
+ intr_barrier(sc->queues[i].tag);
+ timeout_del(&sc->rx_rings[i].rx_refill);
+ }
+
+ KASSERT((ifp->if_flags & IFF_RUNNING) == 0);
+
+ /* Should we really clear all structures on stop? */
+ ixgbe_free_transmit_structures(sc);
+ ixgbe_free_receive_structures(sc);
+
+ ixgbe_update_link_status(sc);
+} /* ixv_stop */
+
+/************************************************************************
+ * ixv_setup_interface
+ *
+ * Setup networking device structure and register an interface.
+ ************************************************************************/
+static void
+ixv_setup_interface(struct device *dev, struct ix_softc *sc)
+{
+ struct ifnet *ifp;
+ int i;
+
+ ifp = &sc->arpcom.ac_if;
+
+ strlcpy(ifp->if_xname, sc->dev.dv_xname, IFNAMSIZ);
+ ifp->if_softc = sc;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_xflags = IFXF_MPSAFE;
+ ifp->if_ioctl = ixv_ioctl;
+ ifp->if_qstart = ixgbe_start;
+ ifp->if_timer = 0;
+ ifp->if_watchdog = ixv_watchdog;
+ ifp->if_hardmtu = IXGBE_MAX_FRAME_SIZE -
+ ETHER_HDR_LEN - ETHER_CRC_LEN;
+ ifq_init_maxlen(&ifp->if_snd, sc->num_tx_desc - 1);
+
+ ifp->if_capabilities = IFCAP_VLAN_MTU;
+
+#if NVLAN > 0
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
+#endif
+
+ ifp->if_capabilities |= IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;
+ ifp->if_capabilities |= IFCAP_CSUM_TCPv6 | IFCAP_CSUM_UDPv6;
+ ifp->if_capabilities |= IFCAP_CSUM_IPv4;
+
+ /*
+ * Specify the media types supported by this sc and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&sc->media, IFM_IMASK, ixgbe_media_change,
+ ixgbe_media_status);
+ ixgbe_add_media_types(sc);
+ ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO);
+
+ if_attach(ifp);
+ ether_ifattach(ifp);
+
+ if_attach_queues(ifp, sc->num_queues);
+ if_attach_iqueues(ifp, sc->num_queues);
+ for (i = 0; i < sc->num_queues; i++) {
+ struct ifqueue *ifq = ifp->if_ifqs[i];
+ struct ifiqueue *ifiq = ifp->if_iqs[i];
+ struct ix_txring *txr = &sc->tx_rings[i];
+ struct ix_rxring *rxr = &sc->rx_rings[i];
+
+ ifq->ifq_softc = txr;
+ txr->ifq = ifq;
+
+ ifiq->ifiq_softc = rxr;
+ rxr->ifiq = ifiq;
+
+#if NKSTAT > 0
+ ixv_txq_kstats(sc, txr);
+ ixv_rxq_kstats(sc, rxr);
+#endif
+ }
+
+ sc->max_frame_size = IXGBE_MAX_FRAME_SIZE;
+} /* ixv_setup_interface */
+
+/************************************************************************
+ * ixv_initialize_transmit_units - Enable transmit unit.
+ ************************************************************************/
+static void
+ixv_initialize_transmit_units(struct ix_softc *sc)
+{
+ struct ifnet *ifp = &sc->arpcom.ac_if;
+ struct ix_txring *txr;
+ struct ixgbe_hw *hw = &sc->hw;
+ uint64_t tdba;
+ uint32_t txctrl, txdctl;
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++) {
+ txr = &sc->tx_rings[i];
+ tdba = txr->txdma.dma_map->dm_segs[0].ds_addr;
+
+ /* Set WTHRESH to 8, burst writeback */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= (8 << 16);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+
+ /* Set Tx Tail register */
+ txr->tail = IXGBE_VFTDT(i);
+
+ /* Set the HW Tx Head and Tail indices */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(&sc->hw, txr->tail, 0);
+
+ /* Setup Transmit Descriptor Cmd Settings */
+ txr->txd_cmd = IXGBE_TXD_CMD_IFCS;
+ txr->queue_status = IXGBE_QUEUE_IDLE;
+ txr->watchdog_timer = 0;
+
+ /* Set Ring parameters */
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAL(i),
+ (tdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDBAH(i), (tdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDLEN(i),
+ sc->num_tx_desc * sizeof(struct ixgbe_legacy_tx_desc));
+ txctrl = IXGBE_READ_REG(hw, IXGBE_VFDCA_TXCTRL(i));
+ txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), txctrl);
+
+ /* Now enable */
+ txdctl = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
+ txdctl |= IXGBE_TXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), txdctl);
+ }
+ ifp->if_timer = 0;
+
+ return;
+} /* ixv_initialize_transmit_units */
+
+/************************************************************************
+ * ixv_initialize_rss_mapping
+ ************************************************************************/
+static void
+ixv_initialize_rss_mapping(struct ix_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t reta = 0, mrqc, rss_key[10];
+ int queue_id;
+ int i, j;
+
+ /* fetch the system's stoeplitz RSS key */
+ stoeplitz_to_key(&rss_key, sizeof(rss_key));
+
+ /* Now fill out hash function seeds */
+ for (i = 0; i < 10; i++)
+ IXGBE_WRITE_REG(hw, IXGBE_VFRSSRK(i), rss_key[i]);
+
+ /* Set up the redirection table */
+ for (i = 0, j = 0; i < 64; i++, j++) {
+ if (j == sc->num_queues)
+ j = 0;
+
+ /*
+ * Use the RSS bucket id for the given indirection
+ * entry; j already wraps at the number of configured
+ * buckets (which is num_queues).
+ */
+ queue_id = j;
+
+ /*
+ * The low 8 bits are for hash value (n+0);
+ * The next 8 bits are for hash value (n+1), etc.
+ */
+ reta >>= 8;
+ reta |= ((uint32_t)queue_id) << 24;
+ if ((i & 3) == 3) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFRETA(i >> 2), reta);
+ reta = 0;
+ }
+ }
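+ /*
+ * For example, with two queues the table alternates 0,1,0,1,...,
+ * so every 32-bit VFRETA word written above ends up as 0x01000100.
+ */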
+
+ /*
+ * Disable UDP - IP fragments aren't currently being handled
+ * and so we end up with a mix of 2-tuple and 4-tuple
+ * traffic.
+ */
+ mrqc = IXGBE_MRQC_RSSEN
+ | IXGBE_MRQC_RSS_FIELD_IPV4
+ | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP
+ | IXGBE_MRQC_RSS_FIELD_IPV6_EX
+ | IXGBE_MRQC_RSS_FIELD_IPV6
+ | IXGBE_MRQC_RSS_FIELD_IPV6_TCP
+ ;
+ IXGBE_WRITE_REG(hw, IXGBE_VFMRQC, mrqc);
+} /* ixv_initialize_rss_mapping */
+
+
+/************************************************************************
+ * ixv_initialize_receive_units - Setup receive registers and features.
+ ************************************************************************/
+static void
+ixv_initialize_receive_units(struct ix_softc *sc)
+{
+ struct ix_rxring *rxr = sc->rx_rings;
+ struct ixgbe_hw *hw = &sc->hw;
+ uint64_t rdba;
+ uint32_t reg, rxdctl, bufsz, psrtype;
+ int i, j, k;
+
+ bufsz = (sc->rx_mbuf_sz - ETHER_ALIGN) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+ psrtype = IXGBE_PSRTYPE_TCPHDR
+ | IXGBE_PSRTYPE_UDPHDR
+ | IXGBE_PSRTYPE_IPV4HDR
+ | IXGBE_PSRTYPE_IPV6HDR
+ | IXGBE_PSRTYPE_L2HDR;
+
+ if (sc->num_queues > 1)
+ psrtype |= 1 << 29; /* RQPL: two RSS queues per pool */
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, psrtype);
+
+ /* Tell PF our max_frame size */
+ if (ixgbevf_rlpml_set_vf(hw, sc->max_frame_size) != 0) {
+ printf("There is a problem with the PF setup."
+ " It is likely the receive unit for this VF will not function correctly.\n");
+ }
+
+ for (i = 0; i < sc->num_queues; i++, rxr++) {
+ rdba = rxr->rxdma.dma_map->dm_segs[0].ds_addr;
+
+ /* Disable the queue */
+ rxdctl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ rxdctl &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+ for (j = 0; j < 10; j++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ msec_delay(1);
+ else
+ break;
+ }
+
+ /* Setup the Base and Length of the Rx Descriptor Ring */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAL(i),
+ (rdba & 0x00000000ffffffffULL));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDBAH(i), (rdba >> 32));
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDLEN(i),
+ sc->num_rx_desc * sizeof(union ixgbe_adv_rx_desc));
+
+ /* Capture Rx Tail index */
+ rxr->tail = IXGBE_VFRDT(rxr->me);
+
+ /* Reset the ring indices */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(rxr->me), 0);
+ IXGBE_WRITE_REG(hw, rxr->tail, 0);
+
+ /* Set up the SRRCTL register */
+ reg = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));
+ reg &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
+ reg &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
+ reg |= bufsz;
+ reg |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), reg);
+
+ /* Do the queue enabling last */
+ rxdctl |= IXGBE_RXDCTL_ENABLE | IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), rxdctl);
+ for (k = 0; k < 10; k++) {
+ if (IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i)) &
+ IXGBE_RXDCTL_ENABLE)
+ break;
+ msec_delay(1);
+ }
+
+ /* Set the Tail Pointer */
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(rxr->me),
+ sc->num_rx_desc - 1);
+ }
+
+ /*
+ * Do not touch RSS and RETA settings for older hardware
+ * as those are shared among the PF and all VFs.
+ */
+ if (sc->hw.mac.type >= ixgbe_mac_X550_vf)
+ ixv_initialize_rss_mapping(sc);
+
+ return;
+} /* ixv_initialize_receive_units */
+
+/************************************************************************
+ * ixv_setup_vlan_support
+ ************************************************************************/
+static void
+ixv_setup_vlan_support(struct ix_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t ctrl, vid, vfta, retry;
+ int i, j;
+
+ /*
+ * We get here through init, meaning a soft reset has
+ * already cleared the VFTA and other state, so if no
+ * VLANs have been registered there is nothing to do.
+ */
+ if (sc->num_vlans == 0)
+ return;
+
+ /* Enable the queues */
+ for (i = 0; i < sc->num_queues; i++) {
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), ctrl);
+ /*
+ * Let Rx path know that it needs to store VLAN tag
+ * as part of extra mbuf info.
+ */
+ }
+
+ /*
+ * A soft reset zeroes out the VFTA, so
+ * we need to repopulate it now.
+ */
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
+ if (sc->shadow_vfta[i] == 0)
+ continue;
+ vfta = sc->shadow_vfta[i];
+ /*
+ * Reconstruct the VLAN IDs based on the
+ * bits set in each of the array ints.
+ */
+ for (j = 0; j < 32; j++) {
+ retry = 0;
+ if ((vfta & (1 << j)) == 0)
+ continue;
+ vid = (i * 32) + j;
+ /* Call the shared code mailbox routine */
+ while (hw->mac.ops.set_vfta(hw, vid, 0, TRUE, FALSE)) {
+ if (++retry > 5)
+ break;
+ }
+ }
+ }
+} /* ixv_setup_vlan_support */
+
+/************************************************************************
+ * ixv_enable_intr
+ ************************************************************************/
+static void
+ixv_enable_intr(struct ix_softc *sc)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ struct ix_queue *que = sc->queues;
+ uint32_t mask;
+ int i;
+
+ /* For VTEIAC */
+ mask = (1 << sc->linkvec);
+ for (i = 0; i < sc->num_queues; i++, que++)
+ mask |= (1 << que->msix);
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIAC, mask);
+
+ /* For VTEIMS */
+ IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, (1 << sc->linkvec));
+ que = sc->queues;
+ for (i = 0; i < sc->num_queues; i++, que++)
+ ixv_enable_queue(sc, que->msix);
+
+ IXGBE_WRITE_FLUSH(hw);
+
+ return;
+} /* ixv_enable_intr */
+
+/************************************************************************
+ * ixv_disable_intr
+ ************************************************************************/
+static void
+ixv_disable_intr(struct ix_softc *sc)
+{
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIAC, 0);
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEIMC, ~0);
+ IXGBE_WRITE_FLUSH(&sc->hw);
+
+ return;
+} /* ixv_disable_intr */
+
+/************************************************************************
+ * ixv_set_ivar
+ *
+ * Setup the correct IVAR register for a particular MSI-X interrupt
+ * - entry is the register array entry
+ * - vector is the MSI-X vector for this queue
+ * - type is RX/TX/MISC
+ ************************************************************************/
+static void
+ixv_set_ivar(struct ix_softc *sc, uint8_t entry, uint8_t vector, int8_t type)
+{
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t ivar, index;
+
+ vector |= IXGBE_IVAR_ALLOC_VAL;
+
+ if (type == -1) { /* MISC IVAR */
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);
+ ivar &= ~0xFF;
+ ivar |= vector;
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR_MISC, ivar);
+ } else { /* RX/TX IVARS */
+ index = (16 * (entry & 1)) + (8 * type);
+ ivar = IXGBE_READ_REG(hw, IXGBE_VTIVAR(entry >> 1));
+ ivar &= ~(0xFF << index);
+ ivar |= (vector << index);
+ IXGBE_WRITE_REG(hw, IXGBE_VTIVAR(entry >> 1), ivar);
+ }
+} /* ixv_set_ivar */
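+/*
+ * For example, queue 0 maps into VTIVAR(0) bits 7:0 (RX) and 15:8 (TX),
+ * and queue 1 into bits 23:16 and 31:24 of the same register, per the
+ * index arithmetic above.
+ */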
+
+/************************************************************************
+ * ixv_configure_ivars
+ ************************************************************************/
+static void
+ixv_configure_ivars(struct ix_softc *sc)
+{
+ struct ix_queue *que = sc->queues;
+ int i;
+
+ for (i = 0; i < sc->num_queues; i++, que++) {
+ /* First the RX queue entry */
+ ixv_set_ivar(sc, i, que->msix, 0);
+ /* ... and the TX */
+ ixv_set_ivar(sc, i, que->msix, 1);
+ /* Set an initial value in EITR */
+ IXGBE_WRITE_REG(&sc->hw, IXGBE_VTEITR(que->msix),
+ IXGBE_EITR_DEFAULT);
+ }
+
+ /* For the mailbox interrupt */
+ ixv_set_ivar(sc, 1, sc->linkvec, -1);
+} /* ixv_configure_ivars */
+
+/************************************************************************
+ * ixv_ioctl - Ioctl entry point
+ *
+ * Called when the user wants to configure the interface.
+ *
+ * return 0 on success, positive on failure
+ ************************************************************************/
+static int
+ixv_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
+{
+ struct ix_softc *sc = ifp->if_softc;
+ struct ifreq *ifr = (struct ifreq *)data;
+ int s, error = 0;
+
+ s = splnet();
+
+ switch (command) {
+ case SIOCSIFADDR:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFADDR (Get/Set Interface Addr)");
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_flags & IFF_RUNNING))
+ ixv_init(sc);
+ break;
+
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ if (ifp->if_flags & IFF_UP) {
+ if (ifp->if_flags & IFF_RUNNING)
+ error = ENETRESET;
+ else
+ ixv_init(sc);
+ } else {
+ if (ifp->if_flags & IFF_RUNNING)
+ ixv_stop(sc);
+ }
+ break;
+
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &sc->media, command);
+ break;
+
+ case SIOCGIFRXR:
+ error = ixgbe_rxrinfo(sc, (struct if_rxrinfo *)ifr->ifr_data);
+ break;
+
+ default:
+ error = ether_ioctl(ifp, &sc->arpcom, command, data);
+ }
+
+ switch (error) {
+ case 0:
+ if (command == SIOCSIFMTU)
+ ixv_init(sc);
+ break;
+ case ENETRESET:
+ if (ifp->if_flags & IFF_RUNNING) {
+ ixv_disable_intr(sc);
+ ixv_iff(sc);
+ ixv_enable_intr(sc);
+ }
+ error = 0;
+ }
+
+ splx(s);
+ return (error);
+} /* ixv_ioctl */
+
+/************************************************************************
+ * ixv_allocate_msix - Setup MSI-X Interrupt resources and handlers
+ ************************************************************************/
+static int
+ixv_allocate_msix(struct ix_softc *sc)
+{
+ struct ixgbe_osdep *os = &sc->osdep;
+ struct pci_attach_args *pa = &os->os_pa;
+ int i = 0, error = 0, off;
+ struct ix_queue *que;
+ pci_intr_handle_t ih;
+ pcireg_t reg;
+
+ for (i = 0, que = sc->queues; i < sc->num_queues; i++, que++) {
+ if (pci_intr_map_msix(pa, i, &ih)) {
+ printf("ixv_allocate_msix: "
+ "pci_intr_map_msix vec %d failed\n", i);
+ error = ENOMEM;
+ goto fail;
+ }
+
+ que->tag = pci_intr_establish_cpu(pa->pa_pc, ih,
+ IPL_NET | IPL_MPSAFE, intrmap_cpu(sc->sc_intrmap, i),
+ ixv_msix_que, que, que->name);
+ if (que->tag == NULL) {
+ printf("ixv_allocate_msix: "
+ "pci_intr_establish vec %d failed\n", i);
+ error = ENOMEM;
+ goto fail;
+ }
+
+ que->msix = i;
+ }
+
+ /* and Mailbox */
+ if (pci_intr_map_msix(pa, i, &ih)) {
+ printf("ixgbe_allocate_msix: "
+ "pci_intr_map_msix mbox vector failed\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ sc->tag = pci_intr_establish(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE,
+ ixv_msix_mbx, sc, sc->dev.dv_xname);
+ if (sc->tag == NULL) {
+ printf("ixv_allocate_msix: "
+ "pci_intr_establish mbox vector failed\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ sc->linkvec = i;
+
+ /*
+ * Due to a broken design, QEMU will fail to properly
+ * enable the guest for MSI-X unless the vectors in
+ * the table are all set up, so we must rewrite the
+ * ENABLE in the MSI-X control register again at this
+ * point to cause it to successfully initialize us.
+ */
+ if (sc->hw.mac.type == ixgbe_mac_82599_vf) {
+ pci_get_capability(pa->pa_pc, pa->pa_tag, PCI_CAP_MSIX,
+ &off, NULL);
+ reg = pci_conf_read(pa->pa_pc, pa->pa_tag, off);
+ pci_conf_write(pa->pa_pc, pa->pa_tag, off,
+ reg | PCI_MSIX_MC_MSIXE);
+ }
+
+ printf(", %s, %d queue%s\n", pci_intr_string(pa->pa_pc, ih),
+ i, (i > 1) ? "s" : "");
+
+ return (0);
+
+fail:
+ for (que = sc->queues; i > 0; i--, que++) {
+ if (que->tag == NULL)
+ continue;
+ pci_intr_disestablish(pa->pa_pc, que->tag);
+ que->tag = NULL;
+ }
+ return (error);
+} /* ixv_allocate_msix */
+
+#if NKSTAT > 0
+enum ixv_counter_idx {
+ ixv_good_packets_received_count,
+ ixv_good_packets_transmitted_count,
+ ixv_good_octets_received_count,
+ ixv_good_octets_transmitted_count,
+ ixv_multicast_packets_received_count,
+
+ ixv_counter_num,
+};
+
+CTASSERT(KSTAT_KV_U_PACKETS <= 0xff);
+CTASSERT(KSTAT_KV_U_BYTES <= 0xff);
+
+struct ixv_counter {
+ char name[KSTAT_KV_NAMELEN];
+ uint32_t reg;
+ uint8_t width;
+ uint8_t unit;
+};
+
+static const struct ixv_counter ixv_counters[ixv_counter_num] = {
+ [ixv_good_packets_received_count] = { "rx good", IXGBE_VFGPRC, 32, KSTAT_KV_U_PACKETS },
+ [ixv_good_packets_transmitted_count] = { "tx good", IXGBE_VFGPTC, 32, KSTAT_KV_U_PACKETS },
+ [ixv_good_octets_received_count] = { "rx total", IXGBE_VFGORC_LSB, 36, KSTAT_KV_U_BYTES },
+ [ixv_good_octets_transmitted_count] = { "tx total", IXGBE_VFGOTC_LSB, 36, KSTAT_KV_U_BYTES },
+ [ixv_multicast_packets_received_count] = { "rx mcast", IXGBE_VFMPRC, 32, KSTAT_KV_U_PACKETS },
+};
+
+struct ixv_rxq_kstats {
+ struct kstat_kv qprc;
+ struct kstat_kv qbrc;
+ struct kstat_kv qprdc;
+};
+
+static const struct ixv_rxq_kstats ixv_rxq_kstats_tpl = {
+ KSTAT_KV_UNIT_INITIALIZER("packets",
+ KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+ KSTAT_KV_UNIT_INITIALIZER("bytes",
+ KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
+ KSTAT_KV_UNIT_INITIALIZER("qdrops",
+ KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+};
+
+struct ixv_txq_kstats {
+ struct kstat_kv qptc;
+ struct kstat_kv qbtc;
+};
+
+static const struct ixv_txq_kstats ixv_txq_kstats_tpl = {
+ KSTAT_KV_UNIT_INITIALIZER("packets",
+ KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS),
+ KSTAT_KV_UNIT_INITIALIZER("bytes",
+ KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES),
+};
+
+static int ixv_kstats_read(struct kstat *ks);
+static int ixv_rxq_kstats_read(struct kstat *ks);
+static int ixv_txq_kstats_read(struct kstat *ks);
+
+static void
+ixv_kstats(struct ix_softc *sc)
+{
+ struct kstat *ks;
+ struct kstat_kv *kvs;
+ unsigned int i;
+
+ mtx_init(&sc->sc_kstat_mtx, IPL_SOFTCLOCK);
+ timeout_set(&sc->sc_kstat_tmo, ixv_kstats_tick, sc);
+
+ ks = kstat_create(sc->dev.dv_xname, 0, "ixv-stats", 0,
+ KSTAT_T_KV, 0);
+ if (ks == NULL)
+ return;
+
+ kvs = mallocarray(nitems(ixv_counters), sizeof(*kvs),
+ M_DEVBUF, M_WAITOK|M_ZERO);
+
+ for (i = 0; i < nitems(ixv_counters); i++) {
+ const struct ixv_counter *ixc = &ixv_counters[i];
+
+ kstat_kv_unit_init(&kvs[i], ixc->name,
+ KSTAT_KV_T_COUNTER64, ixc->unit);
+ }
+
+ kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+ ks->ks_softc = sc;
+ ks->ks_data = kvs;
+ ks->ks_datalen = nitems(ixv_counters) * sizeof(*kvs);
+ ks->ks_read = ixv_kstats_read;
+
+ sc->sc_kstat = ks;
+ kstat_install(ks);
+}
+
+static void
+ixv_rxq_kstats(struct ix_softc *sc, struct ix_rxring *rxr)
+{
+ struct ixv_rxq_kstats *stats;
+ struct kstat *ks;
+
+ ks = kstat_create(sc->dev.dv_xname, 0, "ixv-rxq", rxr->me,
+ KSTAT_T_KV, 0);
+ if (ks == NULL)
+ return;
+
+ stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
+ *stats = ixv_rxq_kstats_tpl;
+
+ kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+ ks->ks_softc = rxr;
+ ks->ks_data = stats;
+ ks->ks_datalen = sizeof(*stats);
+ ks->ks_read = ixv_rxq_kstats_read;
+
+ rxr->kstat = ks;
+ kstat_install(ks);
+}
+
+static void
+ixv_txq_kstats(struct ix_softc *sc, struct ix_txring *txr)
+{
+ struct ixv_txq_kstats *stats;
+ struct kstat *ks;
+
+ ks = kstat_create(sc->dev.dv_xname, 0, "ixv-txq", txr->me,
+ KSTAT_T_KV, 0);
+ if (ks == NULL)
+ return;
+
+ stats = malloc(sizeof(*stats), M_DEVBUF, M_WAITOK|M_ZERO);
+ *stats = ixv_txq_kstats_tpl;
+
+ kstat_set_mutex(ks, &sc->sc_kstat_mtx);
+ ks->ks_softc = txr;
+ ks->ks_data = stats;
+ ks->ks_datalen = sizeof(*stats);
+ ks->ks_read = ixv_txq_kstats_read;
+
+ txr->kstat = ks;
+ kstat_install(ks);
+}
+
+/**********************************************************************
+ *
+ * Update the board statistics counters.
+ *
+ **********************************************************************/
+
+static void
+ixv_kstats_tick(void *arg)
+{
+ struct ix_softc *sc = arg;
+ int i;
+
+ timeout_add_sec(&sc->sc_kstat_tmo, 1);
+
+ mtx_enter(&sc->sc_kstat_mtx);
+ ixv_kstats_read(sc->sc_kstat);
+ for (i = 0; i < sc->num_queues; i++) {
+ ixv_rxq_kstats_read(sc->rx_rings[i].kstat);
+ ixv_txq_kstats_read(sc->tx_rings[i].kstat);
+ }
+ mtx_leave(&sc->sc_kstat_mtx);
+}
+
+static uint64_t
+ixv_read36(struct ixgbe_hw *hw, bus_size_t loreg, bus_size_t hireg)
+{
+ uint64_t lo, hi;
+
+ lo = IXGBE_READ_REG(hw, loreg);
+ hi = IXGBE_READ_REG(hw, hireg);
+
+ return (((hi & 0xf) << 32) | lo);
+}
+
+static int
+ixv_kstats_read(struct kstat *ks)
+{
+ struct ix_softc *sc = ks->ks_softc;
+ struct kstat_kv *kvs = ks->ks_data;
+ struct ixgbe_hw *hw = &sc->hw;
+ unsigned int i;
+
+ for (i = 0; i < nitems(ixv_counters); i++) {
+ const struct ixv_counter *ixc = &ixv_counters[i];
+ uint32_t reg = ixc->reg;
+ uint64_t v;
+
+ if (reg == 0)
+ continue;
+
+ if (ixc->width > 32)
+ v = ixv_read36(hw, reg, reg + 4);
+ else
+ v = IXGBE_READ_REG(hw, reg);
+
+ kstat_kv_u64(&kvs[i]) = v;
+ }
+
+ getnanouptime(&ks->ks_updated);
+
+ return (0);
+}
+
+static int
+ixv_rxq_kstats_read(struct kstat *ks)
+{
+ struct ixv_rxq_kstats *stats = ks->ks_data;
+ struct ix_rxring *rxr = ks->ks_softc;
+ struct ix_softc *sc = rxr->sc;
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t i = rxr->me;
+
+ kstat_kv_u64(&stats->qprc) += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
+ kstat_kv_u64(&stats->qprdc) += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
+ kstat_kv_u64(&stats->qbrc) +=
+ ixv_read36(hw, IXGBE_QBRC_L(i), IXGBE_QBRC_H(i));
+
+ getnanouptime(&ks->ks_updated);
+
+ return (0);
+}
+
+static int
+ixv_txq_kstats_read(struct kstat *ks)
+{
+ struct ixv_txq_kstats *stats = ks->ks_data;
+ struct ix_txring *txr = ks->ks_softc;
+ struct ix_softc *sc = txr->sc;
+ struct ixgbe_hw *hw = &sc->hw;
+ uint32_t i = txr->me;
+
+ kstat_kv_u64(&stats->qptc) += IXGBE_READ_REG(hw, IXGBE_QPTC(i));
+ kstat_kv_u64(&stats->qbtc) +=
+ ixv_read36(hw, IXGBE_QBTC_L(i), IXGBE_QBTC_H(i));
+
+ getnanouptime(&ks->ks_updated);
+
+ return (0);
+}
+#endif /* NKSTAT > 0 */
Index: sys/dev/pci/ixgbe_vf.c
===================================================================
RCS file: sys/dev/pci/ixgbe_vf.c
diff -N sys/dev/pci/ixgbe_vf.c
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ sys/dev/pci/ixgbe_vf.c 27 Oct 2024 05:43:10 -0000
@@ -0,0 +1,799 @@
+/* $OpenBSD$ */
+
+/******************************************************************************
+
+ Copyright (c) 2001-2017, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+
+#include <dev/pci/ixgbe.h>
+#include <dev/pci/ixgbe_type.h>
+
+#ifndef IXGBE_VFWRITE_REG
+#define IXGBE_VFWRITE_REG IXGBE_WRITE_REG
+#endif
+#ifndef IXGBE_VFREAD_REG
+#define IXGBE_VFREAD_REG IXGBE_READ_REG
+#endif
+
+/*
+ * Dummy handlers.
+ * They are called from shared ix(4) driver code,
+ * and there is nothing to do for a VF.
+ */
+static uint64_t
+ixgbe_dummy_uint64_handler_vf(struct ixgbe_hw *hw)
+{
+ return 0;
+}
+
+static int32_t
+ixgbe_dummy_handler_vf(struct ixgbe_hw *hw)
+{
+ return 0;
+}
+
+static void
+ixgbe_dummy_void_handler_vf(struct ixgbe_hw *hw)
+{
+ return;
+}
+
+/**
+ * ixgbe_init_ops_vf - Initialize the pointers for vf
+ * @hw: pointer to hardware structure
+ *
+ * This will assign function pointers, adapter-specific functions can
+ * override the assignment of generic function pointers by assigning
+ * their own adapter-specific function pointers.
+ * Does not touch the hardware.
+ **/
+int32_t ixgbe_init_ops_vf(struct ixgbe_hw *hw)
+{
+ /* MAC */
+ hw->mac.ops.init_hw = ixgbe_init_hw_vf;
+ hw->mac.ops.reset_hw = ixgbe_reset_hw_vf;
+ hw->mac.ops.start_hw = ixgbe_start_hw_vf;
+ /* Cannot clear stats on VF */
+ hw->mac.ops.clear_hw_cntrs = NULL;
+ hw->mac.ops.get_media_type = NULL;
+ hw->mac.ops.get_supported_physical_layer =
+ ixgbe_dummy_uint64_handler_vf;
+ hw->mac.ops.get_mac_addr = ixgbe_get_mac_addr_vf;
+ hw->mac.ops.stop_adapter = ixgbe_stop_adapter_vf;
+ hw->mac.ops.get_bus_info = NULL;
+ hw->mac.ops.negotiate_api_version = ixgbevf_negotiate_api_version;
+
+ /* Link */
+ hw->mac.ops.setup_link = ixgbe_setup_mac_link_vf;
+ hw->mac.ops.check_link = ixgbe_check_mac_link_vf;
+ hw->mac.ops.get_link_capabilities = NULL;
+
+ /* RAR, Multicast, VLAN */
+ hw->mac.ops.set_rar = ixgbe_set_rar_vf;
+ hw->mac.ops.set_uc_addr = ixgbevf_set_uc_addr_vf;
+ hw->mac.ops.init_rx_addrs = NULL;
+ hw->mac.ops.update_mc_addr_list = ixgbe_update_mc_addr_list_vf;
+ hw->mac.ops.update_xcast_mode = ixgbevf_update_xcast_mode;
+ hw->mac.ops.get_link_state = ixgbe_get_link_state_vf;
+ hw->mac.ops.enable_mc = NULL;
+ hw->mac.ops.disable_mc = NULL;
+ hw->mac.ops.clear_vfta = NULL;
+ hw->mac.ops.set_vfta = ixgbe_set_vfta_vf;
+ hw->mac.ops.set_rlpml = ixgbevf_rlpml_set_vf;
+
+ /* Flow Control */
+ hw->mac.ops.fc_enable = ixgbe_dummy_handler_vf;
+ hw->mac.ops.setup_fc = ixgbe_dummy_handler_vf;
+ hw->mac.ops.fc_autoneg = ixgbe_dummy_void_handler_vf;
+
+ hw->mac.max_tx_queues = 1;
+ hw->mac.max_rx_queues = 1;
+
+ hw->mbx.ops.init_params = ixgbe_init_mbx_params_vf;
+
+ return IXGBE_SUCCESS;
+}
+
+/* ixgbe_virt_clr_reg - Set registers to default (power on) state.
+ * @hw: pointer to hardware structure
+ */
+static void ixgbe_virt_clr_reg(struct ixgbe_hw *hw)
+{
+ int i;
+ uint32_t vfsrrctl;
+ uint32_t vfdca_rxctrl;
+ uint32_t vfdca_txctrl;
+
+ /* VFSRRCTL default values (BSIZEPACKET = 2048, BSIZEHEADER = 256) */
+ vfsrrctl = 0x100 << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT;
+ vfsrrctl |= 0x800 >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
+
+ /* DCA_RXCTRL default value */
+ vfdca_rxctrl = IXGBE_DCA_RXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_RXCTRL_DATA_WRO_EN |
+ IXGBE_DCA_RXCTRL_HEAD_WRO_EN;
+
+ /* DCA_TXCTRL default value */
+ vfdca_txctrl = IXGBE_DCA_TXCTRL_DESC_RRO_EN |
+ IXGBE_DCA_TXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_TXCTRL_DATA_RRO_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ for (i = 0; i < 7; i++) {
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFSRRCTL(i), vfsrrctl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDT(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAH(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFTDWBAL(i), 0);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_RXCTRL(i), vfdca_rxctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_VFDCA_TXCTRL(i), vfdca_txctrl);
+ }
+
+ IXGBE_WRITE_FLUSH(hw);
+}
+
+/**
+ * ixgbe_start_hw_vf - Prepare hardware for Tx/Rx
+ * @hw: pointer to hardware structure
+ *
+ * Starts the hardware by clearing the adapter_stopped flag. Unlike the
+ * PF version, the VF has no bus info, on-chip counters, or filter
+ * tables of its own to initialize; transmit and receive units are left
+ * disabled and uninitialized.
+ **/
+int32_t ixgbe_start_hw_vf(struct ixgbe_hw *hw)
+{
+ /* Clear adapter stopped flag */
+ hw->adapter_stopped = FALSE;
+
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_init_hw_vf - virtual function hardware initialization
+ * @hw: pointer to hardware structure
+ *
+ * Initialize the hardware by resetting the hardware and then starting
+ * the hardware
+ **/
+int32_t ixgbe_init_hw_vf(struct ixgbe_hw *hw)
+{
+ int32_t status = hw->mac.ops.start_hw(hw);
+
+ hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
+
+ return status;
+}
+
+/**
+ * ixgbe_reset_hw_vf - Performs hardware reset
+ * @hw: pointer to hardware structure
+ *
+ * Resets the hardware by resetting the transmit and receive units, masks and
+ * clears all interrupts.
+ **/
+int32_t ixgbe_reset_hw_vf(struct ixgbe_hw *hw)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ uint32_t timeout = IXGBE_VF_INIT_TIMEOUT;
+ int32_t ret_val = IXGBE_ERR_INVALID_MAC_ADDR;
+ uint32_t msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
+ uint8_t *addr = (uint8_t *)(&msgbuf[1]);
+
+ DEBUGFUNC("ixgbe_reset_hw_vf");
+
+ /* Call adapter stop to disable tx/rx and clear interrupts */
+ hw->mac.ops.stop_adapter(hw);
+
+ /* reset the api version */
+ hw->api_version = ixgbe_mbox_api_10;
+ ixgbe_init_mbx_params_vf(hw);
+
+ DEBUGOUT("Issuing a function level reset to MAC\n");
+
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
+ IXGBE_WRITE_FLUSH(hw);
+
+ msec_delay(50);
+
+ /* we cannot reset while the RSTI / RSTD bits are asserted */
+ while (!mbx->ops.check_for_rst(hw, 0) && timeout) {
+ timeout--;
+ usec_delay(5);
+ }
+
+ if (!timeout)
+ return IXGBE_ERR_RESET_FAILED;
+
+ /* Reset VF registers to initial values */
+ ixgbe_virt_clr_reg(hw);
+
+ /* mailbox timeout can now become active */
+ mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;
+
+ msgbuf[0] = IXGBE_VF_RESET;
+ ixgbe_write_mbx(hw, msgbuf, 1, 0);
+
+ msec_delay(10);
+
+ /*
+ * Set our "perm_addr" based on info provided by the PF.
+ * Also set up the mc_filter_type, which is piggybacked
+ * on the mac address in word 3.
+ */
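+ /*
+ * Reply layout sketch: msgbuf[0] is IXGBE_VF_RESET plus the ack
+ * bits, the 6-byte permanent MAC address starts at msgbuf[1],
+ * and msgbuf[IXGBE_VF_MC_TYPE_WORD] carries the mc_filter_type.
+ */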
+ ret_val = ixgbe_poll_mbx(hw, msgbuf,
+ IXGBE_VF_PERMADDR_MSG_LEN, 0);
+ if (ret_val)
+ return ret_val;
+
+ if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
+ msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
+ return IXGBE_ERR_INVALID_MAC_ADDR;
+
+ if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
+ memcpy(hw->mac.perm_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_stop_adapter_vf - Generic stop Tx/Rx units
+ * @hw: pointer to hardware structure
+ *
+ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
+ * disables transmit and receive units. The adapter_stopped flag is used by
+ * the shared code and drivers to determine if the adapter is in a stopped
+ * state and should not touch the hardware.
+ **/
+int32_t ixgbe_stop_adapter_vf(struct ixgbe_hw *hw)
+{
+ uint32_t reg_val;
+ uint16_t i;
+
+ /*
+ * Set the adapter_stopped flag so other driver functions stop touching
+ * the hardware
+ */
+ hw->adapter_stopped = TRUE;
+
+ /* Clear interrupt mask to stop interrupts from being generated */
+ IXGBE_VFWRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
+
+ /* Clear any pending interrupts, flush previous writes */
+ IXGBE_VFREAD_REG(hw, IXGBE_VTEICR);
+
+ /* Disable the transmit unit. Each queue must be disabled. */
+ for (i = 0; i < hw->mac.max_tx_queues; i++)
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFTXDCTL(i), IXGBE_TXDCTL_SWFLSH);
+
+ /* Disable the receive unit by stopping each queue */
+ for (i = 0; i < hw->mac.max_rx_queues; i++) {
+ reg_val = IXGBE_VFREAD_REG(hw, IXGBE_VFRXDCTL(i));
+ reg_val &= ~IXGBE_RXDCTL_ENABLE;
+ IXGBE_VFWRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
+ }
+ /* Clear packet split and pool config */
+ IXGBE_WRITE_REG(hw, IXGBE_VFPSRTYPE, 0);
+
+ /* flush all queue disables */
+ IXGBE_WRITE_FLUSH(hw);
+ msec_delay(2);
+
+ return IXGBE_SUCCESS;
+}
+
+static int32_t ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, uint32_t *msg,
+ uint32_t *retmsg, uint16_t size)
+{
+ int32_t retval = ixgbe_write_mbx(hw, msg, size, 0);
+
+ if (retval)
+ return retval;
+
+ return ixgbe_poll_mbx(hw, retmsg, size, 0);
+}
+
+/**
+ * ixgbe_set_rar_vf - set device MAC address
+ * @hw: pointer to hardware structure
+ * @index: Receive address register to write
+ * @addr: Address to put into receive address register
+ * @vmdq: VMDq "set" or "pool" index
+ * @enable_addr: set flag that address is active
+ **/
+int32_t ixgbe_set_rar_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr,
+ uint32_t vmdq, uint32_t enable_addr)
+{
+ uint32_t msgbuf[3];
+ uint8_t *msg_addr = (uint8_t *)(&msgbuf[1]);
+ int32_t ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
+ memcpy(msg_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+
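+ /* strip the CTS bit before checking the reply type */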
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* if NACKed, the address was rejected; fall back to "perm_addr" */
+ if (!ret_val &&
+ (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
+ ixgbe_get_mac_addr_vf(hw, hw->mac.addr);
+ return IXGBE_ERR_MBX;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_update_mc_addr_list_vf - Update Multicast addresses
+ * @hw: pointer to the HW structure
+ * @mc_addr_list: array of multicast addresses to program
+ * @mc_addr_count: number of multicast addresses to program
+ * @next: caller supplied function to return next address in list
+ * @clear: unused
+ *
+ * Updates the Multicast Table Array.
+ **/
+int32_t ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, uint8_t *mc_addr_list,
+ uint32_t mc_addr_count, ixgbe_mc_addr_itr next,
+ bool clear)
+{
+ uint32_t msgbuf[IXGBE_VFMAILBOX_SIZE];
+ uint16_t *vector_list = (uint16_t *)&msgbuf[1];
+ uint32_t vector;
+ uint32_t cnt, i;
+ uint32_t vmdq;
+
+ DEBUGFUNC("ixgbe_update_mc_addr_list_vf");
+
+ /* Each entry in the list uses one 16-bit word. We have 30
+ * 16-bit words available in our HW msg buffer (minus 1 for the
+ * msg type). That's 30 hash values if we pack 'em right. If
+ * there are more than 30 MC addresses to add then punt the
+ * extras for now and then add code to handle more than 30 later.
+ * It would be unusual for a server to request that many multicast
+ * addresses except in large enterprise network environments.
+ */
+
+ DEBUGOUT1("MC Addr Count = %d\n", mc_addr_count);
+
+ cnt = (mc_addr_count > IXGBE_MAX_MULTICAST_ADDRESSES_VF) ?
+     IXGBE_MAX_MULTICAST_ADDRESSES_VF : mc_addr_count;
+ msgbuf[0] = IXGBE_VF_SET_MULTICAST;
+ msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;
+
+ for (i = 0; i < cnt; i++) {
+ vector = ixgbe_mta_vector(hw, next(hw, &mc_addr_list, &vmdq));
+ DEBUGOUT1("Hash value = 0x%03X\n", vector);
+ vector_list[i] = (uint16_t)vector;
+ }
+
+ return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
+     IXGBE_VFMAILBOX_SIZE);
+}
+
+/**
+ * ixgbevf_update_xcast_mode - Update Multicast mode
+ * @hw: pointer to the HW structure
+ * @xcast_mode: new multicast mode
+ *
+ * Updates the Multicast Mode of VF.
+ **/
+int32_t ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
+{
+ uint32_t msgbuf[2];
+ int32_t err;
+
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_12:
+ /* New modes were introduced in API version 1.3 */
+ if (xcast_mode > IXGBEVF_XCAST_MODE_ALLMULTI)
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ /* Fall through */
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
+ break;
+ default:
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ }
+
+ msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
+ msgbuf[1] = xcast_mode;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (err)
+ return err;
+
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+ if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
+ return IXGBE_ERR_FEATURE_NOT_SUPPORTED;
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_get_link_state_vf - Get VF link state from PF
+ * @hw: pointer to the HW structure
+ * @link_state: link state storage
+ *
+ * Returns the status of the operation: error or success.
+ **/
+int32_t ixgbe_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
+{
+ uint32_t msgbuf[2];
+ int32_t err;
+ int32_t ret_val;
+
+ msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
+ msgbuf[1] = 0x0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+
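+ /* on success the PF reports the link state in msgbuf[1] */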
+ if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
+ ret_val = IXGBE_ERR_MBX;
+ } else {
+ ret_val = IXGBE_SUCCESS;
+ *link_state = msgbuf[1];
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_set_vfta_vf - Set/Unset vlan filter table address
+ * @hw: pointer to the HW structure
+ * @vlan: 12 bit VLAN ID
+ * @vind: unused by VF drivers
+ * @vlan_on: if TRUE then set bit, else clear bit
+ * @vlvf_bypass: boolean flag indicating that updating the default pool is okay
+ *
+ * Turn on/off specified VLAN in the VLAN filter table.
+ **/
+int32_t ixgbe_set_vfta_vf(struct ixgbe_hw *hw, uint32_t vlan, uint32_t vind,
+ bool vlan_on, bool vlvf_bypass)
+{
+ uint32_t msgbuf[2];
+ int32_t ret_val;
+
+ msgbuf[0] = IXGBE_VF_SET_VLAN;
+ msgbuf[1] = vlan;
+ /* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
+ msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (!ret_val && (msgbuf[0] & IXGBE_VT_MSGTYPE_SUCCESS))
+ return IXGBE_SUCCESS;
+
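+ /* return either the mailbox error or the PF's failure reply */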
+ return ret_val | (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE);
+}
+
+/**
+ * ixgbe_get_num_of_tx_queues_vf - Get number of TX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of transmit queues for the given adapter.
+ **/
+uint32_t ixgbe_get_num_of_tx_queues_vf(struct ixgbe_hw *hw)
+{
+ return IXGBE_VF_MAX_TX_QUEUES;
+}
+
+/**
+ * ixgbe_get_num_of_rx_queues_vf - Get number of RX queues
+ * @hw: pointer to hardware structure
+ *
+ * Returns the number of receive queues for the given adapter.
+ **/
+uint32_t ixgbe_get_num_of_rx_queues_vf(struct ixgbe_hw *hw)
+{
+ return IXGBE_VF_MAX_RX_QUEUES;
+}
+
+/**
+ * ixgbe_get_mac_addr_vf - Read device MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: the MAC address
+ **/
+int32_t ixgbe_get_mac_addr_vf(struct ixgbe_hw *hw, uint8_t *mac_addr)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_ETH_LENGTH_OF_ADDRESS; i++)
+ mac_addr[i] = hw->mac.perm_addr[i];
+
+ return IXGBE_SUCCESS;
+}
+
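+/**
+ * ixgbevf_set_uc_addr_vf - Set or clear unicast MAC filters
+ * @hw: pointer to the HW structure
+ * @index: index of the macvlan filter; 1 starts a new list, 0 clears all
+ * @addr: MAC address to program, or NULL when clearing the list
+ **/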
+int32_t ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, uint32_t index, uint8_t *addr)
+{
+ uint32_t msgbuf[3], msgbuf_chk;
+ uint8_t *msg_addr = (uint8_t *)(&msgbuf[1]);
+ int32_t ret_val;
+
+ memset(msgbuf, 0, sizeof(msgbuf));
+ /*
+ * If index is one then this is the start of a new list and needs
+ * indication to the PF so it can do its own list management.
+ * If it is zero then that tells the PF to just clear all of
+ * this VF's macvlans and there is no new list.
+ */
+ msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
+ msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
+ msgbuf_chk = msgbuf[0];
+ if (addr)
+ memcpy(msg_addr, addr, IXGBE_ETH_LENGTH_OF_ADDRESS);
+
+ ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 3);
+ if (!ret_val) {
+ msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
+ return IXGBE_ERR_OUT_OF_MEM;
+ }
+
+ return ret_val;
+}
+
+/**
+ * ixgbe_setup_mac_link_vf - Setup MAC link settings
+ * @hw: pointer to hardware structure
+ * @speed: new link speed
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ * VFs cannot change link settings; this is a no-op kept for
+ * interface compatibility.
+ **/
+int32_t ixgbe_setup_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
+{
+ return IXGBE_SUCCESS;
+}
+
+/**
+ * ixgbe_check_mac_link_vf - Get link/speed status
+ * @hw: pointer to hardware structure
+ * @speed: pointer to link speed
+ * @link_up: TRUE if link is up, FALSE otherwise
+ * @autoneg_wait_to_complete: TRUE when waiting for completion is needed
+ *
+ * Reads the links register to determine if link is up and the current speed
+ **/
+int32_t ixgbe_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ bool *link_up, bool autoneg_wait_to_complete)
+{
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ int32_t ret_val = IXGBE_SUCCESS;
+ uint32_t in_msg = 0;
+ uint32_t links_reg;
+
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = TRUE;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down, no point in checking whether the PF is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500 usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ usec_delay(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550_vf) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550_vf) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* this speed is reserved on older MACs */
+ if (hw->mac.type >= ixgbe_mac_X550_vf)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) {
+ if (hw->api_version >= ixgbe_mbox_api_15)
+ mac->get_link_status = FALSE;
+ goto out;
+ }
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS; if it is a NACK, we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
+ ret_val = IXGBE_ERR_MBX;
+ goto out;
+ }
+
+ /* the PF is talking; if we timed out in the past, we reinit */
+ if (!mbx->timeout) {
+ ret_val = IXGBE_ERR_TIMEOUT;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = FALSE;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
+/**
+ * ixgbevf_rlpml_set_vf - Set the maximum receive packet length
+ * @hw: pointer to the HW structure
+ * @max_size: value to assign to max frame size
+ **/
+int32_t ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, uint16_t max_size)
+{
+ uint32_t msgbuf[2];
+ int32_t retval;
+
+ msgbuf[0] = IXGBE_VF_SET_LPE;
+ msgbuf[1] = max_size;
+
+ retval = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
+ if (retval)
+ return retval;
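+ /* the PF nacked the new maximum frame size */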
+ if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
+ (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
+ return IXGBE_ERR_MBX;
+
+ return 0;
+}
+
+/**
+ * ixgbevf_negotiate_api_version - Negotiate supported API version
+ * @hw: pointer to the HW structure
+ * @api: integer containing requested API version
+ **/
+int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api)
+{
+ int err;
+ uint32_t msg[3];
+
+ /* Negotiate the mailbox API version */
+ msg[0] = IXGBE_VF_API_NEGOTIATE;
+ msg[1] = api;
+ msg[2] = 0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 3);
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /* Store value and return 0 on success */
+ if (msg[0] == (IXGBE_VF_API_NEGOTIATE | IXGBE_VT_MSGTYPE_SUCCESS)) {
+ hw->api_version = api;
+ return 0;
+ }
+
+ err = IXGBE_ERR_INVALID_ARGUMENT;
+ }
+
+ return err;
+}
+
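+/**
+ * ixgbevf_get_queues - Fetch queue configuration from the PF
+ * @hw: pointer to the HW structure
+ * @num_tcs: number of traffic classes supported by the PF
+ * @default_tc: default traffic class the VF should use
+ **/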
+int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
+ unsigned int *default_tc)
+{
+ int err;
+ uint32_t msg[5];
+
+ /* do nothing if API doesn't support ixgbevf_get_queues */
+ switch (hw->api_version) {
+ case ixgbe_mbox_api_11:
+ case ixgbe_mbox_api_12:
+ case ixgbe_mbox_api_13:
+ case ixgbe_mbox_api_15:
+ break;
+ default:
+ return 0;
+ }
+
+ /* Fetch queue configuration from the PF */
+ msg[0] = IXGBE_VF_GET_QUEUES;
+ msg[1] = msg[2] = msg[3] = msg[4] = 0;
+
+ err = ixgbevf_write_msg_read_ack(hw, msg, msg, 5);
+ if (!err) {
+ msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
+
+ /*
+ * if we didn't get a SUCCESS there must have been
+ * some sort of mailbox error so we should treat it
+ * as such
+ */
+ if (msg[0] != (IXGBE_VF_GET_QUEUES | IXGBE_VT_MSGTYPE_SUCCESS))
+ return IXGBE_ERR_MBX;
+
+ /* record and validate values from message */
+ hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
+ if (hw->mac.max_tx_queues == 0 ||
+ hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
+ hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
+
+ hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
+ if (hw->mac.max_rx_queues == 0 ||
+ hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
+ hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
+
+ *num_tcs = msg[IXGBE_VF_TRANS_VLAN];
+ /* in case of unknown state assume we cannot tag frames */
+ if (*num_tcs > hw->mac.max_rx_queues)
+ *num_tcs = 1;
+
+ *default_tc = msg[IXGBE_VF_DEF_QUEUE];
+ /* default to queue 0 on out-of-bounds queue number */
+ if (*default_tc >= hw->mac.max_tx_queues)
+ *default_tc = 0;
+ }
+
+ return err;
+}
Index: share/man/man4/Makefile
===================================================================
RCS file: /cvs/src/share/man/man4/Makefile,v
diff -u -p -u -p -r1.851 Makefile
--- share/man/man4/Makefile 4 Sep 2024 11:12:53 -0000 1.851
+++ share/man/man4/Makefile 27 Oct 2024 05:43:11 -0000
@@ -50,7 +50,7 @@ MAN= aac.4 abcrtc.4 abl.4 ac97.4 acphy.4
inet.4 inet6.4 inphy.4 intelpmc.4 \
iophy.4 iosf.4 ip.4 ip6.4 ipcomp.4 ipgphy.4 \
ipmi.4 ips.4 ipsec.4 ipw.4 isa.4 isagpio.4 isapnp.4 islrtc.4 \
- it.4 itherm.4 iwi.4 iwn.4 iwm.4 iwx.4 ix.4 ixgb.4 ixl.4 \
+ it.4 itherm.4 iwi.4 iwn.4 iwm.4 iwx.4 ix.4 ixgb.4 ixl.4 ixv.4 \
jmb.4 jme.4 jmphy.4 \
kate.4 kcov.4 km.4 ksmn.4 kstat.4 ksyms.4 kubsan.4 kue.4 \
lc.4 lge.4 lii.4 \
Index: share/man/man4/ixv.4
===================================================================
RCS file: share/man/man4/ixv.4
diff -N share/man/man4/ixv.4
--- /dev/null 1 Jan 1970 00:00:00 -0000
+++ share/man/man4/ixv.4 27 Oct 2024 05:43:11 -0000
@@ -0,0 +1,57 @@
+.\" $OpenBSD$
+.\"
+.\" Copyright (c) 2024 YASUOKA Masahiko <yasuoka@yasuoka.net>
+.\" Copyright (c) 2019 Jonathan Matthew <jmatthew@openbsd.org>
+.\"
+.\" Permission to use, copy, modify, and distribute this software for any
+.\" purpose with or without fee is hereby granted, provided that the above
+.\" copyright notice and this permission notice appear in all copies.
+.\"
+.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+.\"
+.Dd $Mdocdate$
+.Dt IXV 4
+.Os
+.Sh NAME
+.Nm ixv
+.Nd Intel 10Gb Ethernet Virtual Function device
+.Sh SYNOPSIS
+.Cd "ixv* at pci?"
+.Sh DESCRIPTION
+The
+.Nm
+driver supports the SR-IOV Virtual Functions of Intel 82599,
+X540, and X550-based Ethernet controller devices.
+.Sh SEE ALSO
+.Xr arp 4 ,
+.Xr ifmedia 4 ,
+.Xr intro 4 ,
+.Xr netintro 4 ,
+.Xr pci 4 ,
+.Xr hostname.if 5 ,
+.Xr ifconfig 8
+.Sh BUGS
+The VF relies on the
+.Dq vlan-filter
+feature to receive VLAN-tagged packets.
+Because the current driver does not provide a way to configure it,
+VLAN-tagged packets are not passed to the VF.
+As a workaround,
+it may be possible to configure a VLAN ID for the VF
+on the host running the primary function.
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Ox 7.7 .
+.Sh AUTHORS
+The
+.Nm
+driver was written by
+Intel Corporation and ported to OpenBSD by NAITO Yuichiro.
Index: sys/arch/amd64/conf/GENERIC
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/GENERIC,v
diff -u -p -u -p -r1.526 GENERIC
--- sys/arch/amd64/conf/GENERIC 4 Sep 2024 07:45:08 -0000 1.526
+++ sys/arch/amd64/conf/GENERIC 27 Oct 2024 05:43:11 -0000
@@ -529,6 +529,7 @@ msk* at mskc? # each port of above
em* at pci? # Intel Pro/1000 ethernet
ixgb* at pci? # Intel Pro/10Gb ethernet
ix* at pci? # Intel 82598EB 10Gb ethernet
+#ixv* at pci? # Virtual Function of Intel 82599
myx* at pci? # Myricom Myri-10G 10Gb ethernet
oce* at pci? # Emulex OneConnect 10Gb ethernet
txp* at pci? # 3com 3CR990