git: bc761988b724 - main - ice: Add host SR-IOV support
Date: Fri, 18 Jul 2025 19:16:13 UTC
The branch main has been updated by imp:
URL: https://cgit.FreeBSD.org/src/commit/?id=bc761988b724587fe9ed5b99858b05ef842243ac
commit bc761988b724587fe9ed5b99858b05ef842243ac
Author: Krzysztof Galazka <krzysztof.galazka@intel.com>
AuthorDate: 2025-01-15 23:21:04 +0000
Commit: Warner Losh <imp@FreeBSD.org>
CommitDate: 2025-07-18 19:16:16 +0000
ice: Add host SR-IOV support
Enable basic SR-IOV support for E800 adapters.
Authored-by: Eric Joyner <erj@FreeBSD.org>
Signed-off-by: Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by: imp
Pull Request: https://github.com/freebsd/freebsd-src/pull/1573
---
sys/conf/files.amd64 | 4 +
sys/dev/ice/ice_features.h | 2 +
sys/dev/ice/ice_iflib.h | 16 +
sys/dev/ice/ice_iov.c | 1732 ++++++++++++++++++++++++++++++++++++++++++++
sys/dev/ice/ice_iov.h | 115 +++
sys/dev/ice/ice_lib.c | 20 +
sys/dev/ice/ice_lib.h | 4 +
sys/dev/ice/ice_vf_mbx.c | 471 ++++++++++++
sys/dev/ice/ice_vf_mbx.h | 67 ++
sys/dev/ice/if_ice_iflib.c | 117 +++
sys/modules/ice/Makefile | 1 +
11 files changed, 2549 insertions(+)
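As a rough usage sketch of how the new PF host support is exercised, the configuration below follows the standard pci_iov(4)/iovctl(8) workflow; it is illustrative only and not taken from the patch itself. The per-VF keys (mac-addr, num-queues, mac-anti-spoof, allow-set-mac, allow-promisc, mirror-src-vsi) are the ones registered in ice_iov_attach() in the diff below, while the interface name "ice0", the VF count, and the values shown are assumptions.

  # /etc/iovctl.conf -- example values only; "ice0" is an assumed PF device name
  PF {
          device : "ice0";
          num_vfs : 2;
  }

  VF-0 {
          mac-addr : "02:01:02:03:04:05";   # key from this driver's VF schema
          num-queues : 4;
          mac-anti-spoof : true;
          allow-set-mac : false;
  }

  # Create the VFs described above; tear them down again with -D:
  # iovctl -C -f /etc/iovctl.conf
  # iovctl -D -d ice0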
diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
index 0584fc29d039..678d288c2d86 100644
--- a/sys/conf/files.amd64
+++ b/sys/conf/files.amd64
@@ -191,6 +191,10 @@ dev/ice/irdma_di_if.m optional ice pci \
compile-with "${NORMAL_M} -I$S/dev/ice"
dev/ice/ice_ddp_common.c optional ice pci \
compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_iov.c optional ice pci pci_iov \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
+dev/ice/ice_vf_mbx.c optional ice pci pci_iov \
+ compile-with "${NORMAL_C} -I$S/dev/ice"
ice_ddp.c optional ice_ddp \
compile-with "${AWK} -f $S/tools/fw_stub.awk ice_ddp.fw:ice_ddp:0x01032900 -mice_ddp -c${.TARGET}" \
no-ctfconvert no-implicit-rule before-depend local \
diff --git a/sys/dev/ice/ice_features.h b/sys/dev/ice/ice_features.h
index 821abe4806ca..5b23757b1c98 100644
--- a/sys/dev/ice/ice_features.h
+++ b/sys/dev/ice/ice_features.h
@@ -91,7 +91,9 @@ enum feat_list {
static inline void
ice_disable_unsupported_features(ice_bitmap_t __unused *bitmap)
{
+#ifndef PCI_IOV
ice_clear_bit(ICE_FEATURE_SRIOV, bitmap);
+#endif
#ifndef DEV_NETMAP
ice_clear_bit(ICE_FEATURE_NETMAP, bitmap);
#endif
diff --git a/sys/dev/ice/ice_iflib.h b/sys/dev/ice/ice_iflib.h
index 3a5dc201189a..e1d5307a9516 100644
--- a/sys/dev/ice/ice_iflib.h
+++ b/sys/dev/ice/ice_iflib.h
@@ -139,6 +139,9 @@ struct ice_irq_vector {
* @tc: traffic class queue belongs to
* @q_handle: qidx in tc; used in TXQ enable functions
*
+ * ice_iov.c requires the following parameters (when PCI_IOV is defined):
+ * @itr_idx: ITR index to use for this queue
+ *
* Other parameters may be iflib driver specific
*/
struct ice_tx_queue {
@@ -153,6 +156,9 @@ struct ice_tx_queue {
u32 me;
u16 q_handle;
u8 tc;
+#ifdef PCI_IOV
+ u8 itr_idx;
+#endif
/* descriptor writeback status */
qidx_t *tx_rsq;
@@ -175,6 +181,9 @@ struct ice_tx_queue {
* @stats: queue statistics
* @tc: traffic class queue belongs to
*
+ * ice_iov.c requires the following parameters (when PCI_IOV is defined):
+ * @itr_idx: ITR index to use for this queue
+ *
* Other parameters may be iflib driver specific
*/
struct ice_rx_queue {
@@ -187,6 +196,9 @@ struct ice_rx_queue {
struct ice_irq_vector *irqv;
u32 me;
u8 tc;
+#ifdef PCI_IOV
+ u8 itr_idx;
+#endif
struct if_irq que_irq;
};
@@ -332,6 +344,10 @@ struct ice_softc {
ice_declare_bitmap(feat_cap, ICE_FEATURE_COUNT);
ice_declare_bitmap(feat_en, ICE_FEATURE_COUNT);
+#ifdef PCI_IOV
+ struct ice_vf *vfs;
+ u16 num_vfs;
+#endif
struct ice_resmgr os_imgr;
/* For mirror interface */
struct ice_mirr_if *mirr_if;
diff --git a/sys/dev/ice/ice_iov.c b/sys/dev/ice/ice_iov.c
new file mode 100644
index 000000000000..75b50af0d1bc
--- /dev/null
+++ b/sys/dev/ice/ice_iov.c
@@ -0,0 +1,1732 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/* Copyright (c) 2025, Intel Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file ice_iov.c
+ * @brief Virtualization support functions
+ *
+ * Contains functions for enabling and managing PCIe virtual function devices,
+ * including enabling new VFs, and managing VFs over the virtchnl interface.
+ */
+
+#include "ice_iov.h"
+
+static struct ice_vf *ice_iov_get_vf(struct ice_softc *sc, int vf_num);
+static void ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf);
+static void ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf);
+static void ice_iov_setup_intr_mapping(struct ice_softc *sc, struct ice_vf *vf);
+
+static void ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static bool ice_vc_isvalid_ring_len(u16 ring_len);
+static void ice_vc_cfg_vsi_qs_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_rss_key_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_set_rss_hena_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_enable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_notify_vf_link_state(struct ice_softc *sc, struct ice_vf *vf);
+static void ice_vc_disable_queues_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_irq_map_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_get_stats_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_eth_stats_to_virtchnl_eth_stats(struct ice_eth_stats *istats,
+ struct virtchnl_eth_stats *vstats);
+static void ice_vc_cfg_rss_lut_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_cfg_promisc_mode_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static void ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf,
+ u8 *msg_buf);
+static enum virtchnl_status_code ice_iov_err_to_virt_err(int ice_err);
+static int ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr);
+
+/**
+ * ice_iov_attach - Initialize SR-IOV PF host support
+ * @sc: device softc structure
+ *
+ * Initialize SR-IOV PF host support at the end of the driver attach process.
+ *
+ * @pre Must be called from sleepable context (calls malloc() w/ M_WAITOK)
+ *
+ * @returns 0 if successful, or
+ * - ENOMEM if there is no memory for the PF/VF schemas or iov device
+ * - ENXIO if the device isn't PCI-E or doesn't support the same SR-IOV
+ * version as the kernel
+ * - ENOENT if the device doesn't have the SR-IOV capability
+ */
+int
+ice_iov_attach(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+ nvlist_t *pf_schema, *vf_schema;
+ int error;
+
+ pf_schema = pci_iov_schema_alloc_node();
+ vf_schema = pci_iov_schema_alloc_node();
+
+ pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+ IOV_SCHEMA_HASDEFAULT, TRUE);
+ pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_uint16(vf_schema, "num-queues",
+ IOV_SCHEMA_HASDEFAULT, ICE_DEFAULT_VF_QUEUES);
+ pci_iov_schema_add_uint16(vf_schema, "mirror-src-vsi",
+ IOV_SCHEMA_HASDEFAULT, ICE_INVALID_MIRROR_VSI);
+
+ error = pci_iov_attach(dev, pf_schema, vf_schema);
+ if (error != 0) {
+ device_printf(dev,
+ "pci_iov_attach failed (error=%s)\n",
+ ice_err_str(error));
+ ice_clear_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+ } else
+ ice_set_bit(ICE_FEATURE_SRIOV, sc->feat_en);
+
+ return (error);
+}
+
+/**
+ * ice_iov_detach - Teardown SR-IOV PF host support
+ * @sc: device softc structure
+ *
+ * Teardown SR-IOV PF host support at the start of the driver detach process.
+ *
+ * @returns 0 if successful or if IOV support hasn't been set up, or
+ * - EBUSY if VFs still exist
+ */
+int
+ice_iov_detach(struct ice_softc *sc)
+{
+ device_t dev = sc->dev;
+ int error;
+
+ error = pci_iov_detach(dev);
+ if (error != 0) {
+ device_printf(dev,
+ "pci_iov_detach failed (error=%s)\n",
+ ice_err_str(error));
+ }
+
+ return (error);
+}
+
+/**
+ * ice_iov_init - Called by the OS before the first VF is created.
+ * @sc: device softc structure
+ * @num_vfs: number of VFs to setup resources for
+ * @params: configuration parameters for the PF
+ *
+ * @returns 0 if successful or an error code on failure
+ */
+int
+ice_iov_init(struct ice_softc *sc, uint16_t num_vfs, const nvlist_t *params __unused)
+{
+ /* Allocate array of VFs, for tracking */
+ sc->vfs = (struct ice_vf *)malloc(sizeof(struct ice_vf) * num_vfs, M_ICE, M_NOWAIT |
+ M_ZERO);
+ if (sc->vfs == NULL)
+ return (ENOMEM);
+
+ /* Initialize each VF with basic information */
+ for (int i = 0; i < num_vfs; i++)
+ sc->vfs[i].vf_num = i;
+
+ /* Save off number of configured VFs */
+ sc->num_vfs = num_vfs;
+
+ return (0);
+}
+
+/**
+ * ice_iov_get_vf - Get pointer to VF at given index
+ * @sc: device softc structure
+ * @vf_num: Index of VF to retrieve
+ *
+ * @remark will throw an assertion if vf_num is not in the
+ * range of allocated VFs
+ *
+ * @returns a pointer to the VF structure at the given index
+ */
+static struct ice_vf *
+ice_iov_get_vf(struct ice_softc *sc, int vf_num)
+{
+ MPASS(vf_num < sc->num_vfs);
+
+ return &sc->vfs[vf_num];
+}
+
+/**
+ * ice_iov_add_vf - Called by the OS for each VF to create
+ * @sc: device softc structure
+ * @vfnum: index of VF to configure
+ * @params: configuration parameters for the VF
+ *
+ * @returns 0 if successful or an error code on failure
+ */
+int
+ice_iov_add_vf(struct ice_softc *sc, uint16_t vfnum, const nvlist_t *params)
+{
+ struct ice_tx_queue *txq;
+ struct ice_rx_queue *rxq;
+ device_t dev = sc->dev;
+ struct ice_vsi *vsi;
+ struct ice_vf *vf;
+ int vf_num_queues;
+ const void *mac;
+ size_t size;
+ int error;
+ int i;
+
+ vf = ice_iov_get_vf(sc, vfnum);
+ vf->vf_flags = VF_FLAG_ENABLED;
+
+ /* This VF needs at least one VSI */
+ vsi = ice_alloc_vsi(sc, ICE_VSI_VF);
+ if (vsi == NULL)
+ return (ENOMEM);
+ vf->vsi = vsi;
+ vsi->vf_num = vfnum;
+
+ vf_num_queues = nvlist_get_number(params, "num-queues");
+ /* Validate and clamp value if invalid */
+ if (vf_num_queues < 1 || vf_num_queues > ICE_MAX_SCATTERED_QUEUES)
+ device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
+ vf_num_queues, vf->vf_num);
+ if (vf_num_queues < 1) {
+ device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
+ vf_num_queues = 1;
+ } else if (vf_num_queues > ICE_MAX_SCATTERED_QUEUES) {
+ device_printf(dev, "Setting VF %d num-queues to %d\n",
+ vf->vf_num, ICE_MAX_SCATTERED_QUEUES);
+ vf_num_queues = ICE_MAX_SCATTERED_QUEUES;
+ }
+ vsi->qmap_type = ICE_RESMGR_ALLOC_SCATTERED;
+
+ /* Reserve VF queue allocation from PF queues */
+ ice_alloc_vsi_qmap(vsi, vf_num_queues, vf_num_queues);
+ vsi->num_tx_queues = vsi->num_rx_queues = vf_num_queues;
+
+ /* Assign Tx queues from PF space */
+ error = ice_resmgr_assign_scattered(&sc->tx_qmgr, vsi->tx_qmap,
+ vsi->num_tx_queues);
+ if (error) {
+ device_printf(sc->dev, "Unable to assign VF Tx queues: %s\n",
+ ice_err_str(error));
+ goto release_vsi;
+ }
+
+ /* Assign Rx queues from PF space */
+ error = ice_resmgr_assign_scattered(&sc->rx_qmgr, vsi->rx_qmap,
+ vsi->num_rx_queues);
+ if (error) {
+ device_printf(sc->dev, "Unable to assign VF Rx queues: %s\n",
+ ice_err_str(error));
+ goto release_vsi;
+ }
+
+ vsi->max_frame_size = ICE_MAX_FRAME_SIZE;
+
+ /* Allocate queue structure memory */
+ vsi->tx_queues = (struct ice_tx_queue *)
+ malloc(sizeof(struct ice_tx_queue) * vsi->num_tx_queues, M_ICE,
+ M_NOWAIT | M_ZERO);
+ if (!vsi->tx_queues) {
+ device_printf(sc->dev, "VF-%d: Unable to allocate Tx queue memory\n",
+ vfnum);
+ error = ENOMEM;
+ goto release_vsi;
+ }
+ for (i = 0, txq = vsi->tx_queues; i < vsi->num_tx_queues; i++, txq++) {
+ txq->me = i;
+ txq->vsi = vsi;
+ }
+
+ /* Allocate queue structure memory */
+ vsi->rx_queues = (struct ice_rx_queue *)
+ malloc(sizeof(struct ice_rx_queue) * vsi->num_rx_queues, M_ICE,
+ M_NOWAIT | M_ZERO);
+ if (!vsi->rx_queues) {
+ device_printf(sc->dev, "VF-%d: Unable to allocate Rx queue memory\n",
+ vfnum);
+ error = ENOMEM;
+ goto free_txqs;
+ }
+ for (i = 0, rxq = vsi->rx_queues; i < vsi->num_rx_queues; i++, rxq++) {
+ rxq->me = i;
+ rxq->vsi = vsi;
+ }
+
+ /* Allocate space to store the IRQ vector data */
+ vf->num_irq_vectors = vf_num_queues + 1;
+ vf->tx_irqvs = (struct ice_irq_vector *)
+ malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
+ M_ICE, M_NOWAIT);
+ if (!vf->tx_irqvs) {
+ device_printf(sc->dev,
+ "Unable to allocate TX irqv memory for VF-%d's %d vectors\n",
+ vfnum, vf->num_irq_vectors);
+ error = ENOMEM;
+ goto free_rxqs;
+ }
+ vf->rx_irqvs = (struct ice_irq_vector *)
+ malloc(sizeof(struct ice_irq_vector) * (vf->num_irq_vectors),
+ M_ICE, M_NOWAIT);
+ if (!vf->rx_irqvs) {
+ device_printf(sc->dev,
+ "Unable to allocate RX irqv memory for VF-%d's %d vectors\n",
+ vfnum, vf->num_irq_vectors);
+ error = ENOMEM;
+ goto free_txirqvs;
+ }
+
+ /* Assign VF interrupts from PF space */
+ if (!(vf->vf_imap =
+ (u16 *)malloc(sizeof(u16) * vf->num_irq_vectors,
+ M_ICE, M_NOWAIT))) {
+ device_printf(dev, "Unable to allocate VF-%d imap memory\n", vfnum);
+ error = ENOMEM;
+ goto free_rxirqvs;
+ }
+ error = ice_resmgr_assign_contiguous(&sc->dev_imgr, vf->vf_imap, vf->num_irq_vectors);
+ if (error) {
+ device_printf(dev, "Unable to assign VF-%d interrupt mapping: %s\n",
+ vfnum, ice_err_str(error));
+ goto free_imap;
+ }
+
+ if (nvlist_exists_binary(params, "mac-addr")) {
+ mac = nvlist_get_binary(params, "mac-addr", &size);
+ bcopy(mac, vf->mac, ETHER_ADDR_LEN);
+
+ if (nvlist_get_bool(params, "allow-set-mac"))
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+ } else
+ /*
+ * If the administrator has not specified a MAC address then
+ * we must allow the VF to choose one.
+ */
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+ if (nvlist_get_bool(params, "mac-anti-spoof"))
+ vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+ if (nvlist_get_bool(params, "allow-promisc"))
+ vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+ vsi->mirror_src_vsi = nvlist_get_number(params, "mirror-src-vsi");
+
+ vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+ /* Create and setup VSI in HW */
+ error = ice_initialize_vsi(vsi);
+ if (error) {
+ device_printf(sc->dev, "Unable to initialize VF %d VSI: %s\n",
+ vfnum, ice_err_str(error));
+ goto release_imap;
+ }
+
+ ice_iov_ready_vf(sc, vf);
+
+ return (0);
+
+release_imap:
+ ice_resmgr_release_map(&sc->dev_imgr, vf->vf_imap,
+ vf->num_irq_vectors);
+free_imap:
+ free(vf->vf_imap, M_ICE);
+ vf->vf_imap = NULL;
+free_rxirqvs:
+ free(vf->rx_irqvs, M_ICE);
+ vf->rx_irqvs = NULL;
+free_txirqvs:
+ free(vf->tx_irqvs, M_ICE);
+ vf->tx_irqvs = NULL;
+free_rxqs:
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+free_txqs:
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+release_vsi:
+ ice_release_vsi(vsi);
+ vf->vsi = NULL;
+ return (error);
+}
+
+/**
+ * ice_iov_uninit - Called by the OS when VFs are destroyed
+ * @sc: device softc structure
+ */
+void
+ice_iov_uninit(struct ice_softc *sc)
+{
+ struct ice_vf *vf;
+ struct ice_vsi *vsi;
+
+ /* Release per-VF resources */
+ for (int i = 0; i < sc->num_vfs; i++) {
+ vf = &sc->vfs[i];
+ vsi = vf->vsi;
+
+ /* Free VF interrupt reservation */
+ if (vf->vf_imap) {
+ free(vf->vf_imap, M_ICE);
+ vf->vf_imap = NULL;
+ }
+
+ /* Free queue interrupt mapping trackers */
+ if (vf->tx_irqvs) {
+ free(vf->tx_irqvs, M_ICE);
+ vf->tx_irqvs = NULL;
+ }
+ if (vf->rx_irqvs) {
+ free(vf->rx_irqvs, M_ICE);
+ vf->rx_irqvs = NULL;
+ }
+
+ if (!vsi)
+ continue;
+
+ /* Free VSI queues */
+ if (vsi->tx_queues) {
+ free(vsi->tx_queues, M_ICE);
+ vsi->tx_queues = NULL;
+ }
+ if (vsi->rx_queues) {
+ free(vsi->rx_queues, M_ICE);
+ vsi->rx_queues = NULL;
+ }
+
+ ice_release_vsi(vsi);
+ vf->vsi = NULL;
+ }
+
+ /* Release memory used for VF tracking */
+ if (sc->vfs) {
+ free(sc->vfs, M_ICE);
+ sc->vfs = NULL;
+ }
+ sc->num_vfs = 0;
+}
+
+/**
+ * ice_iov_ready_vf - Setup VF interrupts and mark it as ready
+ * @sc: device softc structure
+ * @vf: driver's VF structure for the VF to update
+ *
+ * Clears VF reset triggering bit, sets up the PF<->VF interrupt
+ * mapping and marks the VF as active in the HW so that the VF
+ * driver can use it.
+ */
+static void
+ice_iov_ready_vf(struct ice_softc *sc, struct ice_vf *vf)
+{
+ struct ice_hw *hw = &sc->hw;
+ u32 reg;
+
+ /* Clear the triggering bit */
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
+ reg &= ~VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);
+
+ /* Setup VF interrupt allocation and mapping */
+ ice_iov_setup_intr_mapping(sc, vf);
+
+ /* Indicate to the VF that reset is done */
+ wr32(hw, VFGEN_RSTAT(vf->vf_num), VIRTCHNL_VFR_VFACTIVE);
+
+ ice_flush(hw);
+}
+
+/**
+ * ice_reset_vf - Perform a hardware reset (VFR) on a VF
+ * @sc: device softc structure
+ * @vf: driver's VF structure for VF to be reset
+ *
+ * Performs a VFR for the given VF. This function busy waits until the
+ * reset completes in the HW, notifies the VF that the reset is done
+ * by setting a bit in a HW register, then returns.
+ *
+ * @remark This also sets up the PF<->VF interrupt mapping and allocations in
+ * the hardware after the hardware reset is finished, via
+ * ice_iov_setup_intr_mapping()
+ */
+static void
+ice_reset_vf(struct ice_softc *sc, struct ice_vf *vf)
+{
+ u16 global_vf_num, reg_idx, bit_idx;
+ struct ice_hw *hw = &sc->hw;
+ int status;
+ u32 reg;
+ int i;
+
+ global_vf_num = vf->vf_num + hw->func_caps.vf_base_id;
+
+ reg = rd32(hw, VPGEN_VFRTRIG(vf->vf_num));
+ reg |= VPGEN_VFRTRIG_VFSWR_M;
+ wr32(hw, VPGEN_VFRTRIG(vf->vf_num), reg);
+
+ /* clear the VFLR bit for the VF in a GLGEN_VFLRSTAT register */
+ reg_idx = (global_vf_num) / 32;
+ bit_idx = (global_vf_num) % 32;
+ wr32(hw, GLGEN_VFLRSTAT(reg_idx), BIT(bit_idx));
+ ice_flush(hw);
+
+ /* Wait until there are no pending PCI transactions */
+ wr32(hw, PF_PCI_CIAA,
+ ICE_PCIE_DEV_STATUS | (global_vf_num << PF_PCI_CIAA_VF_NUM_S));
+
+ for (i = 0; i < ICE_PCI_CIAD_WAIT_COUNT; i++) {
+ reg = rd32(hw, PF_PCI_CIAD);
+ if (!(reg & PCIEM_STA_TRANSACTION_PND))
+ break;
+
+ DELAY(ICE_PCI_CIAD_WAIT_DELAY_US);
+ }
+ if (i == ICE_PCI_CIAD_WAIT_COUNT)
+ device_printf(sc->dev,
+ "VF-%d PCI transactions stuck\n", vf->vf_num);
+
+ /* Disable TX queues, which is required during VF reset */
+ status = ice_dis_vsi_txq(hw->port_info, vf->vsi->idx, 0, 0, NULL, NULL,
+ NULL, ICE_VF_RESET, vf->vf_num, NULL);
+ if (status)
+ device_printf(sc->dev,
+ "%s: Failed to disable LAN Tx queues: err %s aq_err %s\n",
+ __func__, ice_status_str(status),
+ ice_aq_str(hw->adminq.sq_last_status));
+
+ /* Then check for the VF reset to finish in HW */
+ for (i = 0; i < ICE_VPGEN_VFRSTAT_WAIT_COUNT; i++) {
+ reg = rd32(hw, VPGEN_VFRSTAT(vf->vf_num));
+ if ((reg & VPGEN_VFRSTAT_VFRD_M))
+ break;
+
+ DELAY(ICE_VPGEN_VFRSTAT_WAIT_DELAY_US);
+ }
+ if (i == ICE_VPGEN_VFRSTAT_WAIT_COUNT)
+ device_printf(sc->dev,
+ "VF-%d Reset is stuck\n", vf->vf_num);
+
+ ice_iov_ready_vf(sc, vf);
+}
+
+/**
+ * ice_vc_get_vf_res_msg - Handle VIRTCHNL_OP_GET_VF_RESOURCES msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a message from the VF listing its supported capabilities, and
+ * replies to the VF with information about what resources the PF has
+ * allocated for the VF.
+ *
+ * @remark This always replies to the VF with a success status; it does not
+ * fail. It's up to the VF driver to reject or complain about the PF's response.
+ */
+static void
+ice_vc_get_vf_res_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vf_resource *vf_res;
+ u16 vf_res_len;
+ u32 vf_caps;
+
+ /* XXX: Only support one VSI per VF, so this size doesn't need adjusting */
+ vf_res_len = sizeof(struct virtchnl_vf_resource);
+ vf_res = (struct virtchnl_vf_resource *)malloc(vf_res_len, M_ICE, M_WAITOK | M_ZERO);
+
+ vf_res->num_vsis = 1;
+ vf_res->num_queue_pairs = vf->vsi->num_tx_queues;
+ vf_res->max_vectors = vf_res->num_queue_pairs + 1;
+
+ vf_res->rss_key_size = ICE_GET_SET_RSS_KEY_EXTEND_KEY_SIZE;
+ vf_res->rss_lut_size = ICE_VSIQF_HLUT_ARRAY_SIZE;
+ vf_res->max_mtu = 0;
+
+ vf_res->vf_cap_flags = VF_BASE_MODE_OFFLOADS;
+ if (msg_buf != NULL) {
+ vf_caps = *((u32 *)(msg_buf));
+
+ if (vf_caps & VIRTCHNL_VF_CAP_ADV_LINK_SPEED)
+ vf_res->vf_cap_flags |= VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+ }
+
+ vf_res->vsi_res[0].vsi_id = vf->vsi->idx;
+ vf_res->vsi_res[0].num_queue_pairs = vf->vsi->num_tx_queues;
+ vf_res->vsi_res[0].vsi_type = VIRTCHNL_VSI_SRIOV;
+ vf_res->vsi_res[0].qset_handle = 0;
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_GET_VF_RESOURCES,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)vf_res, vf_res_len, NULL);
+
+ free(vf_res, M_ICE);
+}
+
+/**
+ * ice_vc_version_msg - Handle VIRTCHNL_OP_VERSION msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a version message from the VF, and responds to the VF with
+ * the version number that the PF will use.
+ *
+ * @remark This always replies to the VF with a success status; it does not
+ * fail.
+ */
+static void
+ice_vc_version_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct virtchnl_version_info *recv_vf_version;
+ struct ice_hw *hw = &sc->hw;
+ device_t dev = sc->dev;
+
+ recv_vf_version = (struct virtchnl_version_info *)msg_buf;
+
+ /* VFs running the 1.0 API expect to get 1.0 back */
+ if (VF_IS_V10(recv_vf_version)) {
+ vf->version.major = 1;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ } else {
+ vf->version.major = VIRTCHNL_VERSION_MAJOR;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR;
+
+ if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
+ (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
+ device_printf(dev,
+ "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
+ __func__, vf->vf_num,
+ recv_vf_version->major, recv_vf_version->minor,
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
+ }
+
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_VERSION,
+ VIRTCHNL_STATUS_SUCCESS, (u8 *)&vf->version, sizeof(vf->version),
+ NULL);
+}
+
+/**
+ * ice_vf_validate_mac - Validate MAC address before adding it
+ * @vf: VF tracking structure
+ * @addr: MAC address to validate
+ *
+ * Validate a MAC address before adding it to a VF during the handling
+ * of a VIRTCHNL_OP_ADD_ETH_ADDR operation. Notably, this also checks if
+ * the VF is allowed to set its own arbitrary MAC addresses.
+ *
+ * Returns 0 if MAC address is valid for the given vf
+ */
+static int
+ice_vf_validate_mac(struct ice_vf *vf, const uint8_t *addr)
+{
+
+ if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
+ return (EINVAL);
+
+ /*
+ * If the VF is not allowed to change its MAC address, don't let it
+ * set a MAC filter for an address that is not a multicast address and
+ * is not its assigned MAC.
+ */
+ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+ !(ETHER_IS_MULTICAST(addr) || !bcmp(addr, vf->mac, ETHER_ADDR_LEN)))
+ return (EPERM);
+
+ return (0);
+}
+
+/**
+ * ice_vc_add_eth_addr_msg - Handle VIRTCHNL_OP_ADD_ETH_ADDR msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a list of MAC addresses from the VF and adds those addresses
+ * to the VSI's filter list.
+ */
+static void
+ice_vc_add_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct ice_hw *hw = &sc->hw;
+ int error = 0;
+
+ addr_list = (struct virtchnl_ether_addr_list *)msg_buf;
+
+ for (int i = 0; i < addr_list->num_elements; i++) {
+ u8 *addr = addr_list->list[i].addr;
+
+ /* The type flag is currently ignored; every MAC address is
+ * treated as the LEGACY type
+ */
+
+ error = ice_vf_validate_mac(vf, addr);
+ if (error == EPERM) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Not permitted to add MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ } else if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Did not add invalid MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ continue;
+ }
+
+ error = ice_add_vsi_mac_filter(vf->vsi, addr);
+ if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Error adding MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_ETH_ADDR,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_del_eth_addr_msg - Handle VIRTCHNL_OP_DEL_ETH_ADDR msg from VF
+ * @sc: device private structure
+ * @vf: VF tracking structure
+ * @msg_buf: raw message buffer from the VF
+ *
+ * Receives a list of MAC addresses from the VF and removes those addresses
+ * from the VSI's filter list.
+ */
+static void
+ice_vc_del_eth_addr_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct virtchnl_ether_addr_list *addr_list;
+ struct ice_hw *hw = &sc->hw;
+ int error = 0;
+
+ addr_list = (struct virtchnl_ether_addr_list *)msg_buf;
+
+ for (int i = 0; i < addr_list->num_elements; i++) {
+ error = ice_remove_vsi_mac_filter(vf->vsi, addr_list->list[i].addr);
+ if (error) {
+ device_printf(sc->dev,
+ "%s: VF-%d: Error removing MAC addr for VSI %d\n",
+ __func__, vf->vf_num, vf->vsi->idx);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_ETH_ADDR,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_add_vlan_msg - Handle VIRTCHNL_OP_ADD_VLAN msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Adds the VLANs in msg_buf to the VF's VLAN filter list.
+ */
+static void
+ice_vc_add_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vlan_filter_list *vlan_list;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;
+
+ if (vlan_list->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vlan_list->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ status = ice_add_vlan_hw_filters(vsi, vlan_list->vlan_id,
+ vlan_list->num_elements);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failure adding VLANs to VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_ADD_VLAN,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_del_vlan_msg - Handle VIRTCHNL_OP_DEL_VLAN msg from VF
+ * @sc: PF's softc structure
+ * @vf: VF tracking structure
+ * @msg_buf: message buffer from VF
+ *
+ * Removes the VLANs in msg_buf from the VF's VLAN filter list.
+ */
+static void
+ice_vc_del_vlan_msg(struct ice_softc *sc, struct ice_vf *vf, u8 *msg_buf)
+{
+ struct ice_hw *hw = &sc->hw;
+ struct virtchnl_vlan_filter_list *vlan_list;
+ int status = 0;
+ enum virtchnl_status_code v_status = VIRTCHNL_STATUS_SUCCESS;
+ struct ice_vsi *vsi = vf->vsi;
+
+ vlan_list = (struct virtchnl_vlan_filter_list *)msg_buf;
+
+ if (vlan_list->vsi_id != vsi->idx) {
+ device_printf(sc->dev,
+ "VF-%d: Message has invalid VSI ID (expected %d, got %d)\n",
+ vf->vf_num, vsi->idx, vlan_list->vsi_id);
+ v_status = VIRTCHNL_STATUS_ERR_PARAM;
+ goto done;
+ }
+
+ status = ice_remove_vlan_hw_filters(vsi, vlan_list->vlan_id,
+ vlan_list->num_elements);
+ if (status) {
+ device_printf(sc->dev,
+ "VF-%d: Failure deleting VLANs from VSI %d, err %s aq_err %s\n",
+ vf->vf_num, vsi->idx, ice_status_str(status),
+ ice_aq_str(sc->hw.adminq.sq_last_status));
+ v_status = ice_iov_err_to_virt_err(status);
+ goto done;
+ }
+
+done:
+ ice_aq_send_msg_to_vf(hw, vf->vf_num, VIRTCHNL_OP_DEL_VLAN,
+ v_status, NULL, 0, NULL);
+}
+
+/**
+ * ice_vc_validate_ring_len - Check to see if a descriptor ring length is valid
+ * @ring_len: length of ring
+ *
+ * Check whether a ring size value is valid.
+ *
*** 1810 LINES SKIPPED ***