git: b685df314f13 - main - mana: some code refactoring and export apis for future RDMA driver

From: Wei Hu <whu@FreeBSD.org>
Date: Mon, 29 Aug 2022 05:29:09 UTC
The branch main has been updated by whu:

URL: https://cgit.FreeBSD.org/src/commit/?id=b685df314f1386ca769f45499c8823366f7e1d7d

commit b685df314f1386ca769f45499c8823366f7e1d7d
Author:     Wei Hu <whu@FreeBSD.org>
AuthorDate: 2022-08-29 05:03:33 +0000
Commit:     Wei Hu <whu@FreeBSD.org>
CommitDate: 2022-08-29 05:24:21 +0000

    mana: some code refactoring and export apis for future RDMA driver
    
    - Record the physical address for doorbell page region
      To support an RDMA device with multiple user contexts, each with its
      own doorbell page, record the start address of the doorbell page
      region so the RDMA driver can allocate doorbell IDs for user contexts.
    
    - Handle vport sharing between devices
      For outgoing packets, the PF requires the VF to configure the vport with
      the protection domain and doorbell ID of the kernel or user context
      issuing them. The vport can't be shared between different contexts.
    
      Implement the logic so that either the Ethernet device or the RDMA
      device takes exclusive ownership of the vport.
    
    - Add functions for allocating doorbell page from GDMA
      The RDMA device needs to allocate doorbell pages for each user context.
      Implement those functions and expose them for use by the RDMA driver.
    
    - Export Work Queue functions for use by RDMA driver
      The RDMA device may need to create Ethernet device queues for Queue
      Pair type RAW. This allows a user-mode context to access the Ethernet
      hardware queues directly. Export the supporting functions for use by
      the RDMA driver.
    
    - Define max values for SGL entries
      The maximum number of SGL entries should be computed from the maximum
      WQE size for the intended queue type and the corresponding OOB data
      size. This guarantees the hardware queue can successfully queue requests
      up to the queue depth exposed to the upper layer.
    
    - Define and process GDMA response code GDMA_STATUS_MORE_ENTRIES
      When doing memory registration, the PF may respond with
      GDMA_STATUS_MORE_ENTRIES to indicate that a follow-up request is needed.
      This is not an error and should be handled accordingly.
    
    - Define data structures for protection domain and memory registration
      The MANA hardware supports protection domains and memory registration
      for use in an RDMA environment. Add those definitions and expose them
      for use by the RDMA driver.
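    
    As a rough illustration (not part of this change), a future RDMA driver
    could pair the new doorbell allocation call with the recorded physical
    doorbell base along the lines of the sketch below. The example_* helper
    is hypothetical, and the assumption that doorbell pages sit back to back
    from phys_db_page_base is an assumption, not something this commit
    states.
    
      static int
      example_alloc_user_doorbell(struct gdma_context *gc, int *db_id,
          vm_paddr_t *db_pa)
      {
              int err;
    
              /* Ask GDMA for one doorbell page ID. */
              err = mana_gd_allocate_doorbell_page(gc, db_id);
              if (err)
                      return (err);
    
              /* Assumed layout: pages back to back from the physical base. */
              *db_pa = gc->phys_db_page_base +
                  (vm_paddr_t)*db_id * gc->db_page_size;
              return (0);
      }
    
      /* Teardown releases the ID with
       * mana_gd_destroy_doorbell_page(gc, db_id). */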
    
    MFC after:      2 weeks
    Sponsored by:   Microsoft
---
 sys/dev/mana/gdma.h       | 159 ++++++++++++++++++++++++++++++++++++++++++++--
 sys/dev/mana/gdma_main.c  |  87 +++++++++++++++++++++----
 sys/dev/mana/hw_channel.c |   2 +-
 sys/dev/mana/mana.h       |  19 +++++-
 sys/dev/mana/mana_en.c    |  75 ++++++++++++++++++----
 5 files changed, 311 insertions(+), 31 deletions(-)

diff --git a/sys/dev/mana/gdma.h b/sys/dev/mana/gdma.h
index 8b225800ccdb..5c60812b8b5d 100644
--- a/sys/dev/mana/gdma.h
+++ b/sys/dev/mana/gdma.h
@@ -43,6 +43,8 @@
 #include "gdma_util.h"
 #include "shm_channel.h"
 
+#define GDMA_STATUS_MORE_ENTRIES	0x00000105
+
 /* Structures labeled with "HW DATA" are exchanged with the hardware. All of
  * them are naturally aligned and hence don't need __packed.
  */
@@ -70,11 +72,19 @@ enum gdma_request_type {
 	GDMA_GENERATE_TEST_EQE		= 10,
 	GDMA_CREATE_QUEUE		= 12,
 	GDMA_DISABLE_QUEUE		= 13,
+	GDMA_ALLOCATE_RESOURCE_RANGE	= 22,
+	GDMA_DESTROY_RESOURCE_RANGE	= 24,
 	GDMA_CREATE_DMA_REGION		= 25,
 	GDMA_DMA_REGION_ADD_PAGES	= 26,
 	GDMA_DESTROY_DMA_REGION		= 27,
+	GDMA_CREATE_PD			= 29,
+	GDMA_DESTROY_PD			= 30,
+	GDMA_CREATE_MR			= 31,
+	GDMA_DESTROY_MR			= 32,
 };
 
+#define GDMA_RESOURCE_DOORBELL_PAGE	27
+
 enum gdma_queue_type {
 	GDMA_INVALID_QUEUE,
 	GDMA_SQ,
@@ -103,6 +113,7 @@ enum {
 	GDMA_DEVICE_MANA	= 2,
 };
 
+typedef uint64_t gdma_obj_handle_t;
 
 struct gdma_resource {
 	/* Protect the bitmap */
@@ -240,7 +251,7 @@ struct gdma_mem_info {
 	uint64_t		length;
 
 	/* Allocated by the PF driver */
-	uint64_t		gdma_region;
+	gdma_obj_handle_t	dma_region_handle;
 };
 
 #define REGISTER_ATB_MST_MKEY_LOWER_SIZE 8
@@ -414,6 +425,7 @@ struct gdma_context {
 	int			msix_rid;
 	void __iomem		*shm_base;
 	void __iomem		*db_page_base;
+	vm_paddr_t		phys_db_page_base;
 	uint32_t		db_page_size;
 
 	/* Shared memory chanenl (used to bootstrap HWC) */
@@ -485,6 +497,13 @@ struct gdma_wqe {
 #define MAX_TX_WQE_SIZE		512
 #define MAX_RX_WQE_SIZE		256
 
+#define MAX_TX_WQE_SGL_ENTRIES	((GDMA_MAX_SQE_SIZE -			   \
+			sizeof(struct gdma_sge) - INLINE_OOB_SMALL_SIZE) / \
+			sizeof(struct gdma_sge))
+
+#define MAX_RX_WQE_SGL_ENTRIES	((GDMA_MAX_RQE_SIZE -			   \
+			sizeof(struct gdma_sge)) / sizeof(struct gdma_sge))
+
 struct gdma_cqe {
 	uint32_t cqe_data[GDMA_COMP_DATA_SIZE / 4];
 
@@ -615,6 +634,26 @@ struct gdma_register_device_resp {
 	uint32_t db_id;
 }; /* HW DATA */
 
+struct gdma_allocate_resource_range_req {
+	struct gdma_req_hdr hdr;
+	uint32_t resource_type;
+	uint32_t num_resources;
+	uint32_t alignment;
+	uint32_t allocated_resources;
+};
+
+struct gdma_allocate_resource_range_resp {
+	struct gdma_resp_hdr hdr;
+	uint32_t allocated_resources;
+};
+
+struct gdma_destroy_resource_range_req {
+	struct gdma_req_hdr hdr;
+	uint32_t resource_type;
+	uint32_t num_resources;
+	uint32_t allocated_resources;
+};
+
 /* GDMA_CREATE_QUEUE */
 struct gdma_create_queue_req {
 	struct gdma_req_hdr hdr;
@@ -622,7 +661,7 @@ struct gdma_create_queue_req {
 	uint32_t reserved1;
 	uint32_t pdid;
 	uint32_t doolbell_id;
-	uint64_t gdma_region;
+	gdma_obj_handle_t gdma_region;
 	uint32_t reserved2;
 	uint32_t queue_size;
 	uint32_t log2_throttle_limit;
@@ -649,6 +688,28 @@ struct gdma_disable_queue_req {
 	uint32_t alloc_res_id_on_creation;
 }; /* HW DATA */
 
+enum atb_page_size {
+	ATB_PAGE_SIZE_4K,
+	ATB_PAGE_SIZE_8K,
+	ATB_PAGE_SIZE_16K,
+	ATB_PAGE_SIZE_32K,
+	ATB_PAGE_SIZE_64K,
+	ATB_PAGE_SIZE_128K,
+	ATB_PAGE_SIZE_256K,
+	ATB_PAGE_SIZE_512K,
+	ATB_PAGE_SIZE_1M,
+	ATB_PAGE_SIZE_2M,
+	ATB_PAGE_SIZE_MAX,
+};
+
+enum gdma_mr_access_flags {
+	GDMA_ACCESS_FLAG_LOCAL_READ = BIT(0),
+	GDMA_ACCESS_FLAG_LOCAL_WRITE = BIT(1),
+	GDMA_ACCESS_FLAG_REMOTE_READ = BIT(2),
+	GDMA_ACCESS_FLAG_REMOTE_WRITE = BIT(3),
+	GDMA_ACCESS_FLAG_REMOTE_ATOMIC = BIT(4),
+};
+
 /* GDMA_CREATE_DMA_REGION */
 struct gdma_create_dma_region_req {
 	struct gdma_req_hdr hdr;
@@ -675,14 +736,14 @@ struct gdma_create_dma_region_req {
 
 struct gdma_create_dma_region_resp {
 	struct gdma_resp_hdr hdr;
-	uint64_t gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
 /* GDMA_DMA_REGION_ADD_PAGES */
 struct gdma_dma_region_add_pages_req {
 	struct gdma_req_hdr hdr;
 
-	uint64_t gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 
 	uint32_t page_addr_list_len;
 	uint32_t reserved3;
@@ -694,9 +755,88 @@ struct gdma_dma_region_add_pages_req {
 struct gdma_destroy_dma_region_req {
 	struct gdma_req_hdr hdr;
 
-	uint64_t gdma_region;
+	gdma_obj_handle_t dma_region_handle;
 }; /* HW DATA */
 
+enum gdma_pd_flags {
+	GDMA_PD_FLAG_INVALID = 0,
+};
+
+struct gdma_create_pd_req {
+	struct gdma_req_hdr hdr;
+	enum gdma_pd_flags flags;
+	uint32_t reserved;
+};/* HW DATA */
+
+struct gdma_create_pd_resp {
+	struct gdma_resp_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+	uint32_t pd_id;
+	uint32_t reserved;
+};/* HW DATA */
+
+struct gdma_destroy_pd_req {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+};/* HW DATA */
+
+struct gdma_destory_pd_resp {
+	struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
+enum gdma_mr_type {
+	/* Guest Virtual Address - MRs of this type allow access
+	 * to memory mapped by PTEs associated with this MR using a virtual
+	 * address that is set up in the MST
+	 */
+	GDMA_MR_TYPE_GVA = 2,
+};
+
+struct gdma_create_mr_params {
+	gdma_obj_handle_t pd_handle;
+	enum gdma_mr_type mr_type;
+	union {
+		struct {
+			gdma_obj_handle_t dma_region_handle;
+			uint64_t virtual_address;
+			enum gdma_mr_access_flags access_flags;
+		} gva;
+	};
+};
+
+struct gdma_create_mr_request {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t pd_handle;
+	enum gdma_mr_type mr_type;
+	uint32_t reserved_1;
+
+	union {
+		struct {
+			gdma_obj_handle_t dma_region_handle;
+			uint64_t virtual_address;
+			enum gdma_mr_access_flags access_flags;
+		} gva;
+
+	};
+	uint32_t reserved_2;
+};/* HW DATA */
+
+struct gdma_create_mr_response {
+	struct gdma_resp_hdr hdr;
+	gdma_obj_handle_t mr_handle;
+	uint32_t lkey;
+	uint32_t rkey;
+};/* HW DATA */
+
+struct gdma_destroy_mr_request {
+	struct gdma_req_hdr hdr;
+	gdma_obj_handle_t mr_handle;
+};/* HW DATA */
+
+struct gdma_destroy_mr_response {
+	struct gdma_resp_hdr hdr;
+};/* HW DATA */
+
 int mana_gd_verify_vf_version(device_t dev);
 
 int mana_gd_register_device(struct gdma_dev *gd);
@@ -727,4 +867,13 @@ void mana_gd_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
 
 int mana_gd_send_request(struct gdma_context *gc, uint32_t req_len,
     const void *req, uint32_t resp_len, void *resp);
+
+int mana_gd_allocate_doorbell_page(struct gdma_context *gc,
+    int *doorbell_page);
+
+int mana_gd_destroy_doorbell_page(struct gdma_context *gc,
+    int doorbell_page);
+
+int mana_gd_destroy_dma_region(struct gdma_context *gc,
+    gdma_obj_handle_t dma_region_handle);
 #endif /* _GDMA_H */
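
To give a feel for the new PD/MR definitions, a GVA-type memory registration
could be described roughly as in the sketch below. The example_* helper and
its parameters are placeholders; the code that actually issues GDMA_CREATE_MR
is not part of this commit.

    static void
    example_fill_mr_params(struct gdma_create_mr_params *p,
        gdma_obj_handle_t pd_handle, gdma_obj_handle_t region,
        uint64_t umem_va)
    {
            memset(p, 0, sizeof(*p));
            p->pd_handle = pd_handle;       /* from a prior GDMA_CREATE_PD */
            p->mr_type = GDMA_MR_TYPE_GVA;
            p->gva.dma_region_handle = region;
            p->gva.virtual_address = umem_va;
            p->gva.access_flags = GDMA_ACCESS_FLAG_LOCAL_READ |
                GDMA_ACCESS_FLAG_LOCAL_WRITE;
    }
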
diff --git a/sys/dev/mana/gdma_main.c b/sys/dev/mana/gdma_main.c
index 6b4e87d169de..60ddde142f44 100644
--- a/sys/dev/mana/gdma_main.c
+++ b/sys/dev/mana/gdma_main.c
@@ -283,6 +283,61 @@ mana_gd_free_memory(struct gdma_mem_info *gmi)
 	bus_dma_tag_destroy(gmi->dma_tag);
 }
 
+int
+mana_gd_destroy_doorbell_page(struct gdma_context *gc, int doorbell_page)
+{
+	struct gdma_destroy_resource_range_req req = {};
+	struct gdma_resp_hdr resp = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_RESOURCE_RANGE,
+	    sizeof(req), sizeof(resp));
+
+	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+	req.num_resources = 1;
+	req.allocated_resources = doorbell_page;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err || resp.status) {
+		device_printf(gc->dev,
+		    "Failed to destroy doorbell page: ret %d, 0x%x\n",
+		    err, resp.status);
+		return err ? err : EPROTO;
+	}
+
+	return 0;
+}
+
+int
+mana_gd_allocate_doorbell_page(struct gdma_context *gc, int *doorbell_page)
+{
+	struct gdma_allocate_resource_range_req req = {};
+	struct gdma_allocate_resource_range_resp resp = {};
+	int err;
+
+	mana_gd_init_req_hdr(&req.hdr, GDMA_ALLOCATE_RESOURCE_RANGE,
+	    sizeof(req), sizeof(resp));
+
+	req.resource_type = GDMA_RESOURCE_DOORBELL_PAGE;
+	req.num_resources = 1;
+	req.alignment = 1;
+
+	/* Have GDMA start searching from 0 */
+	req.allocated_resources = 0;
+
+	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp);
+	if (err || resp.hdr.status) {
+		device_printf(gc->dev,
+		    "Failed to allocate doorbell page: ret %d, 0x%x\n",
+		    err, resp.hdr.status);
+		return err ? err : EPROTO;
+	}
+
+	*doorbell_page = resp.allocated_resources;
+
+	return 0;
+}
+
 static int
 mana_gd_create_hw_eq(struct gdma_context *gc,
     struct gdma_queue *queue)
@@ -301,7 +356,7 @@ mana_gd_create_hw_eq(struct gdma_context *gc,
 	req.type = queue->type;
 	req.pdid = queue->gdma_dev->pdid;
 	req.doolbell_id = queue->gdma_dev->doorbell;
-	req.gdma_region = queue->mem_info.gdma_region;
+	req.gdma_region = queue->mem_info.dma_region_handle;
 	req.queue_size = queue->queue_size;
 	req.log2_throttle_limit = queue->eq.log2_throttle_limit;
 	req.eq_pci_msix_index = queue->eq.msix_index;
@@ -316,7 +371,7 @@ mana_gd_create_hw_eq(struct gdma_context *gc,
 
 	queue->id = resp.queue_index;
 	queue->eq.disable_needed = true;
-	queue->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	queue->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 	return 0;
 }
 
@@ -848,26 +903,31 @@ free_q:
 	return err;
 }
 
-static void
-mana_gd_destroy_dma_region(struct gdma_context *gc, uint64_t gdma_region)
+int
+mana_gd_destroy_dma_region(struct gdma_context *gc,
+    gdma_obj_handle_t dma_region_handle)
 {
 	struct gdma_destroy_dma_region_req req = {};
 	struct gdma_general_resp resp = {};
 	int err;
 
-	if (gdma_region == GDMA_INVALID_DMA_REGION)
-		return;
+	if (dma_region_handle == GDMA_INVALID_DMA_REGION)
+		return 0;
 
 	mana_gd_init_req_hdr(&req.hdr, GDMA_DESTROY_DMA_REGION, sizeof(req),
 	    sizeof(resp));
-	req.gdma_region = gdma_region;
+	req.dma_region_handle = dma_region_handle;
 
 	err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp),
 	    &resp);
-	if (err || resp.hdr.status)
+	if (err || resp.hdr.status) {
 		device_printf(gc->dev,
 		    "Failed to destroy DMA region: %d, 0x%x\n",
 		    err, resp.hdr.status);
+		return EPROTO;
+	}
+
+	return 0;
 }
 
 static int
@@ -922,14 +982,15 @@ mana_gd_create_dma_region(struct gdma_dev *gd,
 	if (err)
 		goto out;
 
-	if (resp.hdr.status || resp.gdma_region == GDMA_INVALID_DMA_REGION) {
+	if (resp.hdr.status ||
+	    resp.dma_region_handle == GDMA_INVALID_DMA_REGION) {
 		device_printf(gc->dev, "Failed to create DMA region: 0x%x\n",
 			resp.hdr.status);
 		err = EPROTO;
 		goto out;
 	}
 
-	gmi->gdma_region = resp.gdma_region;
+	gmi->dma_region_handle = resp.dma_region_handle;
 out:
 	free(req, M_DEVBUF);
 	return err;
@@ -1057,7 +1118,7 @@ mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue)
 		return;
 	}
 
-	mana_gd_destroy_dma_region(gc, gmi->gdma_region);
+	mana_gd_destroy_dma_region(gc, gmi->dma_region_handle);
 	mana_gd_free_memory(gmi);
 	free(queue, M_DEVBUF);
 }
@@ -1448,12 +1509,16 @@ static void
 mana_gd_init_registers(struct gdma_context *gc)
 {
 	uint64_t bar0_va = rman_get_bushandle(gc->bar0);
+	vm_paddr_t bar0_pa = rman_get_start(gc->bar0);
 
 	gc->db_page_size = mana_gd_r32(gc, GDMA_REG_DB_PAGE_SIZE) & 0xFFFF;
 
 	gc->db_page_base =
 	    (void *) (bar0_va + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET));
 
+	gc->phys_db_page_base =
+	    bar0_pa + mana_gd_r64(gc, GDMA_REG_DB_PAGE_OFFSET);
+
 	gc->shm_base =
 	    (void *) (bar0_va + mana_gd_r64(gc, GDMA_REG_SHM_OFFSET));
 
diff --git a/sys/dev/mana/hw_channel.c b/sys/dev/mana/hw_channel.c
index dbb5b54d9ad1..19e25a8a49ab 100644
--- a/sys/dev/mana/hw_channel.c
+++ b/sys/dev/mana/hw_channel.c
@@ -931,7 +931,7 @@ mana_hwc_send_request(struct hw_channel_context *hwc, uint32_t req_len,
 		goto out;
 	}
 
-	if (ctx->status_code) {
+	if (ctx->status_code && ctx->status_code != GDMA_STATUS_MORE_ENTRIES) {
 		device_printf(hwc->dev,
 		    "HWC: Failed hw_channel req: 0x%x\n", ctx->status_code);
 		err = EPROTO;
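
With this check, GDMA_STATUS_MORE_ENTRIES is no longer treated as a failure
by the hardware channel, so the caller can see the status and issue the
follow-up request. A caller-side pattern might look like the sketch below;
the example_* function and the follow-up step are assumptions, not code from
this commit.

    static int
    example_create_large_dma_region(struct gdma_context *gc,
        struct gdma_create_dma_region_req *req, uint32_t req_msg_size)
    {
            struct gdma_create_dma_region_resp resp = {};
            int err;

            err = mana_gd_send_request(gc, req_msg_size, req,
                sizeof(resp), &resp);
            if (err || (resp.hdr.status &&
                resp.hdr.status != GDMA_STATUS_MORE_ENTRIES))
                    return (err ? err : EPROTO);

            if (resp.hdr.status == GDMA_STATUS_MORE_ENTRIES) {
                    /* Push the remaining pages with
                     * GDMA_DMA_REGION_ADD_PAGES requests before using
                     * the returned region handle. */
            }
            return (0);
    }
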
diff --git a/sys/dev/mana/mana.h b/sys/dev/mana/mana.h
index 7402685bd17c..cb5ee66c2331 100644
--- a/sys/dev/mana/mana.h
+++ b/sys/dev/mana/mana.h
@@ -382,8 +382,6 @@ struct mana_cq {
 	struct gdma_comp	gdma_comp_buf[CQE_POLLING_BUFFER];
 };
 
-#define GDMA_MAX_RQE_SGES	15
-
 struct mana_recv_buf_oob {
 	/* A valid GDMA work request representing the data buffer. */
 	struct gdma_wqe_request		wqe_req;
@@ -393,7 +391,7 @@ struct mana_recv_buf_oob {
 
 	/* SGL of the buffer going to be sent as part of the work request. */
 	uint32_t			num_sge;
-	struct gdma_sge			sgl[GDMA_MAX_RQE_SGES];
+	struct gdma_sge			sgl[MAX_RX_WQE_SGL_ENTRIES];
 
 	/* Required to store the result of mana_gd_post_work_request.
 	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
@@ -505,6 +503,8 @@ struct mana_port_context {
 
 	mana_handle_t		port_handle;
 
+	int			vport_use_count;
+
 	uint16_t		port_idx;
 
 	uint16_t		frame_size;
@@ -699,4 +699,17 @@ struct mana_tx_package {
 
 int mana_restart(struct mana_port_context *apc);
 
+int mana_create_wq_obj(struct mana_port_context *apc,
+    mana_handle_t vport,
+    uint32_t wq_type, struct mana_obj_spec *wq_spec,
+    struct mana_obj_spec *cq_spec,
+    mana_handle_t *wq_obj);
+
+void mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
+    mana_handle_t wq_obj);
+
+int mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
+    uint32_t doorbell_pg_id);
+
+void mana_uncfg_vport(struct mana_port_context *apc);
 #endif /* _MANA_H */
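
For reference, plugging in the sizes the driver already uses (assuming
sizeof(struct gdma_sge) == 16, INLINE_OOB_SMALL_SIZE == 8,
GDMA_MAX_SQE_SIZE == 512 and GDMA_MAX_RQE_SIZE == 256, the same values as
the Linux MANA driver), the new macros work out as below; the CTASSERT is
illustrative, not part of this commit.

    /*
     * MAX_TX_WQE_SGL_ENTRIES = (512 - 16 - 8) / 16 = 30
     * MAX_RX_WQE_SGL_ENTRIES = (256 - 16) / 16     = 15
     *
     * The RX value matches the hard-coded GDMA_MAX_RQE_SGES (15) removed
     * above, so the receive SGL array keeps its previous size.
     */
    CTASSERT(MAX_RX_WQE_SGL_ENTRIES == 15);
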
diff --git a/sys/dev/mana/mana_en.c b/sys/dev/mana/mana_en.c
index a075851a6ab5..e504df1c8a8d 100644
--- a/sys/dev/mana/mana_en.c
+++ b/sys/dev/mana/mana_en.c
@@ -980,7 +980,20 @@ mana_query_vport_cfg(struct mana_port_context *apc, uint32_t vport_index,
 	return 0;
 }
 
-static int
+void
+mana_uncfg_vport(struct mana_port_context *apc)
+{
+	MANA_APC_LOCK_LOCK(apc);
+	apc->vport_use_count--;
+	if (apc->vport_use_count < 0) {
+		mana_err(NULL,
+		    "WARNING: vport_use_count less than 0: %u\n",
+		    apc->vport_use_count);
+	}
+	MANA_APC_LOCK_UNLOCK(apc);
+}
+
+int
 mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
     uint32_t doorbell_pg_id)
 {
@@ -988,6 +1001,32 @@ mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
 	struct mana_config_vport_req req = {};
 	int err;
 
+	/* This function is used to program the Ethernet port in the hardware
+	 * table. It can be called from the Ethernet driver or the RDMA driver.
+	 *
+	 * For Ethernet usage, the hardware supports only one active user on a
+	 * physical port. The driver checks on the port usage before programming
+	 * the hardware when creating the RAW QP (RDMA driver) or exposing the
+	 * device to kernel NET layer (Ethernet driver).
+	 *
+	 * Because the RDMA driver doesn't know in advance which QP type the
+	 * user will create, it exposes the device with all its ports. The user
+	 * may not be able to create RAW QP on a port if this port is already
+	 * in used by the Ethernet driver from the kernel.
+	 *
+	 * This physical port limitation only applies to the RAW QP. For RC QP,
+	 * the hardware doesn't have this limitation. The user can create RC
+	 * QPs on a physical port up to the hardware limits independent of the
+	 * Ethernet usage on the same port.
+	 */
+	MANA_APC_LOCK_LOCK(apc);
+	if (apc->vport_use_count > 0) {
+		MANA_APC_LOCK_UNLOCK(apc);
+		return EBUSY;
+	}
+	apc->vport_use_count++;
+	MANA_APC_LOCK_UNLOCK(apc);
+
 	mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
 	    sizeof(req), sizeof(resp));
 	req.vport = apc->port_handle;
@@ -1014,7 +1053,13 @@ mana_cfg_vport(struct mana_port_context *apc, uint32_t protection_dom_id,
 
 	apc->tx_shortform_allowed = resp.short_form_allowed;
 	apc->tx_vp_offset = resp.tx_vport_offset;
+
+	if_printf(apc->ndev, "Configured vPort %lu PD %u DB %u\n",
+	    apc->port_handle, protection_dom_id, doorbell_pg_id);
 out:
+	if (err)
+		mana_uncfg_vport(apc);
+
 	return err;
 }
 
@@ -1078,12 +1123,15 @@ mana_cfg_vport_steering(struct mana_port_context *apc,
 		    resp.hdr.status);
 		err = EPROTO;
 	}
+
+	if_printf(ndev, "Configured steering vPort %lu entries %u\n",
+	    apc->port_handle, num_entries);
 out:
 	free(req, M_DEVBUF);
 	return err;
 }
 
-static int
+int
 mana_create_wq_obj(struct mana_port_context *apc,
     mana_handle_t vport,
     uint32_t wq_type, struct mana_obj_spec *wq_spec,
@@ -1138,7 +1186,7 @@ out:
 	return err;
 }
 
-static void
+void
 mana_destroy_wq_obj(struct mana_port_context *apc, uint32_t wq_type,
     mana_handle_t wq_obj)
 {
@@ -1966,10 +2014,10 @@ mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
 		memset(&wq_spec, 0, sizeof(wq_spec));
 		memset(&cq_spec, 0, sizeof(cq_spec));
 
-		wq_spec.gdma_region = txq->gdma_sq->mem_info.gdma_region;
+		wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
 		wq_spec.queue_size = txq->gdma_sq->queue_size;
 
-		cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+		cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 		cq_spec.queue_size = cq->gdma_cq->queue_size;
 		cq_spec.modr_ctx_id = 0;
 		cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -1983,8 +2031,10 @@ mana_create_txq(struct mana_port_context *apc, struct ifnet *net)
 		txq->gdma_sq->id = wq_spec.queue_index;
 		cq->gdma_cq->id = cq_spec.queue_index;
 
-		txq->gdma_sq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-		cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+		txq->gdma_sq->mem_info.dma_region_handle =
+		    GDMA_INVALID_DMA_REGION;
+		cq->gdma_cq->mem_info.dma_region_handle =
+		    GDMA_INVALID_DMA_REGION;
 
 		txq->gdma_txq_id = txq->gdma_sq->id;
 
@@ -2285,10 +2335,10 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
 
 	memset(&wq_spec, 0, sizeof(wq_spec));
 	memset(&cq_spec, 0, sizeof(cq_spec));
-	wq_spec.gdma_region = rxq->gdma_rq->mem_info.gdma_region;
+	wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
 	wq_spec.queue_size = rxq->gdma_rq->queue_size;
 
-	cq_spec.gdma_region = cq->gdma_cq->mem_info.gdma_region;
+	cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
 	cq_spec.queue_size = cq->gdma_cq->queue_size;
 	cq_spec.modr_ctx_id = 0;
 	cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
@@ -2301,8 +2351,8 @@ mana_create_rxq(struct mana_port_context *apc, uint32_t rxq_idx,
 	rxq->gdma_rq->id = wq_spec.queue_index;
 	cq->gdma_cq->id = cq_spec.queue_index;
 
-	rxq->gdma_rq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
-	cq->gdma_cq->mem_info.gdma_region = GDMA_INVALID_DMA_REGION;
+	rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
+	cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
 
 	rxq->gdma_id = rxq->gdma_rq->id;
 	cq->gdma_id = cq->gdma_cq->id;
@@ -2399,6 +2449,8 @@ mana_destroy_vport(struct mana_port_context *apc)
 	}
 
 	mana_destroy_txq(apc);
+
+	mana_uncfg_vport(apc);
 }
 
 static int
@@ -2691,6 +2743,7 @@ mana_probe_port(struct mana_context *ac, int port_idx,
 	apc->frame_size = DEFAULT_FRAME_SIZE;
 	apc->last_tx_cq_bind_cpu = -1;
 	apc->last_rx_cq_bind_cpu = -1;
+	apc->vport_use_count = 0;
 
 	MANA_APC_LOCK_INIT(apc);
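
Taken together, the exported vport and work-queue functions let a future
RDMA driver bring up a RAW QP roughly as sketched below. The example_*
function, its parameters and the error unwinding are placeholders; only the
exported mana_cfg_vport/mana_uncfg_vport and mana_create_wq_obj calls are
from this commit.

    static int
    example_raw_qp_setup(struct mana_port_context *apc, uint32_t pd_id,
        uint32_t doorbell_id, struct mana_obj_spec *wq_spec,
        struct mana_obj_spec *cq_spec, mana_handle_t *wq_obj)
    {
            int err;

            /* Take exclusive ownership of the vport; fails with EBUSY if
             * the Ethernet driver or another context already owns it. */
            err = mana_cfg_vport(apc, pd_id, doorbell_id);
            if (err)
                    return (err);

            /* Create a send queue object on the vport for the RAW QP. */
            err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
                wq_spec, cq_spec, wq_obj);
            if (err)
                    mana_uncfg_vport(apc);
            return (err);
    }

    /* Teardown: mana_destroy_wq_obj(apc, GDMA_SQ, *wq_obj), then
     * mana_uncfg_vport(apc). */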