svn commit: r342066 - head/sys/dev/mrsas

Kashyap D Desai kadesai at FreeBSD.org
Fri Dec 14 08:04:18 UTC 2018


Author: kadesai
Date: Fri Dec 14 08:04:16 2018
New Revision: 342066
URL: https://svnweb.freebsd.org/changeset/base/342066

Log:
  This patch adds support for NVMe PRP creation by the driver for fastpath-
  capable IOs. The NVMe specification supports a specific type of scatter-gather
  list called a PRP (Physical Region Page) for IO data buffers. Since the NVMe
  drive is connected behind a SAS3.5 tri-mode adapter, the MegaRAID
  driver/firmware has to convert OS SGLs into native NVMe PRP format. For IOs
  sent to firmware, the MegaRAID firmware does this OS SGL to PRP translation
  and sends PRPs to the backend NVMe device. For fastpath IOs, the driver does
  this OS SGL to PRP translation itself.
  
  Submitted by: Sumit Saxena <sumit.saxena at broadcom.com>
  Reviewed by:  Kashyap Desai <Kashyap.Desai at broadcom.com>
  Approved by:  ken
  MFC after:  3 days
  Sponsored by:   Broadcom Inc

Modified:
  head/sys/dev/mrsas/mrsas.c
  head/sys/dev/mrsas/mrsas.h
  head/sys/dev/mrsas/mrsas_cam.c
  head/sys/dev/mrsas/mrsas_fp.c

Modified: head/sys/dev/mrsas/mrsas.c
==============================================================================
--- head/sys/dev/mrsas/mrsas.c	Fri Dec 14 08:03:28 2018	(r342065)
+++ head/sys/dev/mrsas/mrsas.c	Fri Dec 14 08:04:16 2018	(r342066)
@@ -451,6 +451,12 @@ mrsas_setup_sysctl(struct mrsas_softc *sc)
 	    OID_AUTO, "stream detection", CTLFLAG_RW,
 		&sc->drv_stream_detection, 0,
 		"Disable/Enable Stream detection. <default: 1, Enable Stream Detection>");
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "prp_count", CTLFLAG_RD,
+	    &sc->prp_count.val_rdonly, 0, "Number of IOs for which PRPs are built");
+	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
+	    OID_AUTO, "SGE holes", CTLFLAG_RD,
+	    &sc->sge_holes.val_rdonly, 0, "Number of IOs with holes in SGEs");
 }
 
 /*
@@ -899,6 +905,8 @@ mrsas_attach(device_t dev)
 
 	mrsas_atomic_set(&sc->fw_outstanding, 0);
 	mrsas_atomic_set(&sc->target_reset_outstanding, 0);
+	mrsas_atomic_set(&sc->prp_count, 0);
+	mrsas_atomic_set(&sc->sge_holes, 0);
 
 	sc->io_cmds_highwater = 0;
 
@@ -2266,7 +2274,7 @@ mrsas_init_fw(struct mrsas_softc *sc)
 	u_int32_t max_sectors_1;
 	u_int32_t max_sectors_2;
 	u_int32_t tmp_sectors;
-	u_int32_t scratch_pad_2, scratch_pad_3;
+	u_int32_t scratch_pad_2, scratch_pad_3, scratch_pad_4;
 	int msix_enable = 0;
 	int fw_msix_count = 0;
 	int i, j;
@@ -2350,6 +2358,15 @@ mrsas_init_fw(struct mrsas_softc *sc)
 		return (1);
 	}
 
+	if (sc->is_ventura) {
+		scratch_pad_4 = mrsas_read_reg(sc, offsetof(mrsas_reg_set,
+		    outbound_scratch_pad_4));
+		if ((scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK) >= MR_DEFAULT_NVME_PAGE_SHIFT)
+			sc->nvme_page_size = 1 << (scratch_pad_4 & MR_NVME_PAGE_SIZE_MASK);
+
+		device_printf(sc->mrsas_dev, "NVME page size\t: (%d)\n", sc->nvme_page_size);
+	}
+
 	/* Allocate internal commands for pass-thru */
 	if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) {
 		device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n");
@@ -2652,6 +2669,7 @@ mrsas_ioc_init(struct mrsas_softc *sc)
 	IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr;
 	IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr;
 	IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0);
+	IOCInitMsg->HostPageSize = MR_DEFAULT_NVME_PAGE_SHIFT;
 
 	init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem;
 	init_frame->cmd = MFI_CMD_INIT;

Modified: head/sys/dev/mrsas/mrsas.h
==============================================================================
--- head/sys/dev/mrsas/mrsas.h	Fri Dec 14 08:03:28 2018	(r342065)
+++ head/sys/dev/mrsas/mrsas.h	Fri Dec 14 08:04:16 2018	(r342066)
@@ -694,7 +694,7 @@ typedef struct _MPI2_IOC_INIT_REQUEST {
 	u_int16_t HeaderVersion;	/* 0x0E */
 	u_int32_t Reserved5;		/* 0x10 */
 	u_int16_t Reserved6;		/* 0x14 */
-	u_int8_t Reserved7;		/* 0x16 */
+	u_int8_t HostPageSize;		/* 0x16 */
 	u_int8_t HostMSIxVectors;	/* 0x17 */
 	u_int16_t Reserved8;		/* 0x18 */
 	u_int16_t SystemRequestFrameSize;	/* 0x1A */
@@ -763,7 +763,7 @@ Mpi2IOCInitRequest_t, MPI2_POINTER pMpi2IOCInitRequest
 typedef struct _MR_DEV_HANDLE_INFO {
 	u_int16_t curDevHdl;
 	u_int8_t validHandles;
-	u_int8_t reserved;
+	u_int8_t interfaceType;
 	u_int16_t devHandle[2];
 }	MR_DEV_HANDLE_INFO;
 
@@ -1017,6 +1017,7 @@ struct IO_REQUEST_INFO {
 	u_int16_t ldTgtId;
 	u_int8_t isRead;
 	u_int16_t devHandle;
+	u_int8_t pdInterface;
 	u_int64_t pdBlock;
 	u_int8_t fpOkForIo;
 	u_int8_t IoforUnevenSpan;
@@ -1164,6 +1165,22 @@ typedef struct _MR_FW_RAID_MAP_DYNAMIC {
 #define	IEEE_SGE_FLAGS_CHAIN_ELEMENT	(0x80)
 #define	IEEE_SGE_FLAGS_END_OF_LIST		(0x40)
 
+/* Few NVME flags defines*/
+#define MPI2_SGE_FLAGS_SHIFT                (0x02)
+#define IEEE_SGE_FLAGS_FORMAT_MASK          (0xC0)
+#define IEEE_SGE_FLAGS_FORMAT_IEEE          (0x00)
+#define IEEE_SGE_FLAGS_FORMAT_PQI           (0x01)
+#define IEEE_SGE_FLAGS_FORMAT_NVME          (0x02)
+#define IEEE_SGE_FLAGS_FORMAT_AHCI          (0x03)
+
+
+#define MPI26_IEEE_SGE_FLAGS_NSF_MASK           (0x1C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_MPI_IEEE       (0x00)
+#define MPI26_IEEE_SGE_FLAGS_NSF_PQI            (0x04)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP       (0x08)
+#define MPI26_IEEE_SGE_FLAGS_NSF_AHCI_PRDT      (0x0C)
+#define MPI26_IEEE_SGE_FLAGS_NSF_NVME_SGL       (0x10)
+
 union desc_value {
 	u_int64_t word;
 	struct {
@@ -1227,9 +1244,8 @@ typedef struct _mrsas_register_set {
 	u_int32_t outbound_scratch_pad;	/* 00B0h */
 	u_int32_t outbound_scratch_pad_2;	/* 00B4h */
 	u_int32_t outbound_scratch_pad_3;	/* 00B8h */
+	u_int32_t outbound_scratch_pad_4;	/* 00BCh */
 
-	u_int32_t reserved_4;	/* 00BCh */
-
 	u_int32_t inbound_low_queue_port;	/* 00C0h */
 
 	u_int32_t inbound_high_queue_port;	/* 00C4h */
@@ -1678,6 +1694,7 @@ struct mrsas_mpt_cmd {
 	struct mrsas_mpt_cmd *peer_cmd;
 	bool	callout_owner;
 	TAILQ_ENTRY(mrsas_mpt_cmd) next;
+	u_int8_t pdInterface;
 };
 
 /*
@@ -3149,6 +3166,10 @@ struct mrsas_target {
 	u_int32_t max_io_size_kb;
 } __packed;
 
+#define MR_NVME_PAGE_SIZE_MASK		0x000000FF
+#define MR_DEFAULT_NVME_PAGE_SIZE	4096
+#define MR_DEFAULT_NVME_PAGE_SHIFT	12
+
 /*******************************************************************
  * per-instance data
  ********************************************************************/
@@ -3287,6 +3308,8 @@ struct mrsas_softc {
 	u_int32_t max_sectors_per_req;
 	u_int32_t disableOnlineCtrlReset;
 	mrsas_atomic_t fw_outstanding;
+	mrsas_atomic_t prp_count;
+	mrsas_atomic_t sge_holes;
 
 	u_int32_t mrsas_debug;
 	u_int32_t mrsas_io_timeout;
@@ -3331,6 +3354,7 @@ struct mrsas_softc {
 	u_int32_t new_map_sz;
 	u_int32_t drv_map_sz;
 
+	u_int32_t nvme_page_size;
 	boolean_t is_ventura;
 	boolean_t msix_combined;
 	u_int16_t maxRaidMapSize;

Modified: head/sys/dev/mrsas/mrsas_cam.c
==============================================================================
--- head/sys/dev/mrsas/mrsas_cam.c	Fri Dec 14 08:03:28 2018	(r342065)
+++ head/sys/dev/mrsas/mrsas_cam.c	Fri Dec 14 08:04:16 2018	(r342066)
@@ -105,6 +105,14 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
 static int32_t
 mrsas_startio(struct mrsas_softc *sc, struct cam_sim *sim,
     union ccb *ccb);
+
+static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
+	bus_dma_segment_t *segs, int nsegs);
+static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd,
+	bus_dma_segment_t *segs, int nseg);
+static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd,
+	bus_dma_segment_t *segs, int nseg);
+
 struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc);
 MRSAS_REQUEST_DESCRIPTOR_UNION *
 	mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index);
@@ -1145,6 +1153,7 @@ mrsas_setup_io(struct mrsas_softc *sc, struct mrsas_mp
 
 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
 		io_request->DevHandle = io_info.devHandle;
+		cmd->pdInterface = io_info.pdInterface;
 	} else {
 		/* Not FP IO */
 		io_request->RaidContext.raid_context.timeoutValue = map_ptr->raidMap.fpPdIoTimeoutSec;
@@ -1268,6 +1277,8 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrs
 	io_request->RaidContext.raid_context.regLockRowLBA = 0;
 	io_request->RaidContext.raid_context.regLockLength = 0;
 
+	cmd->pdInterface = sc->target_list[device_id].interface_type;
+
 	/* If FW supports PD sequence number */
 	if (sc->use_seqnum_jbod_fp &&
 	    sc->pd_list[device_id].driveType == 0x00) {
@@ -1366,6 +1377,72 @@ mrsas_build_syspdio(struct mrsas_softc *sc, struct mrs
 }
 
 /*
+ * mrsas_is_prp_possible:	This function will tell whether PRPs should be built or not
+ * cmd:						MPT command frame pointer
+ * segs:					OS SGE pointers
+ * nsegs:					Number of OS SGEs
+ *
+ * This function will check whether IO is qualified to build PRPs
+ * return:				true: if PRPs should be built
+ *						false: if IEEE SGLs should be built
+ */
+static boolean_t mrsas_is_prp_possible(struct mrsas_mpt_cmd *cmd,
+	bus_dma_segment_t *segs, int nsegs)
+{
+	struct mrsas_softc *sc = cmd->sc;
+	int i;
+	u_int32_t data_length = 0;
+	bool build_prp = false;
+	u_int32_t mr_nvme_pg_size;
+
+	mr_nvme_pg_size = max(sc->nvme_page_size, MR_DEFAULT_NVME_PAGE_SIZE);
+	data_length = cmd->length;
+
+	if (data_length > (mr_nvme_pg_size * 5))
+		build_prp = true;
+	else if ((data_length > (mr_nvme_pg_size * 4)) &&
+		(data_length <= (mr_nvme_pg_size * 5)))  {
+		/* check if 1st SG entry size is < residual beyond 4 pages */
+		if ((segs[0].ds_len) < (data_length - (mr_nvme_pg_size * 4)))
+			build_prp = true;
+	}
+
+	/*check for SGE holes here*/
+	for (i = 0; i < nsegs; i++) {
+		/* check for mid SGEs */
+		if ((i != 0) && (i != (nsegs - 1))) {
+				if ((segs[i].ds_addr % mr_nvme_pg_size) ||
+					(segs[i].ds_len % mr_nvme_pg_size)) {
+					build_prp = false;
+					mrsas_atomic_inc(&sc->sge_holes);
+					break;
+				}
+		}
+
+		/* check for first SGE*/
+		if ((nsegs > 1) && (i == 0)) {
+				if ((segs[i].ds_addr + segs[i].ds_len) % mr_nvme_pg_size) {
+					build_prp = false;
+					mrsas_atomic_inc(&sc->sge_holes);
+					break;
+				}
+		}
+
+		/* check for Last SGE*/
+		if ((nsegs > 1) && (i == (nsegs - 1))) {
+				if (segs[i].ds_addr % mr_nvme_pg_size) {
+					build_prp = false;
+					mrsas_atomic_inc(&sc->sge_holes);
+					break;
+				}
+		}
+
+	}
+
+	return build_prp;
+}
+
+/*
  * mrsas_map_request:	Map and load data
  * input:				Adapter instance soft state
  * 						Pointer to command packet
@@ -1427,42 +1504,21 @@ mrsas_unmap_request(struct mrsas_softc *sc, struct mrs
 	}
 }
 
-/*
- * mrsas_data_load_cb:	Callback entry point
- * input:				Pointer to command packet as argument
- * 						Pointer to segment
- * 						Number of segments Error
- *
- * This is the callback function of the bus dma map load.  It builds the SG
- * list.
+/**
+ * mrsas_build_ieee_sgl -	Prepare IEEE SGLs
+ * @cmd:					Fusion command frame (adapter soft state is taken from cmd->sc)
+ * @segs:					OS SGE pointers
+ * @nseg:					Number of OS SGEs
+ *
+ * return:					void
  */
-static void
-mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+static void mrsas_build_ieee_sgl(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
 {
-	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
 	struct mrsas_softc *sc = cmd->sc;
 	MRSAS_RAID_SCSI_IO_REQUEST *io_request;
 	pMpi25IeeeSgeChain64_t sgl_ptr;
 	int i = 0, sg_processed = 0;
 
-	if (error) {
-		cmd->error_code = error;
-		device_printf(sc->mrsas_dev, "mrsas_data_load_cb: error=%d\n", error);
-		if (error == EFBIG) {
-			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
-			return;
-		}
-	}
-	if (cmd->flags & MRSAS_DIR_IN)
-		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
-		    BUS_DMASYNC_PREREAD);
-	if (cmd->flags & MRSAS_DIR_OUT)
-		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
-		    BUS_DMASYNC_PREWRITE);
-	if (nseg > sc->max_num_sge) {
-		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
-		return;
-	}
 	io_request = cmd->io_request;
 	sgl_ptr = (pMpi25IeeeSgeChain64_t)&io_request->SGL;
 
@@ -1484,12 +1540,12 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
 			sgl_ptr++;
 			sg_processed = i + 1;
 			if ((sg_processed == (sc->max_sge_in_main_msg - 1)) &&
-			    (nseg > sc->max_sge_in_main_msg)) {
+				(nseg > sc->max_sge_in_main_msg)) {
 				pMpi25IeeeSgeChain64_t sg_chain;
 
 				if (sc->mrsas_gen3_ctrl || sc->is_ventura) {
 					if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
-					    != MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
+						!= MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH)
 						cmd->io_request->ChainOffset = sc->chain_offset_io_request;
 					else
 						cmd->io_request->ChainOffset = 0;
@@ -1506,6 +1562,166 @@ mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs,
 			}
 		}
 	}
+}
+
+/**
+ * mrsas_build_prp_nvme - Prepare PRPs(Physical Region Page)- SGLs specific to NVMe drives only
+ * @cmd:					Fusion command frame (adapter soft state is taken from cmd->sc)
+ * @segs:					OS SGE pointers
+ * @nseg:					Number of OS SGEs
+ *
+ * return:					void
+ */
+static void mrsas_build_prp_nvme(struct mrsas_mpt_cmd *cmd, bus_dma_segment_t *segs, int nseg)
+{
+	struct mrsas_softc *sc = cmd->sc;
+	int sge_len, offset, num_prp_in_chain = 0;
+	pMpi25IeeeSgeChain64_t main_chain_element, ptr_first_sgl, sgl_ptr;
+	u_int64_t *ptr_sgl, *ptr_sgl_phys;
+	u_int64_t sge_addr;
+	u_int32_t page_mask, page_mask_result, i = 0;
+	u_int32_t first_prp_len;
+	int data_len = cmd->length;
+	u_int32_t mr_nvme_pg_size = max(sc->nvme_page_size,
+					MR_DEFAULT_NVME_PAGE_SIZE);
+
+	sgl_ptr = (pMpi25IeeeSgeChain64_t) &cmd->io_request->SGL;
+	/*
+	 * NVMe has a very convoluted PRP format.  One PRP is required
+	 * for each page or partial page.  We need to split up OS SG
+	 * entries if they are longer than one page or cross a page
+	 * boundary.  We also have to insert a PRP list pointer entry as
+	 * the last entry in each physical page of the PRP list.
+	 *
+	 * NOTE: The first PRP "entry" is actually placed in the first
+	 * SGL entry in the main message in IEEE 64 format.  The 2nd
+	 * entry in the main message is the chain element, and the rest
+	 * of the PRP entries are built in the contiguous PCIe buffer.
+	 */
+	page_mask = mr_nvme_pg_size - 1;
+	ptr_sgl = (u_int64_t *) cmd->chain_frame;
+	ptr_sgl_phys = (u_int64_t *) cmd->chain_frame_phys_addr;;
+
+	/* Build chain frame element which holds all PRPs except first*/
+	main_chain_element = (pMpi25IeeeSgeChain64_t)
+	    ((u_int8_t *)sgl_ptr + sizeof(MPI25_IEEE_SGE_CHAIN64));
+
+
+	main_chain_element->Address = (u_int64_t) ptr_sgl_phys;
+	main_chain_element->NextChainOffset = 0;
+	main_chain_element->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT |
+					IEEE_SGE_FLAGS_SYSTEM_ADDR |
+					MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
+
+
+	/* Build first PRP, SGE need not to be PAGE aligned*/
+	ptr_first_sgl = sgl_ptr;
+	sge_addr = segs[i].ds_addr;
+	sge_len = segs[i].ds_len;
+	i++;
+
+	offset = (u_int32_t) (sge_addr & page_mask);
+	first_prp_len = mr_nvme_pg_size - offset;
+
+	ptr_first_sgl->Address = sge_addr;
+	ptr_first_sgl->Length = first_prp_len;
+
+	data_len -= first_prp_len;
+
+	if (sge_len > first_prp_len) {
+		sge_addr += first_prp_len;
+		sge_len -= first_prp_len;
+	} else if (sge_len == first_prp_len) {
+		sge_addr = segs[i].ds_addr;
+		sge_len = segs[i].ds_len;
+		i++;
+	}
+
+	for (;;) {
+
+		offset = (u_int32_t) (sge_addr & page_mask);
+
+		/* Put PRP pointer due to page boundary*/
+		page_mask_result = (uintptr_t)(ptr_sgl + 1) & page_mask;
+		if (!page_mask_result) {
+			device_printf(sc->mrsas_dev, "BRCM: Put prp pointer as we are at page boundary"
+					" ptr_sgl: 0x%p\n", ptr_sgl);
+			ptr_sgl_phys++;
+			*ptr_sgl = (uintptr_t)ptr_sgl_phys;
+			ptr_sgl++;
+			num_prp_in_chain++;
+		}
+
+		*ptr_sgl = sge_addr;
+		ptr_sgl++;
+		ptr_sgl_phys++;
+		num_prp_in_chain++;
+
+
+		sge_addr += mr_nvme_pg_size;
+		sge_len -= mr_nvme_pg_size;
+		data_len -= mr_nvme_pg_size;
+
+		if (data_len <= 0)
+			break;
+
+		if (sge_len > 0)
+			continue;
+
+		sge_addr = segs[i].ds_addr;
+		sge_len = segs[i].ds_len;
+		i++;
+	}
+
+	main_chain_element->Length = num_prp_in_chain * sizeof(u_int64_t);
+	mrsas_atomic_inc(&sc->prp_count);
+
+}
+
+/*
+ * mrsas_data_load_cb:	Callback entry point to build SGLs
+ * input:				Pointer to command packet as argument
+ *						Pointer to segments
+ *						Number of segments
+ *						Error code
+ * This is the callback function of the bus dma map load.  It builds the SG list
+ */
+static void
+mrsas_data_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	struct mrsas_mpt_cmd *cmd = (struct mrsas_mpt_cmd *)arg;
+	struct mrsas_softc *sc = cmd->sc;
+	boolean_t build_prp = false;
+
+	if (error) {
+		cmd->error_code = error;
+		device_printf(sc->mrsas_dev, "mrsas_data_load_cb_prp: error=%d\n", error);
+		if (error == EFBIG) {
+			cmd->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
+			return;
+		}
+	}
+	if (cmd->flags & MRSAS_DIR_IN)
+		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+		    BUS_DMASYNC_PREREAD);
+	if (cmd->flags & MRSAS_DIR_OUT)
+		bus_dmamap_sync(cmd->sc->data_tag, cmd->data_dmamap,
+		    BUS_DMASYNC_PREWRITE);
+	if (nseg > sc->max_num_sge) {
+		device_printf(sc->mrsas_dev, "SGE count is too large or 0.\n");
+		return;
+	}
+
+	/* Check for whether PRPs should be built or IEEE SGLs*/
+	if ((cmd->io_request->IoFlags & MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) &&
+			(cmd->pdInterface == NVME_PD))
+		build_prp = mrsas_is_prp_possible(cmd, segs, nseg);
+
+	if (build_prp == true)
+		mrsas_build_prp_nvme(cmd, segs, nseg);
+	else
+		mrsas_build_ieee_sgl(cmd, segs, nseg);
+
 	cmd->sge_count = nseg;
 }
 

Modified: head/sys/dev/mrsas/mrsas_fp.c
==============================================================================
--- head/sys/dev/mrsas/mrsas_fp.c	Fri Dec 14 08:03:28 2018	(r342065)
+++ head/sys/dev/mrsas/mrsas_fp.c	Fri Dec 14 08:04:16 2018	(r342066)
@@ -219,6 +219,12 @@ MR_PdDevHandleGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL * 
 	return map->raidMap.devHndlInfo[pd].curDevHdl;
 }
 
+static u_int8_t MR_PdInterfaceTypeGet(u_int32_t pd, MR_DRV_RAID_MAP_ALL *map)
+{
+    return map->raidMap.devHndlInfo[pd].interfaceType;
+}
+
+
 static u_int16_t
 MR_ArPdGet(u_int32_t ar, u_int32_t arm, MR_DRV_RAID_MAP_ALL * map)
 {
@@ -927,6 +933,8 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_in
 	u_int8_t retval = TRUE;
 	u_int64_t *pdBlock = &io_info->pdBlock;
 	u_int16_t *pDevHandle = &io_info->devHandle;
+	u_int8_t  *pPdInterface = &io_info->pdInterface;
+
 	u_int32_t logArm, rowMod, armQ, arm;
 
 	/* Get row and span from io_info for Uneven Span IO. */
@@ -952,6 +960,7 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_in
 
 	if (pd != MR_PD_INVALID) {
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
+		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
 		/* get second pd also for raid 1/10 fast path writes */
 		if ((raid->level == 1) && !io_info->isRead) {
 			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -966,8 +975,10 @@ mr_spanset_get_phy_params(struct mrsas_softc *sc, u_in
 			pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
 		else if (raid->level == 1) {
 			pd = MR_ArPdGet(arRef, physArm + 1, map);
-			if (pd != MR_PD_INVALID)
+			if (pd != MR_PD_INVALID) {
 				*pDevHandle = MR_PdDevHandleGet(pd, map);
+				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+			}
 		}
 	}
 
@@ -1622,6 +1633,7 @@ mrsas_get_updated_dev_handle(struct mrsas_softc *sc,
 	/* get best new arm */
 	arm_pd = mrsas_get_best_arm_pd(sc, lbInfo, io_info);
 	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+	io_info->pdInterface = MR_PdInterfaceTypeGet(arm_pd, drv_map);
 	mrsas_atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
 
 	return devHandle;
@@ -1653,6 +1665,7 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
 	int error_code = 0;
 	u_int64_t *pdBlock = &io_info->pdBlock;
 	u_int16_t *pDevHandle = &io_info->devHandle;
+	u_int8_t  *pPdInterface = &io_info->pdInterface;
 	u_int32_t rowMod, armQ, arm, logArm;
 
 	row = mega_div64_32(stripRow, raid->rowDataSize);
@@ -1691,6 +1704,7 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
 	if (pd != MR_PD_INVALID) {
 		/* Get dev handle from Pd */
 		*pDevHandle = MR_PdDevHandleGet(pd, map);
+		*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
 		/* get second pd also for raid 1/10 fast path writes */
 		if ((raid->level == 1) && !io_info->isRead) {
 			r1_alt_pd = MR_ArPdGet(arRef, physArm + 1, map);
@@ -1706,9 +1720,11 @@ MR_GetPhyParams(struct mrsas_softc *sc, u_int32_t ld,
 		else if (raid->level == 1) {
 			/* Get Alternate Pd. */
 			pd = MR_ArPdGet(arRef, physArm + 1, map);
-			if (pd != MR_PD_INVALID)
+			if (pd != MR_PD_INVALID) {
 				/* Get dev handle from Pd. */
 				*pDevHandle = MR_PdDevHandleGet(pd, map);
+				*pPdInterface = MR_PdInterfaceTypeGet(pd, map);
+			}
 		}
 	}
 


More information about the svn-src-head mailing list