svn commit: r367979 - head/sys/dev/isp

Alexander Motin mav at FreeBSD.org
Tue Nov 24 04:16:51 UTC 2020


Author: mav
Date: Tue Nov 24 04:16:49 2020
New Revision: 367979
URL: https://svnweb.freebsd.org/changeset/base/367979

Log:
  Implement request queue overflow protection.
  
  Before this change in case of request queue overflow driver just froze the
  device queue for 100ms to retry after.  It was pretty bad for performance.
  This change introduces SIM queue freezing when free space on the request
  queue drops below 255 entries (worst case of maximum I/O size S/G list),
  checking for a chance to release it on I/O completion.  If the queue still
  gets overflowed somehow, the old mechanism is still in place, just with
  the delay reduced to 10ms.
  
  With the earlier queue length increase, overflows should not happen often,
  but they are still easily reachable in synthetic tests.

Modified:
  head/sys/dev/isp/isp.c
  head/sys/dev/isp/isp_freebsd.c
  head/sys/dev/isp/isp_library.c
  head/sys/dev/isp/isp_library.h
  head/sys/dev/isp/ispmbox.h
  head/sys/dev/isp/ispvar.h

Modified: head/sys/dev/isp/isp.c
==============================================================================
--- head/sys/dev/isp/isp.c	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/isp.c	Tue Nov 24 04:16:49 2020	(r367979)
@@ -263,6 +263,9 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
 		return;
 	}
 
+	isp->isp_reqidx = isp->isp_reqodx = 0;
+	isp->isp_resodx = 0;
+	isp->isp_atioodx = 0;
 	ISP_WRITE(isp, BIU2400_REQINP, 0);
 	ISP_WRITE(isp, BIU2400_REQOUTP, 0);
 	ISP_WRITE(isp, BIU2400_RSPINP, 0);
@@ -573,14 +576,16 @@ isp_reset(ispsoftc_t *isp, int do_load_defaults)
 	}
 	isp_prt(isp, ISP_LOGCONFIG, "%s", buf);
 
+	/*
+	 * For the maximum number of commands take free exchange control block
+	 * buffer count reported by firmware, limiting it to the maximum of our
+	 * hardcoded handle format (16K now) minus some management reserve.
+	 */
 	MBSINIT(&mbs, MBOX_GET_RESOURCE_COUNT, MBLOGALL, 0);
 	isp_mboxcmd(isp, &mbs);
-	if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
+	if (mbs.param[0] != MBOX_COMMAND_COMPLETE)
 		return;
-	}
-	isp->isp_maxcmds = mbs.param[3];
-	/* Limit to the maximum of our hardcoded handle format (16K now). */
-	isp->isp_maxcmds = MIN(isp->isp_maxcmds, ISP_HANDLE_MAX - ISP_HANDLE_RESERVE);
+	isp->isp_maxcmds = MIN(mbs.param[3], ISP_HANDLE_MAX - ISP_HANDLE_RESERVE);
 	isp_prt(isp, ISP_LOGCONFIG, "%d max I/O command limit set", isp->isp_maxcmds);
 
 	/*
@@ -888,6 +893,8 @@ isp_init(ispsoftc_t *isp)
 		isp_prt(isp, ISP_LOGERR, "No valid WWNs to use");
 		return;
 	}
+	icbp->icb_rspnsin = isp->isp_resodx;
+	icbp->icb_rqstout = isp->isp_reqidx;
 	icbp->icb_retry_count = fcp->isp_retry_count;
 
 	icbp->icb_rqstqlen = RQUEST_QUEUE_LEN(isp);
@@ -913,6 +920,7 @@ isp_init(ispsoftc_t *isp)
 
 #ifdef	ISP_TARGET_MODE
 	/* unconditionally set up the ATIO queue if we support target mode */
+	icbp->icb_atio_in = isp->isp_atioodx;
 	icbp->icb_atioqlen = ATIO_QUEUE_LEN(isp);
 	if (icbp->icb_atioqlen < 8) {
 		isp_prt(isp, ISP_LOGERR, "bad ATIO queue length %d", icbp->icb_atioqlen);
@@ -1031,11 +1039,6 @@ isp_init(ispsoftc_t *isp)
 	if (mbs.param[0] != MBOX_COMMAND_COMPLETE) {
 		return;
 	}
-	isp->isp_reqidx = 0;
-	isp->isp_reqodx = 0;
-	isp->isp_residx = 0;
-	isp->isp_resodx = 0;
-	isp->isp_atioodx = 0;
 
 	/*
 	 * Whatever happens, we're now committed to being here.
@@ -3237,8 +3240,6 @@ isp_intr_respq(ispsoftc_t *isp)
 	}
 
 	iptr = ISP_READ(isp, BIU2400_RSPINP);
-	isp->isp_residx = iptr;
-
 	optr = isp->isp_resodx;
 	while (optr != iptr) {
 		sptr = cptr = optr;

Modified: head/sys/dev/isp/isp_freebsd.c
==============================================================================
--- head/sys/dev/isp/isp_freebsd.c	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/isp_freebsd.c	Tue Nov 24 04:16:49 2020	(r367979)
@@ -54,6 +54,8 @@ static const char prom3[] = "Chan %d [%u] PortID 0x%06
 
 static void isp_freeze_loopdown(ispsoftc_t *, int);
 static void isp_loop_changed(ispsoftc_t *isp, int chan);
+static void isp_rq_check_above(ispsoftc_t *);
+static void isp_rq_check_below(ispsoftc_t *);
 static d_ioctl_t ispioctl;
 static void isp_poll(struct cam_sim *);
 static callout_func_t isp_watchdog;
@@ -350,6 +352,36 @@ isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
 	}
 }
 
+/*
+ * Functions to protect from request queue overflow by freezing SIM queue.
+ * XXX: freezing only one arbitrary SIM, since they all share the queue.
+ */
+static void
+isp_rq_check_above(ispsoftc_t *isp)
+{
+	struct isp_fc *fc = ISP_FC_PC(isp, 0);
+
+	if (isp->isp_rqovf || fc->sim == NULL)
+		return;
+	if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
+		xpt_freeze_simq(fc->sim, 1);
+		isp->isp_rqovf = 1;
+	}
+}
+
+static void
+isp_rq_check_below(ispsoftc_t *isp)
+{
+	struct isp_fc *fc = ISP_FC_PC(isp, 0);
+
+	if (!isp->isp_rqovf || fc->sim == NULL)
+		return;
+	if (isp_rqentry_avail(isp, QENTRY_MAX)) {
+		xpt_release_simq(fc->sim, 0);
+		isp->isp_rqovf = 0;
+	}
+}
+
 static int
 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
 {
@@ -655,7 +687,7 @@ static void destroy_lun_state(ispsoftc_t *, int, tstat
 static void isp_enable_lun(ispsoftc_t *, union ccb *);
 static void isp_disable_lun(ispsoftc_t *, union ccb *);
 static callout_func_t isp_refire_notify_ack;
-static void isp_complete_ctio(union ccb *);
+static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
@@ -733,6 +765,8 @@ isp_tmcmd_restart(ispsoftc_t *isp)
 			isp_target_start_ctio(isp, ccb, FROM_TIMER);
 		}
 	}
+	isp_rq_check_above(isp);
+	isp_rq_check_below(isp);
 }
 
 static atio_private_data_t *
@@ -1308,12 +1342,12 @@ isp_refire_notify_ack(void *arg)
 
 
 static void
-isp_complete_ctio(union ccb *ccb)
+isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
 {
-	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
-		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
-		xpt_done(ccb);
-	}
+
+	isp_rq_check_below(isp);
+	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
+	xpt_done(ccb);
 }
 
 static void
@@ -1570,7 +1604,7 @@ fail:
 	isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
 	ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
-	isp_complete_ctio(ccb);
+	isp_complete_ctio(isp, ccb);
 	return;
 mdp:
 	if (isp_notify_ack(isp, inot)) {
@@ -1578,7 +1612,7 @@ mdp:
 		goto fail;
 	}
 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
-	ccb->ccb_h.status = CAM_MESSAGE_RECV;
+	ccb->ccb_h.status |= CAM_MESSAGE_RECV;
 	/*
 	 * This is not a strict interpretation of MDP, but it's close
 	 */
@@ -1591,7 +1625,7 @@ mdp:
 	ccb->csio.msg_ptr[4] = srr_off >> 16;
 	ccb->csio.msg_ptr[5] = srr_off >> 8;
 	ccb->csio.msg_ptr[6] = srr_off;
-	isp_complete_ctio(ccb);
+	isp_complete_ctio(isp, ccb);
 }
 
 
@@ -1718,7 +1752,7 @@ isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t 
 	 *
 	 * 24XX cards never need an ATIO put back.
 	 */
-	isp_complete_ctio(ccb);
+	isp_complete_ctio(isp, ccb);
 }
 
 static int
@@ -2475,6 +2509,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
 			break;
 		}
 		error = isp_start((XS_T *) ccb);
+		isp_rq_check_above(isp);
 		switch (error) {
 		case 0:
 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
@@ -2497,7 +2532,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
 		case CMD_EAGAIN:
 			isp_free_pcmd(isp, ccb);
 			cam_freeze_devq(ccb->ccb_h.path);
-			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 100, 0);
+			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
 			xpt_done(ccb);
 			break;
@@ -2589,6 +2624,7 @@ isp_action(struct cam_sim *sim, union ccb *ccb)
 	}
 	case XPT_CONT_TARGET_IO:
 		isp_target_start_ctio(isp, ccb, FROM_CAM);
+		isp_rq_check_above(isp);
 		break;
 #endif
 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
@@ -2877,6 +2913,7 @@ isp_done(XS_T *sccb)
 			callout_stop(&PISP_PCMD(sccb)->wdog);
 		isp_free_pcmd(isp, (union ccb *) sccb);
 	}
+	isp_rq_check_below(isp);
 	xpt_done((union ccb *) sccb);
 }
 

Modified: head/sys/dev/isp/isp_library.c
==============================================================================
--- head/sys/dev/isp/isp_library.c	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/isp_library.c	Tue Nov 24 04:16:49 2020	(r367979)
@@ -65,7 +65,7 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, u
 {
 	ispcontreq64_t crq;
 	uint8_t type, nqe = 1;
-	uint32_t seg, seglim, nxt, nxtnxt;
+	uint32_t seg, seglim, nxt;
 	ispds64_t *dsp64 = NULL;
 	void *qe0, *qe1;
 
@@ -109,14 +109,8 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, u
 	 * Second, start building additional continuation segments as needed.
 	 */
 	while (seg < nsegs) {
-		nxtnxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp));
-		if (nxtnxt == isp->isp_reqodx) {
-			isp->isp_reqodx = ISP_READ(isp, BIU2400_REQOUTP);
-			if (nxtnxt == isp->isp_reqodx)
-				return (CMD_EAGAIN);
-		}
-		qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt);
-		nxt = nxtnxt;
+		if (!isp_rqentry_avail(isp, ++nqe))
+			return (CMD_EAGAIN);
 
 		ISP_MEMZERO(&crq, QENTRY_LEN);
 		crq.req_header.rqs_entry_type = RQSTYPE_A64_CONT;
@@ -128,14 +122,17 @@ isp_send_cmd(ispsoftc_t *isp, void *fqe, void *segp, u
 			seglim = nsegs;
 		while (seg < seglim)
 			XS_GET_DMA64_SEG(dsp64++, segp, seg++);
+
+		qe1 = ISP_QUEUE_ENTRY(isp->isp_rquest, nxt);
 		isp_put_cont64_req(isp, &crq, qe1);
 		if (isp->isp_dblev & ISP_LOGDEBUG1) {
 			isp_print_bytes(isp, "additional queue entry",
 			    QENTRY_LEN, qe1);
 		}
-		nqe++;
-        }
 
+		nxt = ISP_NXT_QENTRY(nxt, RQUEST_QUEUE_LEN(isp));
+	}
+
 copy_and_sync:
 	((isphdr_t *)fqe)->rqs_entry_count = nqe;
 	switch (type) {
@@ -216,26 +213,6 @@ isp_destroy_handle(ispsoftc_t *isp, uint32_t handle)
 		isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)].cmd = isp->isp_xffree;
 		isp->isp_xffree = &isp->isp_xflist[(handle & ISP_HANDLE_CMD_MASK)];
 	}
-}
-
-/*
- * Make sure we have space to put something on the request queue.
- * Return a pointer to that entry if we do. A side effect of this
- * function is to update the output index. The input index
- * stays the same.
- */
-void *
-isp_getrqentry(ispsoftc_t *isp)
-{
-	uint32_t next;
-
-	next = ISP_NXT_QENTRY(isp->isp_reqidx, RQUEST_QUEUE_LEN(isp));
-	if (next == isp->isp_reqodx) {
-		isp->isp_reqodx = ISP_READ(isp, BIU2400_REQOUTP);
-		if (next == isp->isp_reqodx)
-			return (NULL);
-	}
-	return (ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx));
 }
 
 #define	TBA	(4 * (((QENTRY_LEN >> 2) * 3) + 1) + 1)

Modified: head/sys/dev/isp/isp_library.h
==============================================================================
--- head/sys/dev/isp/isp_library.h	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/isp_library.h	Tue Nov 24 04:16:49 2020	(r367979)
@@ -53,7 +53,23 @@ void isp_destroy_handle(ispsoftc_t *, uint32_t);
 /*
  * Request Queue allocation
  */
-void *isp_getrqentry(ispsoftc_t *);
+inline int
+isp_rqentry_avail(ispsoftc_t *isp, uint32_t num)
+{
+	if (ISP_QAVAIL(isp) >= num)
+		return (1);
+	/* We don't have enough in cached.  Reread the hardware. */
+	isp->isp_reqodx = ISP_READ(isp, BIU2400_REQOUTP);
+	return (ISP_QAVAIL(isp) >= num);
+}
+
+inline void *
+isp_getrqentry(ispsoftc_t *isp)
+{
+	if (!isp_rqentry_avail(isp, 1))
+		return (NULL);
+	return (ISP_QUEUE_ENTRY(isp->isp_rquest, isp->isp_reqidx));
+}
 
 /*
  * Queue Entry debug functions

Modified: head/sys/dev/isp/ispmbox.h
==============================================================================
--- head/sys/dev/isp/ispmbox.h	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/ispmbox.h	Tue Nov 24 04:16:49 2020	(r367979)
@@ -333,6 +333,7 @@
  * All IOCB Queue entries are this size
  */
 #define	QENTRY_LEN			64
+#define	QENTRY_MAX			255
 
 /*
  * Command Structure Definitions
@@ -1694,14 +1695,12 @@ typedef struct {
 /*
  * Miscellaneous
  *
- * These are the limits of the number of dma segments we
- * can deal with based not on the size of the segment counter
- * (which is 16 bits), but on the size of the number of 
- * queue entries field (which is 8 bits). We assume no
- * segments in the first queue entry, so we can have
- * have 5 dma segments per continuation entry...
- * multiplying out by 254....
+ * This is the limit of the number of dma segments we can deal with based
+ * not on the size of the segment counter (which is 16 bits), but on the
+ * size of the number of queue entries field (which is 8 bits).  We assume
+ * one segment in the first queue entry, plus we can have 5 segments per
+ * continuation entry, multiplied by maximum of continuation entries.
  */
-#define	ISP_NSEG64_MAX	1270
+#define	ISP_NSEG64_MAX	(1 + (QENTRY_MAX - 1) * 5)
 
 #endif	/* _ISPMBOX_H */

Modified: head/sys/dev/isp/ispvar.h
==============================================================================
--- head/sys/dev/isp/ispvar.h	Tue Nov 24 03:49:37 2020	(r367978)
+++ head/sys/dev/isp/ispvar.h	Tue Nov 24 04:16:49 2020	(r367979)
@@ -131,16 +131,17 @@ struct ispmdvec {
  */
 /* This is the size of a queue entry (request and response) */
 #define	QENTRY_LEN			64
-/* Queue lengths must be a power of two and at least 8 elements. */
+/*
+ * Hardware requires queue lengths of at least 8 elements.  Driver requires
+ * lengths to be a power of two, and request queue of at least 256 elements.
+ */
 #define	RQUEST_QUEUE_LEN(x)		8192
 #define	RESULT_QUEUE_LEN(x)		1024
 #define	ATIO_QUEUE_LEN(x)		1024
 #define	ISP_QUEUE_ENTRY(q, idx)		(((uint8_t *)q) + ((size_t)(idx) * QENTRY_LEN))
 #define	ISP_QUEUE_SIZE(n)		((size_t)(n) * QENTRY_LEN)
 #define	ISP_NXT_QENTRY(idx, qlen)	(((idx) + 1) & ((qlen)-1))
-#define	ISP_QFREE(in, out, qlen)	\
-	((in == out)? (qlen - 1) : ((in > out)? \
-	((qlen - 1) - (in - out)) : (out - in - 1)))
+#define	ISP_QFREE(in, out, qlen)	((out - in - 1) & ((qlen) - 1))
 #define	ISP_QAVAIL(isp)	\
 	ISP_QFREE(isp->isp_reqidx, isp->isp_reqodx, RQUEST_QUEUE_LEN(isp))
 
@@ -472,7 +473,6 @@ struct ispsoftc {
 	volatile mbreg_t	isp_curmbx;	/* currently active mailbox command */
 	volatile uint32_t	isp_reqodx;	/* index of last ISP pickup */
 	volatile uint32_t	isp_reqidx;	/* index of next request */
-	volatile uint32_t	isp_residx;	/* index of last ISP write */
 	volatile uint32_t	isp_resodx;	/* index of next result */
 	volatile uint32_t	isp_atioodx;	/* index of next ATIO */
 	volatile uint32_t	isp_obits;	/* mailbox command output */
@@ -480,6 +480,7 @@ struct ispsoftc {
 	volatile uint16_t	isp_mboxtmp[MAX_MAILBOX];
 	volatile uint16_t	isp_lastmbxcmd;	/* last mbox command sent */
 	volatile uint16_t	isp_seqno;	/* running sequence number */
+	u_int			isp_rqovf;	/* request queue overflow */
 
 	/*
 	 * Active commands are stored here, indexed by handle functions.


More information about the svn-src-all mailing list