svn commit: r248737 - head/sys/dev/nvme

Jim Harris <jimharris@FreeBSD.org>
Tue Mar 26 18:37:37 UTC 2013


Author: jimharris
Date: Tue Mar 26 18:37:36 2013
New Revision: 248737
URL: http://svnweb.freebsd.org/changeset/base/248737

Log:
  Enable asynchronous event requests on non-Chatham devices.
  
  Also add logic to clean up all outstanding asynchronous event requests
  when resetting or shutting down the controller, since these requests
  will not be explicitly completed by the controller itself.
  
  Sponsored by:	Intel

Modified:
  head/sys/dev/nvme/nvme.h
  head/sys/dev/nvme/nvme_ctrlr.c
  head/sys/dev/nvme/nvme_ctrlr_cmd.c
  head/sys/dev/nvme/nvme_private.h
  head/sys/dev/nvme/nvme_qpair.c

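For readers skimming the diff, the lifecycle this revision introduces is: the driver posts min(aerl + 1, NVME_MAX_ASYNC_EVENTS) asynchronous event requests that never time out, each completion callback decodes the event and reposts its request, and on reset or shutdown the driver fabricates an ABORTED SQ DELETION completion for every outstanding request so that no request memory is leaked. The standalone C sketch below models that flow; the "model_" names are invented stand-ins rather than the driver's real API, and only the constants echoed from the diff (NVME_MAX_ASYNC_EVENTS, the aborted-SQ-deletion status) correspond to real identifiers.

/*
 * Standalone sketch of the AER lifecycle added by this revision.
 * All "model_" types and helpers are simplified stand-ins invented for
 * illustration; they are not the driver's real API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	MODEL_MAX_ASYNC_EVENTS		8	/* mirrors NVME_MAX_ASYNC_EVENTS */
#define	MODEL_SC_ABORTED_SQ_DELETION	0x08	/* generic status: aborted, SQ deletion */

struct model_ctrlr;

struct model_completion {
	uint16_t		sf_sct;		/* status code type */
	uint16_t		sf_sc;		/* status code */
};

struct model_aer {
	struct model_ctrlr	*ctrlr;
	int			outstanding;
};

struct model_ctrlr {
	uint32_t		num_aers;
	struct model_aer	aer[MODEL_MAX_ASYNC_EVENTS];
};

/* Submit (or repost) one asynchronous event request; it never times out. */
static void
model_submit_aer(struct model_ctrlr *ctrlr, struct model_aer *aer)
{

	aer->ctrlr = ctrlr;
	aer->outstanding = 1;	/* real driver: allocate a request and submit it */
}

/* Completion callback: repost unless this is a simulated shutdown abort. */
static void
model_aer_done(struct model_aer *aer, const struct model_completion *cpl)
{

	if (cpl->sf_sc == MODEL_SC_ABORTED_SQ_DELETION) {
		aer->outstanding = 0;	/* shutdown path: do not repost */
		return;
	}
	printf("Asynchronous event occurred.\n");
	model_submit_aer(aer->ctrlr, aer);
}

/* Shutdown: manufacture an aborted completion for every outstanding AER. */
static void
model_abort_outstanding_aers(struct model_ctrlr *ctrlr)
{
	struct model_completion	cpl;
	uint32_t		i;

	memset(&cpl, 0, sizeof(cpl));
	cpl.sf_sc = MODEL_SC_ABORTED_SQ_DELETION;
	for (i = 0; i < ctrlr->num_aers; i++)
		model_aer_done(&ctrlr->aer[i], &cpl);
}

int
main(void)
{
	struct model_completion	event;
	struct model_ctrlr	ctrlr;
	uint32_t		i, aerl;

	memset(&ctrlr, 0, sizeof(ctrlr));
	aerl = 3;	/* zero-based, as reported in the Identify Controller data */
	ctrlr.num_aers = (aerl + 1 < MODEL_MAX_ASYNC_EVENTS) ?
	    aerl + 1 : MODEL_MAX_ASYNC_EVENTS;
	for (i = 0; i < ctrlr.num_aers; i++)
		model_submit_aer(&ctrlr, &ctrlr.aer[i]);

	/* A real event completing on AER 0 gets decoded and reposted. */
	memset(&event, 0, sizeof(event));
	model_aer_done(&ctrlr.aer[0], &event);

	/* Resetting or destroying the controller cleans up all of them. */
	model_abort_outstanding_aers(&ctrlr);
	return (0);
}

Routing the simulated abort through the normal completion path, rather than freeing the requests directly, is what lets the existing tracker-completion code in nvme_qpair.c reclaim the request memory in one place.
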
Modified: head/sys/dev/nvme/nvme.h
==============================================================================
--- head/sys/dev/nvme/nvme.h	Tue Mar 26 18:34:19 2013	(r248736)
+++ head/sys/dev/nvme/nvme.h	Tue Mar 26 18:37:36 2013	(r248737)
@@ -360,7 +360,7 @@ enum nvme_feature {
 	NVME_FEAT_INTERRUPT_COALESCING		= 0x08,
 	NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION = 0x09,
 	NVME_FEAT_WRITE_ATOMICITY		= 0x0A,
-	NVME_FEAT_ASYNCHRONOUS_EVENT_CONFIGURATION = 0x0B,
+	NVME_FEAT_ASYNC_EVENT_CONFIGURATION	= 0x0B,
 	/* 0x0C-0x7F - reserved */
 	NVME_FEAT_SOFTWARE_PROGRESS_MARKER	= 0x80,
 	/* 0x81-0xBF - command set specific (reserved) */

Modified: head/sys/dev/nvme/nvme_ctrlr.c
==============================================================================
--- head/sys/dev/nvme/nvme_ctrlr.c	Tue Mar 26 18:34:19 2013	(r248736)
+++ head/sys/dev/nvme/nvme_ctrlr.c	Tue Mar 26 18:37:36 2013	(r248737)
@@ -38,6 +38,9 @@ __FBSDID("$FreeBSD$");
 
 #include "nvme_private.h"
 
+static void nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+						struct nvme_async_event_request *aer);
+
 static void
 nvme_ctrlr_cb(void *arg, const struct nvme_completion *status)
 {
@@ -409,30 +412,6 @@ nvme_ctrlr_reset(struct nvme_controller 
 	return (nvme_ctrlr_enable(ctrlr));
 }
 
-/*
- * Disable this code for now, since Chatham doesn't support
- *  AERs so I have no good way to test them.
- */
-#if 0
-static void
-nvme_async_event_cb(void *arg, const struct nvme_completion *status)
-{
-	struct nvme_controller *ctrlr = arg;
-
-	printf("Asynchronous event occurred.\n");
-
-	/* TODO: decode async event type based on status */
-	/* TODO: check status for any error bits */
-
-	/*
-	 * Repost an asynchronous event request so that it can be
-	 *  used again by the controller.
-	 */
-	nvme_ctrlr_cmd_asynchronous_event_request(ctrlr, nvme_async_event_cb,
-	    ctrlr);
-}
-#endif
-
 static int
 nvme_ctrlr_identify(struct nvme_controller *ctrlr)
 {
@@ -558,27 +537,71 @@ nvme_ctrlr_construct_namespaces(struct n
 }
 
 static void
+nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)
+{
+	struct nvme_async_event_request *aer = arg;
+
+	if (cpl->sf_sc == NVME_SC_ABORTED_SQ_DELETION) {
+		/*
+		 *  This is simulated when controller is being shut down, to
+		 *  effectively abort outstanding asynchronous event requests
+		 *  and make sure all memory is freed.  Do not repost the
+		 *  request in this case.
+		 */
+		return;
+	}
+
+	/* TODO: decode async event type based on status */
+
+	/*
+	 * Repost another asynchronous event request to replace the one that
+	 *  just completed.
+	 */
+	printf("Asynchronous event occurred.\n");
+	nvme_ctrlr_construct_and_submit_aer(aer->ctrlr, aer);
+}
+
+static void
+nvme_ctrlr_construct_and_submit_aer(struct nvme_controller *ctrlr,
+    struct nvme_async_event_request *aer)
+{
+	struct nvme_request *req;
+
+	aer->ctrlr = ctrlr;
+	req = nvme_allocate_request(NULL, 0, nvme_ctrlr_async_event_cb, aer);
+	aer->req = req;
+
+	/*
+	 * Override default timeout value here, since asynchronous event
+	 *  requests should by nature never be timed out.
+	 */
+	req->timeout = 0;
+	req->cmd.opc = NVME_OPC_ASYNC_EVENT_REQUEST;
+	nvme_ctrlr_submit_admin_request(ctrlr, req);
+}
+
+static void
 nvme_ctrlr_configure_aer(struct nvme_controller *ctrlr)
 {
 	union nvme_critical_warning_state	state;
-	uint8_t					num_async_events;
+	struct nvme_async_event_request		*aer;
+	uint32_t				i;
 
 	state.raw = 0xFF;
 	state.bits.reserved = 0;
-	nvme_ctrlr_cmd_set_asynchronous_event_config(ctrlr, state, NULL, NULL);
+	nvme_ctrlr_cmd_set_async_event_config(ctrlr, state, NULL, NULL);
 
 	/* aerl is a zero-based value, so we need to add 1 here. */
-	num_async_events = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
+	ctrlr->num_aers = min(NVME_MAX_ASYNC_EVENTS, (ctrlr->cdata.aerl+1));
 
-	/*
-	 * Disable this code for now, since Chatham doesn't support
-	 *  AERs so I have no good way to test them.
-	 */
-#if 0
-	for (int i = 0; i < num_async_events; i++)
-		nvme_ctrlr_cmd_asynchronous_event_request(ctrlr,
-		    nvme_async_event_cb, ctrlr);
-#endif
+	/* Chatham doesn't support AERs. */
+	if (pci_get_devid(ctrlr->dev) == CHATHAM_PCI_ID)
+		ctrlr->num_aers = 0;
+
+	for (i = 0; i < ctrlr->num_aers; i++) {
+		aer = &ctrlr->aer[i];
+		nvme_ctrlr_construct_and_submit_aer(ctrlr, aer);
+	}
 }
 
 static void
@@ -810,8 +833,8 @@ intx:
 void
 nvme_ctrlr_destruct(struct nvme_controller *ctrlr, device_t dev)
 {
-	struct nvme_namespace	*ns;
-	int			i;
+	struct nvme_namespace		*ns;
+	int				i;
 
 	for (i = 0; i < NVME_MAX_NAMESPACES; i++) {
 		ns = &ctrlr->ns[i];
@@ -828,6 +851,13 @@ nvme_ctrlr_destruct(struct nvme_controll
 
 	free(ctrlr->ioq, M_NVME);
 
+	/* Manually abort outstanding async event requests. */
+	for (i = 0; i < ctrlr->num_aers; i++) {
+		nvme_qpair_manual_abort_request(&ctrlr->adminq,
+		    ctrlr->aer[i].req, NVME_SCT_GENERIC,
+		    NVME_SC_ABORTED_SQ_DELETION, FALSE);
+	}
+
 	nvme_admin_qpair_destroy(&ctrlr->adminq);
 
 	if (ctrlr->resource != NULL) {

Modified: head/sys/dev/nvme/nvme_ctrlr_cmd.c
==============================================================================
--- head/sys/dev/nvme/nvme_ctrlr_cmd.c	Tue Mar 26 18:34:19 2013	(r248736)
+++ head/sys/dev/nvme/nvme_ctrlr_cmd.c	Tue Mar 26 18:37:36 2013	(r248737)
@@ -211,7 +211,7 @@ nvme_ctrlr_cmd_set_num_queues(struct nvm
 }
 
 void
-nvme_ctrlr_cmd_set_asynchronous_event_config(struct nvme_controller *ctrlr,
+nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
     union nvme_critical_warning_state state, nvme_cb_fn_t cb_fn,
     void *cb_arg)
 {
@@ -219,7 +219,7 @@ nvme_ctrlr_cmd_set_asynchronous_event_co
 
 	cdw11 = state.raw;
 	nvme_ctrlr_cmd_set_feature(ctrlr,
-	    NVME_FEAT_ASYNCHRONOUS_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
+	    NVME_FEAT_ASYNC_EVENT_CONFIGURATION, cdw11, NULL, 0, cb_fn,
 	    cb_arg);
 }
 
@@ -249,27 +249,6 @@ nvme_ctrlr_cmd_set_interrupt_coalescing(
 }
 
 void
-nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
-    nvme_cb_fn_t cb_fn, void *cb_arg)
-{
-	struct nvme_request *req;
-	struct nvme_command *cmd;
-
-	req = nvme_allocate_request(NULL, 0, cb_fn, cb_arg);
-
-	/*
-	 * Override default timeout value here, since asynchronous event
-	 *  requests should by nature never be timed out.
-	 */
-	req->timeout = 0;
-
-	cmd = &req->cmd;
-	cmd->opc = NVME_OPC_ASYNC_EVENT_REQUEST;
-
-	nvme_ctrlr_submit_admin_request(ctrlr, req);
-}
-
-void
 nvme_ctrlr_cmd_get_health_information_page(struct nvme_controller *ctrlr,
     uint32_t nsid, struct nvme_health_information_page *payload,
     nvme_cb_fn_t cb_fn, void *cb_arg)

Modified: head/sys/dev/nvme/nvme_private.h
==============================================================================
--- head/sys/dev/nvme/nvme_private.h	Tue Mar 26 18:34:19 2013	(r248736)
+++ head/sys/dev/nvme/nvme_private.h	Tue Mar 26 18:37:36 2013	(r248737)
@@ -97,7 +97,7 @@ MALLOC_DECLARE(M_NVME);
 
 #define NVME_MAX_NAMESPACES	(16)
 #define NVME_MAX_CONSUMERS	(2)
-#define NVME_MAX_ASYNC_EVENTS	(4)
+#define NVME_MAX_ASYNC_EVENTS	(8)
 
 #define NVME_TIMEOUT_IN_SEC	(30)
 
@@ -119,6 +119,12 @@ struct nvme_request {
 	STAILQ_ENTRY(nvme_request)	stailq;
 };
 
+struct nvme_async_event_request {
+
+	struct nvme_controller		*ctrlr;
+	struct nvme_request		*req;
+};
+
 struct nvme_tracker {
 
 	SLIST_ENTRY(nvme_tracker)	slist;
@@ -255,6 +261,9 @@ struct nvme_controller {
 
 	boolean_t			is_started;
 
+	uint32_t			num_aers;
+	struct nvme_async_event_request	aer[NVME_MAX_ASYNC_EVENTS];
+
 #ifdef CHATHAM2
 	uint64_t		chatham_size;
 	uint64_t		chatham_lbas;
@@ -343,12 +352,9 @@ void	nvme_ctrlr_cmd_delete_io_sq(struct 
 void	nvme_ctrlr_cmd_set_num_queues(struct nvme_controller *ctrlr,
 				      uint32_t num_queues, nvme_cb_fn_t cb_fn,
 				      void *cb_arg);
-void	nvme_ctrlr_cmd_set_asynchronous_event_config(struct nvme_controller *ctrlr,
-					   union nvme_critical_warning_state state,
-					   nvme_cb_fn_t cb_fn, void *cb_arg);
-void	nvme_ctrlr_cmd_asynchronous_event_request(struct nvme_controller *ctrlr,
-						  nvme_cb_fn_t cb_fn,
-						  void *cb_arg);
+void	nvme_ctrlr_cmd_set_async_event_config(struct nvme_controller *ctrlr,
+					      union nvme_critical_warning_state state,
+					      nvme_cb_fn_t cb_fn, void *cb_arg);
 void	nvme_ctrlr_cmd_abort(struct nvme_controller *ctrlr, uint16_t cid,
 			     uint16_t sqid, nvme_cb_fn_t cb_fn, void *cb_arg);
 
@@ -376,6 +382,9 @@ void	nvme_qpair_submit_cmd(struct nvme_q
 void	nvme_qpair_process_completions(struct nvme_qpair *qpair);
 void	nvme_qpair_submit_request(struct nvme_qpair *qpair,
 				  struct nvme_request *req);
+void	nvme_qpair_manual_abort_request(struct nvme_qpair *qpair,
+					struct nvme_request *req, uint32_t sct,
+					uint32_t sc, boolean_t print_on_error);
 
 void	nvme_admin_qpair_destroy(struct nvme_qpair *qpair);
 

Modified: head/sys/dev/nvme/nvme_qpair.c
==============================================================================
--- head/sys/dev/nvme/nvme_qpair.c	Tue Mar 26 18:34:19 2013	(r248736)
+++ head/sys/dev/nvme/nvme_qpair.c	Tue Mar 26 18:37:36 2013	(r248737)
@@ -87,6 +87,23 @@ nvme_completion_is_retry(const struct nv
 	}
 }
 
+static struct nvme_tracker *
+nvme_qpair_find_tracker(struct nvme_qpair *qpair, struct nvme_request *req)
+{
+	struct nvme_tracker	*tr;
+	uint32_t		i;
+
+	KASSERT(req != NULL, ("%s: called with NULL req\n", __func__));
+
+	for (i = 0; i < qpair->num_entries; ++i) {
+		tr = qpair->act_tr[i];
+		if (tr != NULL && tr->req == req)
+			return (tr);
+	}
+
+	return (NULL);
+}
+
 static void
 nvme_qpair_construct_tracker(struct nvme_qpair *qpair, struct nvme_tracker *tr,
     uint16_t cid)
@@ -137,6 +154,7 @@ nvme_qpair_complete_tracker(struct nvme_
 			    tr->payload_dma_map);
 
 		nvme_free_request(req);
+		tr->req = NULL;
 
 		SLIST_INSERT_HEAD(&qpair->free_tr, tr, slist);
 
@@ -297,7 +315,7 @@ nvme_qpair_construct(struct nvme_qpair *
 static void
 nvme_qpair_destroy(struct nvme_qpair *qpair)
 {
-	struct nvme_tracker *tr;
+	struct nvme_tracker	*tr;
 
 	if (qpair->tag)
 		bus_teardown_intr(qpair->ctrlr->dev, qpair->res, qpair->tag);
@@ -393,9 +411,41 @@ nvme_io_qpair_destroy(struct nvme_qpair 
 }
 
 static void
-nvme_abort_complete(void *arg, const struct nvme_completion *status)
+nvme_qpair_manual_abort_tracker(struct nvme_qpair *qpair,
+    struct nvme_tracker *tr, uint32_t sct, uint32_t sc,
+    boolean_t print_on_error)
 {
 	struct nvme_completion	cpl;
+
+	memset(&cpl, 0, sizeof(cpl));
+	cpl.sqid = qpair->id;
+	cpl.cid = tr->cid;
+	cpl.sf_sct = sct;
+	cpl.sf_sc = sc;
+	nvme_qpair_complete_tracker(qpair, tr, &cpl, print_on_error);
+}
+
+void
+nvme_qpair_manual_abort_request(struct nvme_qpair *qpair,
+    struct nvme_request *req, uint32_t sct, uint32_t sc,
+    boolean_t print_on_error)
+{
+	struct nvme_tracker	*tr;
+
+	tr = nvme_qpair_find_tracker(qpair, req);
+
+	if (tr == NULL) {
+		printf("%s: request not found\n", __func__);
+		nvme_dump_command(&req->cmd);
+		return;
+	}
+
+	nvme_qpair_manual_abort_tracker(qpair, tr, sct, sc, print_on_error);
+}
+
+static void
+nvme_abort_complete(void *arg, const struct nvme_completion *status)
+{
 	struct nvme_tracker	*tr = arg;
 
 	/*
@@ -411,12 +461,8 @@ nvme_abort_complete(void *arg, const str
 		 *  status, and then complete the I/O's tracker manually.
 		 */
 		printf("abort command failed, aborting command manually\n");
-		memset(&cpl, 0, sizeof(cpl));
-		cpl.sqid = tr->qpair->id;
-		cpl.cid = tr->cid;
-		cpl.sf_sct = NVME_SCT_GENERIC;
-		cpl.sf_sc = NVME_SC_ABORTED_BY_REQUEST;
-		nvme_qpair_complete_tracker(tr->qpair, tr, &cpl, TRUE);
+		nvme_qpair_manual_abort_tracker(tr->qpair, tr,
+		    NVME_SCT_GENERIC, NVME_SC_ABORTED_BY_REQUEST, TRUE);
 	}
 }
 

