svn commit: r335372 - head/sys/dev/ixl

Sean Bruno sbruno at FreeBSD.org
Tue Jun 19 15:55:22 UTC 2018


Author: sbruno
Date: Tue Jun 19 15:55:21 2018
New Revision: 335372
URL: https://svnweb.freebsd.org/changeset/base/335372

Log:
  Remove "diff" line indicator.  Next to see if this code works or not.
  
  Submitted by:	mmacy
  Sponsored by:	Limelight Networks

Modified:
  head/sys/dev/ixl/ixlvc.c

Modified: head/sys/dev/ixl/ixlvc.c
==============================================================================
--- head/sys/dev/ixl/ixlvc.c	Tue Jun 19 15:05:31 2018	(r335371)
+++ head/sys/dev/ixl/ixlvc.c	Tue Jun 19 15:55:21 2018	(r335372)
@@ -386,9 +386,9 @@ ixlv_configure_queues(struct ixlv_sc *sc)
 {
 	device_t		dev = sc->dev;
 	struct ixl_vsi		*vsi = &sc->vsi;
-+	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
-+	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
-+	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
+	if_softc_ctx_t		scctx = iflib_get_softc_ctx(vsi->ctx);
+	struct ixl_tx_queue	*tx_que = vsi->tx_queues;
+	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
 	struct tx_ring		*txr;
 	struct rx_ring		*rxr;
 	int			len, pairs;
@@ -396,9 +396,9 @@ ixlv_configure_queues(struct ixlv_sc *sc)
 	struct virtchnl_vsi_queue_config_info *vqci;
 	struct virtchnl_queue_pair_info *vqpi;
 
-+	/* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
-+	 * queues of a pair need to be configured */
-+	pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
+	/* XXX: Linux PF driver wants matching ids in each tx/rx struct, so both TX/RX
+	 * queues of a pair need to be configured */
+	pairs = max(vsi->num_tx_queues, vsi->num_rx_queues);
 	len = sizeof(struct virtchnl_vsi_queue_config_info) +
 		       (sizeof(struct virtchnl_queue_pair_info) * pairs);
 	vqci = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO);
@@ -413,24 +413,24 @@ ixlv_configure_queues(struct ixlv_sc *sc)
 	/* Size check is not needed here - HW max is 16 queue pairs, and we
 	 * can fit info for 31 of them into the AQ buffer before it overflows.
 	 */
-+	for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
-+		txr = &tx_que->txr;
-+		rxr = &rx_que->rxr;
-+
+	for (int i = 0; i < pairs; i++, tx_que++, rx_que++, vqpi++) {
+		txr = &tx_que->txr;
+		rxr = &rx_que->rxr;
+
 		vqpi->txq.vsi_id = vqci->vsi_id;
 		vqpi->txq.queue_id = i;
-+		vqpi->txq.ring_len = scctx->isc_ntxd[0];
-+		vqpi->txq.dma_ring_addr = txr->tx_paddr;
+		vqpi->txq.ring_len = scctx->isc_ntxd[0];
+		vqpi->txq.dma_ring_addr = txr->tx_paddr;
 		/* Enable Head writeback */
 		vqpi->txq.headwb_enabled = 0;
 		vqpi->txq.dma_headwb_addr = 0;
 
 		vqpi->rxq.vsi_id = vqci->vsi_id;
 		vqpi->rxq.queue_id = i;
-+		vqpi->rxq.ring_len = scctx->isc_nrxd[0];
-+		vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
-+		vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
-+		// TODO: Get this value from iflib, somehow
+		vqpi->rxq.ring_len = scctx->isc_nrxd[0];
+		vqpi->rxq.dma_ring_addr = rxr->rx_paddr;
+		vqpi->rxq.max_pkt_size = scctx->isc_max_frame_size;
+		// TODO: Get this value from iflib, somehow
 		vqpi->rxq.databuffer_size = rxr->mbuf_sz;
 		vqpi->rxq.splithdr_enabled = 0;
 	}
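
For readers following the virtchnl details: ixlv_configure_queues() builds a
variable-length message, a fixed header followed by one queue-pair entry per
pair, where pairs = max(num_tx_queues, num_rx_queues) because both halves of
a pair must be configured, and the loop above copies ring length, DMA base
address, max frame size and mbuf size out of the iflib context into each
entry.  A rough standalone sketch of that allocate-and-fill pattern follows;
the struct and field names are invented for illustration and are not the real
virtchnl definitions.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-ins for the virtchnl message layout. */
struct qpair_sketch {
	unsigned int		queue_id;	/* shared by the TX/RX halves */
	unsigned int		tx_ring_len;
	unsigned long long	tx_dma_addr;
	unsigned int		rx_ring_len;
	unsigned long long	rx_dma_addr;
	unsigned int		rx_buf_size;
};

struct qconfig_sketch {
	unsigned int		vsi_id;
	unsigned int		num_queue_pairs;
	struct qpair_sketch	qpair[];	/* flexible array member */
};

int
main(void)
{
	int num_tx = 4, num_rx = 4;	/* per-VSI queue counts */
	/* Both queues of a pair are configured, so size for the larger count. */
	int pairs = (num_tx > num_rx) ? num_tx : num_rx;
	size_t len = sizeof(struct qconfig_sketch) +
	    sizeof(struct qpair_sketch) * pairs;
	struct qconfig_sketch *qc = calloc(1, len);

	if (qc == NULL)
		return (1);
	qc->vsi_id = 0;
	qc->num_queue_pairs = pairs;
	for (int i = 0; i < pairs; i++) {
		/* In the driver these values come from scctx, txr and rxr. */
		qc->qpair[i].queue_id = i;
		qc->qpair[i].tx_ring_len = 1024;
		qc->qpair[i].rx_ring_len = 1024;
		qc->qpair[i].rx_buf_size = 2048;
	}
	printf("message length: %zu bytes for %d pairs\n", len, pairs);
	free(qc);
	return (0);
}
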
@@ -451,8 +451,8 @@ ixlv_enable_queues(struct ixlv_sc *sc)
 	struct virtchnl_queue_select vqs;
 
 	vqs.vsi_id = sc->vsi_res->vsi_id;
-+	/* XXX: In Linux PF, as long as neither of these is 0,
-+	 * every queue in VF VSI is enabled. */
+	/* XXX: In Linux PF, as long as neither of these is 0,
+	 * every queue in VF VSI is enabled. */
 	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	ixlv_send_pf_msg(sc, VIRTCHNL_OP_ENABLE_QUEUES,
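
The tx_queues/rx_queues fields are per-queue bitmaps, so (1 << num_queue_pairs) - 1
sets the low num_queue_pairs bits and asks for every queue in the VF VSI; as
the comment notes, the Linux PF only cares that the masks are non-zero.  A
quick standalone illustration of the mask arithmetic (the loop bound is just
for the demo):

#include <stdio.h>

int
main(void)
{
	/* One bit per queue pair: 4 pairs -> 0x0f, 8 pairs -> 0xff. */
	for (unsigned int pairs = 1; pairs <= 8; pairs++) {
		unsigned int mask = (1u << pairs) - 1;
		printf("%u pairs -> queue mask 0x%02x\n", pairs, mask);
	}
	return (0);
}

Since the hardware limit mentioned earlier in this file is 16 queue pairs, the
shift stays well inside a 32-bit mask.
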
@@ -470,8 +470,8 @@ ixlv_disable_queues(struct ixlv_sc *sc)
 	struct virtchnl_queue_select vqs;
 
 	vqs.vsi_id = sc->vsi_res->vsi_id;
-+	/* XXX: In Linux PF, as long as neither of these is 0,
-+	 * every queue in VF VSI is disabled. */
+	/* XXX: In Linux PF, as long as neither of these is 0,
+	 * every queue in VF VSI is disabled. */
 	vqs.tx_queues = (1 << sc->vsi_res->num_queue_pairs) - 1;
 	vqs.rx_queues = vqs.tx_queues;
 	ixlv_send_pf_msg(sc, VIRTCHNL_OP_DISABLE_QUEUES,
@@ -490,29 +490,29 @@ ixlv_map_queues(struct ixlv_sc *sc)
 	struct virtchnl_irq_map_info *vm;
 	int 			i, q, len;
 	struct ixl_vsi		*vsi = &sc->vsi;
-+	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
-+	if_softc_ctx_t		scctx = vsi->shared;
-+	device_t		dev = sc->dev;
-+
-+	// XXX: What happens if we only get 1 MSI-X vector?
-+	MPASS(scctx->isc_vectors > 1);
+	struct ixl_rx_queue	*rx_que = vsi->rx_queues;
+	if_softc_ctx_t		scctx = vsi->shared;
+	device_t		dev = sc->dev;
 
+	// XXX: What happens if we only get 1 MSI-X vector?
+	MPASS(scctx->isc_vectors > 1);
+
 	/* How many queue vectors, adminq uses one */
-+	// XXX: How do we know how many interrupt vectors we have?
-+	q = scctx->isc_vectors - 1;
+	// XXX: How do we know how many interrupt vectors we have?
+	q = scctx->isc_vectors - 1;
 
 	len = sizeof(struct virtchnl_irq_map_info) +
-+	      (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map));
+	      (scctx->isc_vectors * sizeof(struct i40e_virtchnl_vector_map));
 	vm = malloc(len, M_DEVBUF, M_NOWAIT);
 	if (!vm) {
-+		device_printf(dev, "%s: unable to allocate memory\n", __func__);
+		device_printf(dev, "%s: unable to allocate memory\n", __func__);
 		ixl_vc_schedule_retry(&sc->vc_mgr);
 		return;
 	}
 
-+	vm->num_vectors = scctx->isc_vectors;
+	vm->num_vectors = scctx->isc_vectors;
 	/* Queue vectors first */
-+	for (i = 0; i < q; i++, rx_que++) {
+	for (i = 0; i < q; i++, rx_que++) {
 		vm->vecmap[i].vsi_id = sc->vsi_res->vsi_id;
 		vm->vecmap[i].vector_id = i + 1; /* first is adminq */
 		// vm->vecmap[i].txq_map = (1 << que->me);
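
A note on the interrupt map being built here: vector 0 belongs to the admin
queue, so the q = isc_vectors - 1 queue vectors are assigned vector_id i + 1,
and the message carries isc_vectors entries in total.  A hedged standalone
sketch of that layout (the struct is an invented stand-in for
struct i40e_virtchnl_vector_map, and the trailing adminq entry is an
assumption based on the "first is adminq" comment above):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for one vector-map entry. */
struct vecmap_sketch {
	unsigned int	vsi_id;
	unsigned int	vector_id;
	unsigned int	rxq_map;	/* bitmap of RX queues on this vector */
};

int
main(void)
{
	int isc_vectors = 5;		/* e.g. 1 adminq vector + 4 queue vectors */
	int q = isc_vectors - 1;	/* the adminq uses the first vector */
	struct vecmap_sketch *vm = calloc(isc_vectors, sizeof(*vm));

	if (vm == NULL)
		return (1);
	/* Queue vectors first: RX queue i is serviced by MSI-X vector i + 1. */
	for (int i = 0; i < q; i++) {
		vm[i].vsi_id = 0;
		vm[i].vector_id = i + 1;
		vm[i].rxq_map = 1u << i;
	}
	/* Last entry: the adminq itself on vector 0 (assumed layout). */
	vm[q].vector_id = 0;
	for (int i = 0; i < isc_vectors; i++)
		printf("entry %d: vector %u, rxq_map 0x%x\n",
		    i, vm[i].vector_id, vm[i].rxq_map);
	free(vm);
	return (0);
}
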
@@ -890,12 +890,12 @@ void
 ixlv_set_rss_hena(struct ixlv_sc *sc)
 {
 	struct virtchnl_rss_hena hena;
-+	struct i40e_hw *hw = &sc->hw;
+	struct i40e_hw *hw = &sc->hw;
 
-+	if (hw->mac.type == I40E_MAC_X722_VF)
-+		hena.hena = IXL_DEFAULT_RSS_HENA_X722;
-+	else
-+		hena.hena = IXL_DEFAULT_RSS_HENA_XL710;
+	if (hw->mac.type == I40E_MAC_X722_VF)
+		hena.hena = IXL_DEFAULT_RSS_HENA_X722;
+	else
+		hena.hena = IXL_DEFAULT_RSS_HENA_XL710;
 
 	ixlv_send_pf_msg(sc, VIRTCHNL_OP_SET_RSS_HENA,
 			  (u8 *)&hena, sizeof(hena));
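
For context, hena is a bitmask of the packet types the device will hash for
RSS, and the X722 VF gets a different default set than the XL710 VF.  A
minimal illustration of the selection is below; the bit values are
placeholders, not the real IXL_DEFAULT_RSS_HENA_* definitions, which in the
driver are built from I40E_FILTER_PCTYPE_* bits.

#include <stdio.h>
#include <stdint.h>

enum mac_type_sketch { MAC_XL710_VF, MAC_X722_VF };

/* Placeholder hash-enable defaults (illustrative values only). */
#define DEFAULT_HENA_XL710	0x0000000000000fffULL
#define DEFAULT_HENA_X722	0x000000000000ffffULL

static uint64_t
default_hena(enum mac_type_sketch type)
{
	return (type == MAC_X722_VF ? DEFAULT_HENA_X722 : DEFAULT_HENA_XL710);
}

int
main(void)
{
	printf("XL710 hena 0x%llx, X722 hena 0x%llx\n",
	    (unsigned long long)default_hena(MAC_XL710_VF),
	    (unsigned long long)default_hena(MAC_X722_VF));
	return (0);
}
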
@@ -931,9 +931,9 @@ ixlv_config_rss_lut(struct ixlv_sc *sc)
 		 * num_queues.)
 		 */
 		que_id = rss_get_indirection_to_bucket(i);
-+		que_id = que_id % sc->vsi.num_rx_queues;
+		que_id = que_id % sc->vsi.num_rx_queues;
 #else
-+		que_id = i % sc->vsi.num_rx_queues;
+		que_id = i % sc->vsi.num_rx_queues;
 #endif
 		lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
 		rss_lut_msg->lut[i] = lut;
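
Each LUT entry maps an RSS hash bucket to an RX queue index: with RSS in the
kernel the bucket from rss_get_indirection_to_bucket() is folded into the
VSI's RX queue count with a modulo, otherwise entries are simply
round-robined, and the result is masked with IXL_RSS_VSI_LUT_ENTRY_MASK
before being stored.  A small standalone sketch of the round-robin case
(table size and mask are illustrative, not the driver's constants):

#include <stdio.h>

#define LUT_SIZE	64	/* illustrative LUT size */
#define LUT_ENTRY_MASK	0x3f	/* illustrative entry mask */

int
main(void)
{
	int num_rx_queues = 6;
	unsigned char lut[LUT_SIZE];

	/* Round-robin the table entries across the available RX queues,
	 * mirroring the !RSS branch above (i % num_rx_queues). */
	for (int i = 0; i < LUT_SIZE; i++)
		lut[i] = (i % num_rx_queues) & LUT_ENTRY_MASK;

	for (int i = 0; i < LUT_SIZE; i++)
		printf("%d%c", lut[i], (i % 16 == 15) ? '\n' : ' ');
	return (0);
}
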
@@ -980,9 +980,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
 		case VIRTCHNL_EVENT_RESET_IMPENDING:
 			device_printf(dev, "PF initiated reset!\n");
 			sc->init_state = IXLV_RESET_PENDING;
-+			// mtx_unlock(&sc->mtx);
-+			ixlv_if_init(sc->vsi.ctx);
-+			// mtx_lock(&sc->mtx);
+			ixlv_if_init(sc->vsi.ctx);
 			break;
 		default:
 			device_printf(dev, "%s: Unknown event %d from AQ\n",

