svn commit: r366929 - head/sys/dev/cxgbe

Navdeep Parhar <np@FreeBSD.org>
Thu Oct 22 08:40:26 UTC 2020


Author: np
Date: Thu Oct 22 08:40:25 2020
New Revision: 366929
URL: https://svnweb.freebsd.org/changeset/base/366929

Log:
  cxgbe(4): fix the size of the iq/eq maps.
  
  The firmware can allocate ingress and egress context ids anywhere
  within its configured range.  Size the iq/eq maps to cover the entire
  range instead of assuming that the firmware always allocates the first
  available context id.
  
  Reported by:	Baptiste Wicht @ Verisign
  MFC after:	1 week
  Sponsored by:	Chelsio Communications
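
In rough terms, the change sizes each lookup map from the firmware-reported
end of the configured range rather than from the number of queues the driver
created.  A minimal sketch of the arithmetic follows; the helper names are
made up for illustration and are not part of this commit (the real code below
queries FW_PARAM_PFVF(IQFLINT_END) and FW_PARAM_PFVF(EQ_END) in
get_params__post_init):

	/*
	 * Sketch only, not code from this commit: the map that translates a
	 * context id into a queue pointer must cover the firmware's whole
	 * configured range.  fw_range_end stands in for the value reported
	 * by FW_PARAM_PFVF(IQFLINT_END) or FW_PARAM_PFVF(EQ_END).
	 */
	static inline int
	ctxt_map_size(int start, int fw_range_end)
	{
		return (fw_range_end - start + 1);	/* inclusive range */
	}

	static inline int
	ctxt_map_index(int start, int map_sz, int cntxt_id)
	{
		int idx = cntxt_id - start;

		/* Any id the firmware hands back must land inside the map. */
		return (idx >= 0 && idx < map_sz ? idx : -1);
	}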

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_netmap.c
  head/sys/dev/cxgbe/t4_sge.c

Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h	Thu Oct 22 06:51:47 2020	(r366928)
+++ head/sys/dev/cxgbe/adapter.h	Thu Oct 22 08:40:25 2020	(r366929)
@@ -799,6 +799,8 @@ struct sge {
 	uint16_t iq_base;	/* first abs_id */
 	int eq_start;		/* first cntxt_id */
 	int eq_base;		/* first abs_id */
+	int iqmap_sz;
+	int eqmap_sz;
 	struct sge_iq **iqmap;	/* iq->cntxt_id to iq mapping */
 	struct sge_eq **eqmap;	/* eq->cntxt_id to eq mapping */
 

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Thu Oct 22 06:51:47 2020	(r366928)
+++ head/sys/dev/cxgbe/t4_main.c	Thu Oct 22 08:40:25 2020	(r366929)
@@ -1358,6 +1358,8 @@ t4_attach(device_t dev)
 	s->nm_txq = malloc(s->nnmtxq * sizeof(struct sge_nm_txq),
 	    M_CXGBE, M_ZERO | M_WAITOK);
 #endif
+	MPASS(s->niq <= s->iqmap_sz);
+	MPASS(s->neq <= s->eqmap_sz);
 
 	s->ctrlq = malloc(nports * sizeof(struct sge_wrq), M_CXGBE,
 	    M_ZERO | M_WAITOK);
@@ -1365,9 +1367,9 @@ t4_attach(device_t dev)
 	    M_ZERO | M_WAITOK);
 	s->txq = malloc(s->ntxq * sizeof(struct sge_txq), M_CXGBE,
 	    M_ZERO | M_WAITOK);
-	s->iqmap = malloc(s->niq * sizeof(struct sge_iq *), M_CXGBE,
+	s->iqmap = malloc(s->iqmap_sz * sizeof(struct sge_iq *), M_CXGBE,
 	    M_ZERO | M_WAITOK);
-	s->eqmap = malloc(s->neq * sizeof(struct sge_eq *), M_CXGBE,
+	s->eqmap = malloc(s->eqmap_sz * sizeof(struct sge_eq *), M_CXGBE,
 	    M_ZERO | M_WAITOK);
 
 	sc->irq = malloc(sc->intr_count * sizeof(struct irq), M_CXGBE,
@@ -4456,6 +4458,19 @@ get_params__post_init(struct adapter *sc)
 	    ("%s: L2 table size (%u) larger than expected (%u)",
 	    __func__, sc->vres.l2t.size, L2T_SIZE));
 	sc->params.core_vdd = val[6];
+
+	param[0] = FW_PARAM_PFVF(IQFLINT_END);
+	param[1] = FW_PARAM_PFVF(EQ_END);
+	rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
+	if (rc != 0) {
+		device_printf(sc->dev,
+		    "failed to query parameters (post_init2): %d.\n", rc);
+		return (rc);
+	}
+	MPASS(val[0] >= sc->sge.iq_start);
+	sc->sge.iqmap_sz = val[0] - sc->sge.iq_start + 1;
+	MPASS(val[1] >= sc->sge.eq_start);
+	sc->sge.eqmap_sz = val[1] - sc->sge.eq_start + 1;
 
 	if (chip_id(sc) >= CHELSIO_T6) {
 

Modified: head/sys/dev/cxgbe/t4_netmap.c
==============================================================================
--- head/sys/dev/cxgbe/t4_netmap.c	Thu Oct 22 06:51:47 2020	(r366928)
+++ head/sys/dev/cxgbe/t4_netmap.c	Thu Oct 22 08:40:25 2020	(r366929)
@@ -188,9 +188,9 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq
 	nm_rxq->iq_cntxt_id = be16toh(c.iqid);
 	nm_rxq->iq_abs_id = be16toh(c.physiqid);
 	cntxt_id = nm_rxq->iq_cntxt_id - sc->sge.iq_start;
-	if (cntxt_id >= sc->sge.niq) {
+	if (cntxt_id >= sc->sge.iqmap_sz) {
 		panic ("%s: nm_rxq->iq_cntxt_id (%d) more than the max (%d)",
-		    __func__, cntxt_id, sc->sge.niq - 1);
+		    __func__, cntxt_id, sc->sge.iqmap_sz - 1);
 	}
 	sc->sge.iqmap[cntxt_id] = (void *)nm_rxq;
 
@@ -201,9 +201,9 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq
 	nm_rxq->fl_db_threshold = chip_id(sc) <= CHELSIO_T5 ? 8 : 4;
 	MPASS(nm_rxq->fl_sidx == na->num_rx_desc);
 	cntxt_id = nm_rxq->fl_cntxt_id - sc->sge.eq_start;
-	if (cntxt_id >= sc->sge.neq) {
+	if (cntxt_id >= sc->sge.eqmap_sz) {
 		panic("%s: nm_rxq->fl_cntxt_id (%d) more than the max (%d)",
-		    __func__, cntxt_id, sc->sge.neq - 1);
+		    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
 	}
 	sc->sge.eqmap[cntxt_id] = (void *)nm_rxq;
 
@@ -300,9 +300,9 @@ alloc_nm_txq_hwq(struct vi_info *vi, struct sge_nm_txq
 
 	nm_txq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
 	cntxt_id = nm_txq->cntxt_id - sc->sge.eq_start;
-	if (cntxt_id >= sc->sge.neq)
+	if (cntxt_id >= sc->sge.eqmap_sz)
 	    panic("%s: nm_txq->cntxt_id (%d) more than the max (%d)", __func__,
-		cntxt_id, sc->sge.neq - 1);
+		cntxt_id, sc->sge.eqmap_sz - 1);
 	sc->sge.eqmap[cntxt_id] = (void *)nm_txq;
 
 	nm_txq->pidx = nm_txq->cidx = 0;

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c	Thu Oct 22 06:51:47 2020	(r366928)
+++ head/sys/dev/cxgbe/t4_sge.c	Thu Oct 22 08:40:25 2020	(r366929)
@@ -3567,9 +3567,9 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, str
 	iq->flags |= IQ_ALLOCATED;
 
 	cntxt_id = iq->cntxt_id - sc->sge.iq_start;
-	if (cntxt_id >= sc->sge.niq) {
+	if (cntxt_id >= sc->sge.iqmap_sz) {
 		panic ("%s: iq->cntxt_id (%d) more than the max (%d)", __func__,
-		    cntxt_id, sc->sge.niq - 1);
+		    cntxt_id, sc->sge.iqmap_sz - 1);
 	}
 	sc->sge.iqmap[cntxt_id] = iq;
 
@@ -3581,9 +3581,9 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, str
 		fl->pidx = fl->cidx = 0;
 
 		cntxt_id = fl->cntxt_id - sc->sge.eq_start;
-		if (cntxt_id >= sc->sge.neq) {
+		if (cntxt_id >= sc->sge.eqmap_sz) {
 			panic("%s: fl->cntxt_id (%d) more than the max (%d)",
-			    __func__, cntxt_id, sc->sge.neq - 1);
+			    __func__, cntxt_id, sc->sge.eqmap_sz - 1);
 		}
 		sc->sge.eqmap[cntxt_id] = (void *)fl;
 
@@ -4152,9 +4152,9 @@ ctrl_eq_alloc(struct adapter *sc, struct sge_eq *eq)
 
 	eq->cntxt_id = G_FW_EQ_CTRL_CMD_EQID(be32toh(c.cmpliqid_eqid));
 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
-	if (cntxt_id >= sc->sge.neq)
+	if (cntxt_id >= sc->sge.eqmap_sz)
 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
-		cntxt_id, sc->sge.neq - 1);
+		cntxt_id, sc->sge.eqmap_sz - 1);
 	sc->sge.eqmap[cntxt_id] = eq;
 
 	return (rc);
@@ -4198,9 +4198,9 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, s
 	eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
 	eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
-	if (cntxt_id >= sc->sge.neq)
+	if (cntxt_id >= sc->sge.eqmap_sz)
 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
-		cntxt_id, sc->sge.neq - 1);
+		cntxt_id, sc->sge.eqmap_sz - 1);
 	sc->sge.eqmap[cntxt_id] = eq;
 
 	return (rc);
@@ -4243,9 +4243,9 @@ ofld_eq_alloc(struct adapter *sc, struct vi_info *vi, 
 
 	eq->cntxt_id = G_FW_EQ_OFLD_CMD_EQID(be32toh(c.eqid_pkd));
 	cntxt_id = eq->cntxt_id - sc->sge.eq_start;
-	if (cntxt_id >= sc->sge.neq)
+	if (cntxt_id >= sc->sge.eqmap_sz)
 	    panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
-		cntxt_id, sc->sge.neq - 1);
+		cntxt_id, sc->sge.eqmap_sz - 1);
 	sc->sge.eqmap[cntxt_id] = eq;
 
 	return (rc);

