svn commit: r366491 - in head/sys: dev/cxgbe dev/cxgbe/crypto dev/mlx5/mlx5_en kern net sys

John Baldwin jhb at FreeBSD.org
Tue Oct 6 17:59:00 UTC 2020


Author: jhb
Date: Tue Oct  6 17:58:56 2020
New Revision: 366491
URL: https://svnweb.freebsd.org/changeset/base/366491

Log:
  Store the send tag type in the common send tag header.
  
  Both cxgbe(4) and mlx5(4) wrapped the existing send tag header in
  their own identical headers that stored the tag type, and the
  type-specific tag structures inherited from those wrappers, so in
  practice drivers need the type in the tag anyway.  Storing it in the
  common header permits removing these extra indirections (struct
  cxgbe_snd_tag and struct mlx5e_snd_tag).
  
  In addition, this permits driver-independent code to query the type of
  a tag, e.g. to know what type of tag is being queried via
  if_snd_query.
  
  Reviewed by:	gallatin, hselasky, np, kib
  Sponsored by:	Netflix
  Differential Revision:	https://reviews.freebsd.org/D26689
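
  As a quick illustration of the pattern this change enables, here is a
  minimal sketch of a hypothetical driver embedding the common send tag
  header directly and dispatching on the stored type.  The foo_* names,
  the malloc-based allocation, and the error handling are illustrative
  only and not part of this commit; only the m_snd_tag / if_snd_tag_*
  interfaces shown are the ones modified below.

  #include <sys/param.h>
  #include <sys/errno.h>
  #include <sys/malloc.h>
  #include <sys/mbuf.h>
  #include <sys/socket.h>
  #include <net/if.h>
  #include <net/if_var.h>

  struct foo_rate_tag {
  	struct m_snd_tag com;	/* common header, embedded directly */
  	/* ... driver-specific rate limit state ... */
  };

  static int
  foo_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
      struct m_snd_tag **pt)
  {
  	struct foo_rate_tag *frt;

  	if (params->hdr.type != IF_SND_TAG_TYPE_RATE_LIMIT)
  		return (EOPNOTSUPP);
  	frt = malloc(sizeof(*frt), M_DEVBUF, M_NOWAIT | M_ZERO);
  	if (frt == NULL)
  		return (ENOMEM);
  	/* The type is now recorded in the common header. */
  	m_snd_tag_init(&frt->com, ifp, IF_SND_TAG_TYPE_RATE_LIMIT);
  	*pt = &frt->com;
  	return (0);
  }

  static void
  foo_snd_tag_free(struct m_snd_tag *mst)
  {
  	struct foo_rate_tag *frt;

  	/* Called via m_snd_tag_rele() once the last reference is dropped. */
  	switch (mst->type) {
  	case IF_SND_TAG_TYPE_RATE_LIMIT:
  		frt = __containerof(mst, struct foo_rate_tag, com);
  		free(frt, M_DEVBUF);
  		break;
  	}
  }

  Driver-independent wrappers gain the same benefit: as the if_lagg(4)
  and if_vlan(4) hunks below show, they can forward the wrapped tag's
  type to m_snd_tag_init() without any driver-specific accessor.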

Modified:
  head/sys/dev/cxgbe/adapter.h
  head/sys/dev/cxgbe/crypto/t4_kern_tls.c
  head/sys/dev/cxgbe/offload.h
  head/sys/dev/cxgbe/t4_main.c
  head/sys/dev/cxgbe/t4_sched.c
  head/sys/dev/cxgbe/t4_sge.c
  head/sys/dev/mlx5/mlx5_en/en.h
  head/sys/dev/mlx5/mlx5_en/en_hw_tls.h
  head/sys/dev/mlx5/mlx5_en/en_rl.h
  head/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c
  head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
  head/sys/kern/kern_mbuf.c
  head/sys/net/if_lagg.c
  head/sys/net/if_vlan.c
  head/sys/sys/mbuf.h

Modified: head/sys/dev/cxgbe/adapter.h
==============================================================================
--- head/sys/dev/cxgbe/adapter.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/adapter.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -1202,7 +1202,6 @@ int update_mac_settings(struct ifnet *, int);
 int adapter_full_init(struct adapter *);
 int adapter_full_uninit(struct adapter *);
 uint64_t cxgbe_get_counter(struct ifnet *, ift_counter);
-void cxgbe_snd_tag_init(struct cxgbe_snd_tag *, struct ifnet *, int);
 int vi_full_init(struct vi_info *);
 int vi_full_uninit(struct vi_info *);
 void vi_sysctls(struct vi_info *);

Modified: head/sys/dev/cxgbe/crypto/t4_kern_tls.c
==============================================================================
--- head/sys/dev/cxgbe/crypto/t4_kern_tls.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/crypto/t4_kern_tls.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -156,7 +156,7 @@ struct tls_keyctx {
 #define KEY_DELETE_TX			0x8
 
 struct tlspcb {
-	struct cxgbe_snd_tag com;
+	struct m_snd_tag com;
 	struct vi_info *vi;	/* virtual interface */
 	struct adapter *sc;
 	struct l2t_entry *l2te;	/* L2 table entry used by this connection */
@@ -205,7 +205,7 @@ static int ktls_setup_keys(struct tlspcb *tlsp,
 static inline struct tlspcb *
 mst_to_tls(struct m_snd_tag *t)
 {
-	return ((struct tlspcb *)mst_to_cst(t));
+	return (__containerof(t, struct tlspcb, com));
 }
 
 /* XXX: There are similar versions of these two in tom/t4_tls.c. */
@@ -240,7 +240,7 @@ alloc_tlspcb(struct ifnet *ifp, struct vi_info *vi, in
 	if (tlsp == NULL)
 		return (NULL);
 
-	cxgbe_snd_tag_init(&tlsp->com, ifp, IF_SND_TAG_TYPE_TLS);
+	m_snd_tag_init(&tlsp->com, ifp, IF_SND_TAG_TYPE_TLS);
 	tlsp->vi = vi;
 	tlsp->sc = sc;
 	tlsp->ctrlq = &sc->sge.ctrlq[pi->port_id];
@@ -484,7 +484,7 @@ ktls_set_tcb_fields(struct tlspcb *tlsp, struct tcpcb 
 		    tlsp->tid);
 		return (ENOMEM);
 	}
-	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com.com);
+	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
 	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
 
 	/* FW_ULPTX_WR */
@@ -727,13 +727,13 @@ cxgbe_tls_tag_alloc(struct ifnet *ifp, union if_snd_ta
 	else
 		txq->kern_tls_cbc++;
 	TXQ_UNLOCK(txq);
-	*pt = &tlsp->com.com;
+	*pt = &tlsp->com;
 	return (0);
 
 failed:
 	if (atid >= 0)
 		free_atid(sc, atid);
-	m_snd_tag_rele(&tlsp->com.com);
+	m_snd_tag_rele(&tlsp->com);
 	return (error);
 }
 
@@ -836,7 +836,7 @@ ktls_setup_keys(struct tlspcb *tlsp, const struct ktls
 		    tlsp->tid);
 		return (ENOMEM);
 	}
-	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com.com);
+	m->m_pkthdr.snd_tag = m_snd_tag_ref(&tlsp->com);
 	m->m_pkthdr.csum_flags |= CSUM_SND_TAG;
 	kwr = mtod(m, void *);
 	memset(kwr, 0, len);

Modified: head/sys/dev/cxgbe/offload.h
==============================================================================
--- head/sys/dev/cxgbe/offload.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/offload.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -87,13 +87,8 @@ enum {
 	EO_FLUSH_RPL_PENDING	= (1 << 3),	/* credit flush rpl due back */
 };
 
-struct cxgbe_snd_tag {
-	struct m_snd_tag com;
-	int type;
-};
-
 struct cxgbe_rate_tag {
-	struct cxgbe_snd_tag com;
+	struct m_snd_tag com;
 	struct adapter *adapter;
 	u_int flags;
 	struct mtx lock;
@@ -112,17 +107,10 @@ struct cxgbe_rate_tag {
 	uint8_t ncompl;		/* # of completions outstanding. */
 };
 
-static inline struct cxgbe_snd_tag *
-mst_to_cst(struct m_snd_tag *t)
-{
-
-	return (__containerof(t, struct cxgbe_snd_tag, com));
-}
-
 static inline struct cxgbe_rate_tag *
 mst_to_crt(struct m_snd_tag *t)
 {
-	return ((struct cxgbe_rate_tag *)mst_to_cst(t));
+	return (__containerof(t, struct cxgbe_rate_tag, com));
 }
 
 union etid_entry {

Modified: head/sys/dev/cxgbe/t4_main.c
==============================================================================
--- head/sys/dev/cxgbe/t4_main.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/t4_main.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -2186,9 +2186,6 @@ cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
 	struct port_info *pi = vi->pi;
 	struct adapter *sc;
 	struct sge_txq *txq;
-#ifdef RATELIMIT
-	struct cxgbe_snd_tag *cst;
-#endif
 	void *items[1];
 	int rc;
 
@@ -2212,8 +2209,7 @@ cxgbe_transmit(struct ifnet *ifp, struct mbuf *m)
 	}
 #ifdef RATELIMIT
 	if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
-		cst = mst_to_cst(m->m_pkthdr.snd_tag);
-		if (cst->type == IF_SND_TAG_TYPE_RATE_LIMIT)
+		if (m->m_pkthdr.snd_tag->type == IF_SND_TAG_TYPE_RATE_LIMIT)
 			return (ethofld_transmit(ifp, m));
 	}
 #endif
@@ -2374,14 +2370,6 @@ cxgbe_get_counter(struct ifnet *ifp, ift_counter c)
 }
 
 #if defined(KERN_TLS) || defined(RATELIMIT)
-void
-cxgbe_snd_tag_init(struct cxgbe_snd_tag *cst, struct ifnet *ifp, int type)
-{
-
-	m_snd_tag_init(&cst->com, ifp);
-	cst->type = type;
-}
-
 static int
 cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_tag_alloc_params *params,
     struct m_snd_tag **pt)
@@ -2402,8 +2390,6 @@ cxgbe_snd_tag_alloc(struct ifnet *ifp, union if_snd_ta
 	default:
 		error = EOPNOTSUPP;
 	}
-	if (error == 0)
-		MPASS(mst_to_cst(*pt)->type == params->hdr.type);
 	return (error);
 }
 
@@ -2411,10 +2397,8 @@ static int
 cxgbe_snd_tag_modify(struct m_snd_tag *mst,
     union if_snd_tag_modify_params *params)
 {
-	struct cxgbe_snd_tag *cst;
 
-	cst = mst_to_cst(mst);
-	switch (cst->type) {
+	switch (mst->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		return (cxgbe_rate_tag_modify(mst, params));
@@ -2428,10 +2412,8 @@ static int
 cxgbe_snd_tag_query(struct m_snd_tag *mst,
     union if_snd_tag_query_params *params)
 {
-	struct cxgbe_snd_tag *cst;
 
-	cst = mst_to_cst(mst);
-	switch (cst->type) {
+	switch (mst->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		return (cxgbe_rate_tag_query(mst, params));
@@ -2444,10 +2426,8 @@ cxgbe_snd_tag_query(struct m_snd_tag *mst,
 static void
 cxgbe_snd_tag_free(struct m_snd_tag *mst)
 {
-	struct cxgbe_snd_tag *cst;
 
-	cst = mst_to_cst(mst);
-	switch (cst->type) {
+	switch (mst->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		cxgbe_rate_tag_free(mst);

Modified: head/sys/dev/cxgbe/t4_sched.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sched.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/t4_sched.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -788,7 +788,7 @@ failed:
 	mtx_init(&cst->lock, "cst_lock", NULL, MTX_DEF);
 	mbufq_init(&cst->pending_tx, INT_MAX);
 	mbufq_init(&cst->pending_fwack, INT_MAX);
-	cxgbe_snd_tag_init(&cst->com, ifp, IF_SND_TAG_TYPE_RATE_LIMIT);
+	m_snd_tag_init(&cst->com, ifp, IF_SND_TAG_TYPE_RATE_LIMIT);
 	cst->flags |= EO_FLOWC_PENDING | EO_SND_TAG_REF;
 	cst->adapter = sc;
 	cst->port_id = pi->port_id;
@@ -805,7 +805,7 @@ failed:
 	 * Queues will be selected later when the connection flowid is available.
 	 */
 
-	*pt = &cst->com.com;
+	*pt = &cst->com;
 	return (0);
 }
 

Modified: head/sys/dev/cxgbe/t4_sge.c
==============================================================================
--- head/sys/dev/cxgbe/t4_sge.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/cxgbe/t4_sge.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -2404,10 +2404,10 @@ set_mbuf_eo_tsclk_tsoff(struct mbuf *m, uint8_t tsclk_
 }
 
 static inline int
-needs_eo(struct cxgbe_snd_tag *cst)
+needs_eo(struct m_snd_tag *mst)
 {
 
-	return (cst != NULL && cst->type == IF_SND_TAG_TYPE_RATE_LIMIT);
+	return (mst != NULL && mst->type == IF_SND_TAG_TYPE_RATE_LIMIT);
 }
 #endif
 
@@ -2716,7 +2716,7 @@ parse_pkt(struct mbuf **mp, bool vm_wr)
 	struct tcphdr *tcp;
 #endif
 #if defined(KERN_TLS) || defined(RATELIMIT)
-	struct cxgbe_snd_tag *cst;
+	struct m_snd_tag *mst;
 #endif
 	uint16_t eh_type;
 	uint8_t cflags;
@@ -2740,12 +2740,12 @@ restart:
 	nsegs = count_mbuf_nsegs(m0, 0, &cflags);
 #if defined(KERN_TLS) || defined(RATELIMIT)
 	if (m0->m_pkthdr.csum_flags & CSUM_SND_TAG)
-		cst = mst_to_cst(m0->m_pkthdr.snd_tag);
+		mst = m0->m_pkthdr.snd_tag;
 	else
-		cst = NULL;
+		mst = NULL;
 #endif
 #ifdef KERN_TLS
-	if (cst != NULL && cst->type == IF_SND_TAG_TYPE_TLS) {
+	if (mst != NULL && mst->type == IF_SND_TAG_TYPE_TLS) {
 		int len16;
 
 		cflags |= MC_TLS;
@@ -2794,17 +2794,17 @@ restart:
 	 * checksumming is enabled.  needs_outer_l4_csum happens to check for
 	 * all the right things.
 	 */
-	if (__predict_false(needs_eo(cst) && !needs_outer_l4_csum(m0))) {
+	if (__predict_false(needs_eo(mst) && !needs_outer_l4_csum(m0))) {
 		m_snd_tag_rele(m0->m_pkthdr.snd_tag);
 		m0->m_pkthdr.snd_tag = NULL;
 		m0->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
-		cst = NULL;
+		mst = NULL;
 	}
 #endif
 
 	if (!needs_hwcsum(m0)
 #ifdef RATELIMIT
-   		 && !needs_eo(cst)
+   		 && !needs_eo(mst)
 #endif
 	)
 		return (0);
@@ -2923,7 +2923,7 @@ restart:
 #endif
 	}
 #ifdef RATELIMIT
-	if (needs_eo(cst)) {
+	if (needs_eo(mst)) {
 		u_int immhdrs;
 
 		/* EO WRs have the headers in the WR and not the GL. */
@@ -6484,7 +6484,7 @@ ethofld_tx(struct cxgbe_rate_tag *cst)
 		cst->tx_credits -= next_credits;
 		cst->tx_nocompl += next_credits;
 		compl = cst->ncompl == 0 || cst->tx_nocompl >= cst->tx_total / 2;
-		ETHER_BPF_MTAP(cst->com.com.ifp, m);
+		ETHER_BPF_MTAP(cst->com.ifp, m);
 		write_ethofld_wr(cst, wr, m, compl);
 		commit_wrq_wr(cst->eo_txq, wr, &cookie);
 		if (compl) {
@@ -6505,7 +6505,7 @@ ethofld_tx(struct cxgbe_rate_tag *cst)
 		 */
 		m->m_pkthdr.snd_tag = NULL;
 		m->m_pkthdr.csum_flags &= ~CSUM_SND_TAG;
-		m_snd_tag_rele(&cst->com.com);
+		m_snd_tag_rele(&cst->com);
 
 		mbufq_enqueue(&cst->pending_fwack, m);
 	}
@@ -6559,10 +6559,10 @@ ethofld_transmit(struct ifnet *ifp, struct mbuf *m0)
 	 * ethofld_tx() in case we are sending the final mbuf after
 	 * the inp was freed.
 	 */
-	m_snd_tag_ref(&cst->com.com);
+	m_snd_tag_ref(&cst->com);
 	ethofld_tx(cst);
 	mtx_unlock(&cst->lock);
-	m_snd_tag_rele(&cst->com.com);
+	m_snd_tag_rele(&cst->com);
 	return (0);
 
 done:
@@ -6633,12 +6633,12 @@ ethofld_fw4_ack(struct sge_iq *iq, const struct rss_he
 		 * As with ethofld_transmit(), hold an extra reference
 		 * so that the tag is stable across ethold_tx().
 		 */
-		m_snd_tag_ref(&cst->com.com);
+		m_snd_tag_ref(&cst->com);
 		m = mbufq_first(&cst->pending_tx);
 		if (m != NULL && cst->tx_credits >= mbuf_eo_len16(m))
 			ethofld_tx(cst);
 		mtx_unlock(&cst->lock);
-		m_snd_tag_rele(&cst->com.com);
+		m_snd_tag_rele(&cst->com);
 	} else {
 		/*
 		 * There shouldn't be any pending packets if the tag

Modified: head/sys/dev/mlx5/mlx5_en/en.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/en.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -791,11 +791,6 @@ enum {
 	MLX5E_SQ_FULL
 };
 
-struct mlx5e_snd_tag {
-	struct m_snd_tag m_snd_tag;	/* send tag */
-	u32	type;	/* tag type */
-};
-
 struct mlx5e_sq {
 	/* persistant fields */
 	struct	mtx lock;
@@ -876,7 +871,7 @@ mlx5e_sq_queue_level(struct mlx5e_sq *sq)
 
 struct mlx5e_channel {
 	struct mlx5e_rq rq;
-	struct mlx5e_snd_tag tag;
+	struct m_snd_tag tag;
 	struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
 	struct mlx5e_priv *priv;
 	struct completion completion;

Modified: head/sys/dev/mlx5/mlx5_en/en_hw_tls.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en_hw_tls.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/en_hw_tls.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -44,7 +44,7 @@ enum {
 };
 
 struct mlx5e_tls_tag {
-	struct mlx5e_snd_tag tag;
+	struct m_snd_tag tag;
 	STAILQ_ENTRY(mlx5e_tls_tag) entry;
 	volatile s32 refs;	/* number of pending mbufs */
 	uint32_t tisn;		/* HW TIS context number */

Modified: head/sys/dev/mlx5/mlx5_en/en_rl.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en_rl.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/en_rl.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -129,7 +129,7 @@ struct mlx5e_rl_channel_param {
 };
 
 struct mlx5e_rl_channel {
-	struct mlx5e_snd_tag tag;
+	struct m_snd_tag tag;
 	STAILQ_ENTRY(mlx5e_rl_channel) entry;
 	struct mlx5e_sq * volatile sq;
 	struct mlx5e_rl_worker *worker;

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -303,7 +303,6 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
 
 	/* setup TLS tag */
 	ptag->tls = &priv->tls;
-	ptag->tag.type = params->hdr.type;
 
 	/* check if there is no TIS context */
 	if (ptag->tisn == 0) {
@@ -378,7 +377,7 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
 		goto failure;
 	}
 
-	switch (ptag->tag.type) {
+	switch (params->hdr.type) {
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
 		memset(&rl_params, 0, sizeof(rl_params));
@@ -410,9 +409,9 @@ mlx5e_tls_snd_tag_alloc(struct ifnet *ifp,
 	}
 
 	/* store pointer to mbuf tag */
-	MPASS(ptag->tag.m_snd_tag.refcount == 0);
-	m_snd_tag_init(&ptag->tag.m_snd_tag, ifp);
-	*ppmt = &ptag->tag.m_snd_tag;
+	MPASS(ptag->tag.refcount == 0);
+	m_snd_tag_init(&ptag->tag, ifp, params->hdr.type);
+	*ppmt = &ptag->tag;
 
 	queue_work(priv->tls.wq, &ptag->work);
 	flush_work(&ptag->work);
@@ -429,12 +428,12 @@ mlx5e_tls_snd_tag_modify(struct m_snd_tag *pmt, union 
 {
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	struct if_snd_tag_rate_limit_params rl_params;
+	struct mlx5e_tls_tag *ptag =
+	    container_of(pmt, struct mlx5e_tls_tag, tag);
 	int error;
 #endif
-	struct mlx5e_tls_tag *ptag =
-	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);
 
-	switch (ptag->tag.type) {
+	switch (pmt->type) {
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
 		memset(&rl_params, 0, sizeof(rl_params));
@@ -452,10 +451,10 @@ int
 mlx5e_tls_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
 {
 	struct mlx5e_tls_tag *ptag =
-	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_tls_tag, tag);
 	int error;
 
-	switch (ptag->tag.type) {
+	switch (pmt->type) {
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
 		error = mlx5e_rl_snd_tag_query(ptag->rl_tag, params);
@@ -475,10 +474,10 @@ void
 mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
 {
 	struct mlx5e_tls_tag *ptag =
-	    container_of(pmt, struct mlx5e_tls_tag, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_tls_tag, tag);
 	struct mlx5e_priv *priv;
 
-	switch (ptag->tag.type) {
+	switch (pmt->type) {
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
 		mlx5e_rl_snd_tag_free(ptag->rl_tag);
@@ -495,7 +494,7 @@ mlx5e_tls_snd_tag_free(struct m_snd_tag *pmt)
 	ptag->state = MLX5E_TLS_ST_FREED;
 	MLX5E_TLS_TAG_UNLOCK(ptag);
 
-	priv = ptag->tag.m_snd_tag.ifp->if_softc;
+	priv = ptag->tag.ifp->if_softc;
 	queue_work(priv->tls.wq, &ptag->work);
 }
 
@@ -699,7 +698,7 @@ int
 mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf **ppmb)
 {
 	struct mlx5e_tls_tag *ptls_tag;
-	struct mlx5e_snd_tag *ptag;
+	struct m_snd_tag *ptag;
 	const struct tcphdr *th;
 	struct mbuf *mb = *ppmb;
 	u64 rcd_sn;
@@ -709,8 +708,7 @@ mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xm
 	if ((mb->m_pkthdr.csum_flags & CSUM_SND_TAG) == 0)
 		return (MLX5E_TLS_CONTINUE);
 
-	ptag = container_of(mb->m_pkthdr.snd_tag,
-	    struct mlx5e_snd_tag, m_snd_tag);
+	ptag = mb->m_pkthdr.snd_tag;
 
 	if (
 #if defined(RATELIMIT) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -2141,8 +2141,7 @@ mlx5e_chan_static_init(struct mlx5e_priv *priv, struct
 	c->ix = ix;
 
 	/* setup send tag */
-	c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
-	m_snd_tag_init(&c->tag.m_snd_tag, c->priv->ifp);
+	m_snd_tag_init(&c->tag, c->priv->ifp, IF_SND_TAG_TYPE_UNLIMITED);
 
 	init_completion(&c->completion);
 
@@ -2166,7 +2165,7 @@ static void
 mlx5e_chan_wait_for_completion(struct mlx5e_channel *c)
 {
 
-	m_snd_tag_rele(&c->tag.m_snd_tag);
+	m_snd_tag_rele(&c->tag);
 	wait_for_completion(&c->completion);
 }
 
@@ -4087,8 +4086,8 @@ mlx5e_ul_snd_tag_alloc(struct ifnet *ifp,
 		/* check if send queue is not running */
 		if (unlikely(pch->sq[0].running == 0))
 			return (ENXIO);
-		m_snd_tag_ref(&pch->tag.m_snd_tag);
-		*ppmt = &pch->tag.m_snd_tag;
+		m_snd_tag_ref(&pch->tag);
+		*ppmt = &pch->tag;
 		return (0);
 	}
 }
@@ -4097,7 +4096,7 @@ int
 mlx5e_ul_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
 {
 	struct mlx5e_channel *pch =
-	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_channel, tag);
 
 	params->unlimited.max_rate = -1ULL;
 	params->unlimited.queue_level = mlx5e_sq_queue_level(&pch->sq[0]);
@@ -4108,7 +4107,7 @@ void
 mlx5e_ul_snd_tag_free(struct m_snd_tag *pmt)
 {
 	struct mlx5e_channel *pch =
-	    container_of(pmt, struct mlx5e_channel, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_channel, tag);
 
 	complete(&pch->completion);
 }
@@ -4142,10 +4141,8 @@ mlx5e_snd_tag_alloc(struct ifnet *ifp,
 static int
 mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
 {
-	struct mlx5e_snd_tag *tag =
-	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
 
-	switch (tag->type) {
+	switch (pmt->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		return (mlx5e_rl_snd_tag_modify(pmt, params));
@@ -4166,10 +4163,8 @@ mlx5e_snd_tag_modify(struct m_snd_tag *pmt, union if_s
 static int
 mlx5e_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
 {
-	struct mlx5e_snd_tag *tag =
-	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
 
-	switch (tag->type) {
+	switch (pmt->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		return (mlx5e_rl_snd_tag_query(pmt, params));
@@ -4236,10 +4231,8 @@ mlx5e_ratelimit_query(struct ifnet *ifp __unused, stru
 static void
 mlx5e_snd_tag_free(struct m_snd_tag *pmt)
 {
-	struct mlx5e_snd_tag *tag =
-	    container_of(pmt, struct mlx5e_snd_tag, m_snd_tag);
 
-	switch (tag->type) {
+	switch (pmt->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
 		mlx5e_rl_snd_tag_free(pmt);

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_rl.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -1128,9 +1128,9 @@ mlx5e_rl_snd_tag_alloc(struct ifnet *ifp,
 	}
 
 	/* store pointer to mbuf tag */
-	MPASS(channel->tag.m_snd_tag.refcount == 0);
-	m_snd_tag_init(&channel->tag.m_snd_tag, ifp);
-	*ppmt = &channel->tag.m_snd_tag;
+	MPASS(channel->tag.refcount == 0);
+	m_snd_tag_init(&channel->tag, ifp, IF_SND_TAG_TYPE_RATE_LIMIT);
+	*ppmt = &channel->tag;
 done:
 	return (error);
 }
@@ -1140,7 +1140,7 @@ int
 mlx5e_rl_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
 {
 	struct mlx5e_rl_channel *channel =
-	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_rl_channel, tag);
 
 	return (mlx5e_rl_modify(channel->worker, channel, params->rate_limit.max_rate));
 }
@@ -1149,7 +1149,7 @@ int
 mlx5e_rl_snd_tag_query(struct m_snd_tag *pmt, union if_snd_tag_query_params *params)
 {
 	struct mlx5e_rl_channel *channel =
-	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_rl_channel, tag);
 
 	return (mlx5e_rl_query(channel->worker, channel, params));
 }
@@ -1158,7 +1158,7 @@ void
 mlx5e_rl_snd_tag_free(struct m_snd_tag *pmt)
 {
 	struct mlx5e_rl_channel *channel =
-	    container_of(pmt, struct mlx5e_rl_channel, tag.m_snd_tag);
+	    container_of(pmt, struct mlx5e_rl_channel, tag);
 
 	mlx5e_rl_free(channel->worker, channel);
 }

Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -90,7 +90,6 @@ static struct mlx5e_sq *
 mlx5e_select_queue_by_send_tag(struct ifnet *ifp, struct mbuf *mb)
 {
 	struct m_snd_tag *mb_tag;
-	struct mlx5e_snd_tag *ptag;
 	struct mlx5e_sq *sq;
 
 	mb_tag = mb->m_pkthdr.snd_tag;
@@ -99,29 +98,27 @@ mlx5e_select_queue_by_send_tag(struct ifnet *ifp, stru
 top:
 #endif
 	/* get pointer to sendqueue */
-	ptag = container_of(mb_tag, struct mlx5e_snd_tag, m_snd_tag);
-
-	switch (ptag->type) {
+	switch (mb_tag->type) {
 #ifdef RATELIMIT
 	case IF_SND_TAG_TYPE_RATE_LIMIT:
-		sq = container_of(ptag,
+		sq = container_of(mb_tag,
 		    struct mlx5e_rl_channel, tag)->sq;
 		break;
 #if defined(KERN_TLS) && defined(IF_SND_TAG_TYPE_TLS_RATE_LIMIT)
 	case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
-		mb_tag = container_of(ptag, struct mlx5e_tls_tag, tag)->rl_tag;
+		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
 		goto top;
 #endif
 #endif
 	case IF_SND_TAG_TYPE_UNLIMITED:
-		sq = &container_of(ptag,
+		sq = &container_of(mb_tag,
 		    struct mlx5e_channel, tag)->sq[0];
-		KASSERT((ptag->m_snd_tag.refcount > 0),
+		KASSERT((mb_tag->refcount > 0),
 		    ("mlx5e_select_queue: Channel refs are zero for unlimited tag"));
 		break;
 #ifdef KERN_TLS
 	case IF_SND_TAG_TYPE_TLS:
-		mb_tag = container_of(ptag, struct mlx5e_tls_tag, tag)->rl_tag;
+		mb_tag = container_of(mb_tag, struct mlx5e_tls_tag, tag)->rl_tag;
 		goto top;
 #endif
 	default:

Modified: head/sys/kern/kern_mbuf.c
==============================================================================
--- head/sys/kern/kern_mbuf.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/kern/kern_mbuf.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -1526,12 +1526,13 @@ m_freem(struct mbuf *mb)
 }
 
 void
-m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp)
+m_snd_tag_init(struct m_snd_tag *mst, struct ifnet *ifp, u_int type)
 {
 
 	if_ref(ifp);
 	mst->ifp = ifp;
 	refcount_init(&mst->refcount, 1);
+	mst->type = type;
 	counter_u64_add(snd_tag_count, 1);
 }
 

Modified: head/sys/net/if_lagg.c
==============================================================================
--- head/sys/net/if_lagg.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/net/if_lagg.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -1686,7 +1686,7 @@ lagg_snd_tag_alloc(struct ifnet *ifp,
 		return (error);
 	}
 
-	m_snd_tag_init(&lst->com, ifp);
+	m_snd_tag_init(&lst->com, ifp, lst->tag->type);
 
 	*ppmt = &lst->com;
 	return (0);

Modified: head/sys/net/if_vlan.c
==============================================================================
--- head/sys/net/if_vlan.c	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/net/if_vlan.c	Tue Oct  6 17:58:56 2020	(r366491)
@@ -2066,7 +2066,7 @@ vlan_snd_tag_alloc(struct ifnet *ifp,
 		return (error);
 	}
 
-	m_snd_tag_init(&vst->com, ifp);
+	m_snd_tag_init(&vst->com, ifp, vst->tag->type);
 
 	*ppmt = &vst->com;
 	return (0);

Modified: head/sys/sys/mbuf.h
==============================================================================
--- head/sys/sys/mbuf.h	Tue Oct  6 15:17:41 2020	(r366490)
+++ head/sys/sys/mbuf.h	Tue Oct  6 17:58:56 2020	(r366491)
@@ -141,6 +141,7 @@ struct m_tag {
 struct m_snd_tag {
 	struct ifnet *ifp;		/* network interface tag belongs to */
 	volatile u_int refcount;
+	u_int	type;			/* One of IF_SND_TAG_TYPE_*. */
 };
 
 /*
@@ -833,7 +834,7 @@ int		 m_sanity(struct mbuf *, int);
 struct mbuf	*m_split(struct mbuf *, int, int);
 struct mbuf	*m_uiotombuf(struct uio *, int, int, int, int);
 struct mbuf	*m_unshare(struct mbuf *, int);
-void		 m_snd_tag_init(struct m_snd_tag *, struct ifnet *);
+void		 m_snd_tag_init(struct m_snd_tag *, struct ifnet *, u_int);
 void		 m_snd_tag_destroy(struct m_snd_tag *);
 
 static __inline int

