git: ebdb70064900 - main - mlx5en: Improve RX- and TX- TLS refcounting.

From: Hans Petter Selasky <hselasky@FreeBSD.org>
Date: Thu, 17 Feb 2022 12:13:49 UTC
The branch main has been updated by hselasky:

URL: https://cgit.FreeBSD.org/src/commit/?id=ebdb70064900a2ba2e3f8341328edc34e619170d

commit ebdb70064900a2ba2e3f8341328edc34e619170d
Author:     Hans Petter Selasky <hselasky@FreeBSD.org>
AuthorDate: 2022-02-17 11:47:20 +0000
Commit:     Hans Petter Selasky <hselasky@FreeBSD.org>
CommitDate: 2022-02-17 12:13:09 +0000

    mlx5en: Improve RX- and TX- TLS refcounting.
    
    Use the send tag refcounting mechanism to refcount the RX- and TX-TLS
    send tags. This removes the need to wait for refcounts to reach zero
    when destroying RX- and TX-TLS send tags that still have pending data
    or WQE commands.
    
    This also ensures that when TX-TLS and rate limiting are used at the
    same time, the underlying SQ is not destroyed prematurely.
    
    MFC after:      1 week
    Sponsored by:   NVIDIA Networking
---
 sys/dev/mlx5/mlx5_en/en.h                |  5 ++--
 sys/dev/mlx5/mlx5_en/en_hw_tls.h         |  2 +-
 sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h      |  3 +--
 sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c    | 21 ++++++++---------
 sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c | 39 ++++++++++++++++++--------------
 sys/dev/mlx5/mlx5_en/mlx5_en_main.c      |  9 ++++----
 sys/dev/mlx5/mlx5_en/mlx5_en_tx.c        | 26 ++++++++++++---------
 7 files changed, 56 insertions(+), 49 deletions(-)

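For reference, the send tag refcounting pattern this change adopts looks
roughly as follows (a simplified sketch condensed from the hunks below, not
verbatim driver code; m_snd_tag_ref() and m_snd_tag_rele() are the generic
mbuf send tag helpers from sys/mbuf.h):

    /* enqueue path: pin the send tag for the lifetime of the queue entry */
    sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);

    /* completion path: unpin; the tag's free routine runs automatically
     * once the last reference is released */
    mst = sq->mbuf[ci].mst;
    sq->mbuf[ci].mst = NULL;
    if (mst != NULL)
        m_snd_tag_rele(mst);
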
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index 1c8a53b1ba4d..fa355c68831e 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -833,7 +834,7 @@ struct mlx5e_iq {
 struct mlx5e_sq_mbuf {
 	bus_dmamap_t dma_map;
 	struct mbuf *mbuf;
-	volatile s32 *p_refcount;	/* in use refcount, if any */
+	struct m_snd_tag *mst;	/* if set, unref this send tag on completion */
 	u32	num_bytes;
 	u32	num_wqebbs;
 };
@@ -1047,7 +1048,7 @@ struct mlx5e_flow_tables {
 };
 
 struct mlx5e_xmit_args {
-	volatile s32 *pref;
+	struct m_snd_tag *mst;
 	u32 tisn;
 	u16 ihs;
 };
diff --git a/sys/dev/mlx5/mlx5_en/en_hw_tls.h b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
index 3f480c900a73..f9d5ae23e82c 100644
--- a/sys/dev/mlx5/mlx5_en/en_hw_tls.h
+++ b/sys/dev/mlx5/mlx5_en/en_hw_tls.h
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -44,7 +45,6 @@ enum {
 struct mlx5e_tls;
 struct mlx5e_tls_tag {
 	struct m_snd_tag tag;
-	volatile s32 refs;	/* number of pending mbufs */
 	uint32_t tisn;		/* HW TIS context number */
 	uint32_t dek_index;	/* HW TLS context number */
 	struct mlx5e_tls *tls;
diff --git a/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h b/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
index 6954b97b827b..4bdcbda0076e 100644
--- a/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
+++ b/sys/dev/mlx5/mlx5_en/en_hw_tls_rx.h
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2021 NVIDIA corporation & affiliates.
+ * Copyright (c) 2021-2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -60,7 +60,6 @@ enum {
 struct mlx5e_tls_rx;
 struct mlx5e_tls_rx_tag {
 	struct m_snd_tag tag;
-	volatile s32 refs;	/* number of pending mbufs */
 	uint32_t tirn;		/* HW TIR context number */
 	uint32_t dek_index;	/* HW TLS context number */
 	struct mlx5e_tls_rx *tls_rx; /* parent pointer */
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
index 056202f814f7..a1973740571d 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls.c
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2019-2021 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -256,10 +257,6 @@ mlx5e_tls_work(struct work_struct *work)
 		break;
 
 	case MLX5E_TLS_ST_FREED:
-		/* wait for all refs to go away */
-		while (ptag->refs != 0)
-			msleep(1);
-
 		/* try to destroy DEK context by ID */
 		if (ptag->dek_index_ok)
 			err = mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);
@@ -532,8 +529,8 @@ mlx5e_tls_send_static_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag
 	sq->mbuf[pi].mbuf = NULL;
 	sq->mbuf[pi].num_bytes = 0;
 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	sq->mbuf[pi].p_refcount = &ptag->refs;
-	atomic_add_int(&ptag->refs, 1);
+	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);
+
 	sq->pc += sq->mbuf[pi].num_wqebbs;
 }
 
@@ -570,8 +567,8 @@ mlx5e_tls_send_progress_parameters(struct mlx5e_sq *sq, struct mlx5e_tls_tag *pt
 	sq->mbuf[pi].mbuf = NULL;
 	sq->mbuf[pi].num_bytes = 0;
 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	sq->mbuf[pi].p_refcount = &ptag->refs;
-	atomic_add_int(&ptag->refs, 1);
+	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);
+
 	sq->pc += sq->mbuf[pi].num_wqebbs;
 }
 
@@ -600,8 +597,8 @@ mlx5e_tls_send_nop(struct mlx5e_sq *sq, struct mlx5e_tls_tag *ptag)
 	sq->mbuf[pi].mbuf = NULL;
 	sq->mbuf[pi].num_bytes = 0;
 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	sq->mbuf[pi].p_refcount = &ptag->refs;
-	atomic_add_int(&ptag->refs, 1);
+	sq->mbuf[pi].mst = m_snd_tag_ref(&ptag->tag);
+
 	sq->pc += sq->mbuf[pi].num_wqebbs;
 }
 
@@ -781,7 +778,7 @@ mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf
 
 		/* setup transmit arguments */
 		parg->tisn = ptls_tag->tisn;
-		parg->pref = &ptls_tag->refs;
+		parg->mst = &ptls_tag->tag;
 
 		/* try to send DUMP data */
 		if (mlx5e_sq_dump_xmit(sq, parg, &r_mb) != 0) {
@@ -800,7 +797,7 @@ mlx5e_sq_tls_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbuf
 
 	parg->tisn = ptls_tag->tisn;
 	parg->ihs = header_size;
-	parg->pref = &ptls_tag->refs;
+	parg->mst = &ptls_tag->tag;
 	return (MLX5E_TLS_CONTINUE);
 }
 
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
index 462556bd9d99..4a6fc8f2d202 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_hw_tls_rx.c
@@ -1,5 +1,5 @@
 /*-
- * Copyright (c) 2021 NVIDIA corporation & affiliates.
+ * Copyright (c) 2021-2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -116,6 +116,16 @@ mlx5e_tls_rx_get_iq(struct mlx5e_priv *priv, uint32_t flowid, uint32_t flowtype)
 	return (&priv->channel[mlx5e_tls_rx_get_ch(priv, flowid, flowtype)].iq);
 }
 
+static void
+mlx5e_tls_rx_send_static_parameters_cb(void *arg)
+{
+	struct mlx5e_tls_rx_tag *ptag;
+
+	ptag = (struct mlx5e_tls_rx_tag *)arg;
+
+	m_snd_tag_rele(&ptag->tag);
+}
+
 /*
  * This function sends the so-called TLS RX static parameters to the
  * hardware. These parameters are temporarily stored in the
@@ -162,9 +172,11 @@ mlx5e_tls_rx_send_static_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag
 	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));
 
 	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
+	iq->data[pi].callback = &mlx5e_tls_rx_send_static_parameters_cb;
+	iq->data[pi].arg = ptag;
+
+	m_snd_tag_ref(&ptag->tag);
 
-	iq->data[pi].p_refcount = &ptag->refs;
-	atomic_add_int(&ptag->refs, 1);
 	iq->pc += iq->data[pi].num_wqebbs;
 
 	mlx5e_iq_notify_hw(iq);
@@ -229,8 +241,7 @@ mlx5e_tls_rx_send_progress_parameters_sync(struct mlx5e_iq *iq,
 	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
 	iq->data[pi].callback = &mlx5e_tls_rx_send_progress_parameters_cb;
 	iq->data[pi].arg = ptag;
-	iq->data[pi].p_refcount = &ptag->refs;
-	atomic_add_int(&ptag->refs, 1);
+
 	iq->pc += iq->data[pi].num_wqebbs;
 
 	init_completion(&ptag->progress_complete);
@@ -309,6 +320,8 @@ mlx5e_tls_rx_receive_progress_parameters_cb(void *arg)
 	}
 done:
 	MLX5E_TLS_RX_TAG_UNLOCK(ptag);
+
+	m_snd_tag_rele(&ptag->tag);
 }
 
 /*
@@ -355,10 +368,11 @@ mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_r
 	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));
 
 	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	iq->data[pi].p_refcount = &ptag->refs;
 	iq->data[pi].callback = &mlx5e_tls_rx_receive_progress_parameters_cb;
 	iq->data[pi].arg = ptag;
-	atomic_add_int(&ptag->refs, 1);
+
+	m_snd_tag_ref(&ptag->tag);
+
 	iq->pc += iq->data[pi].num_wqebbs;
 
 	mlx5e_iq_notify_hw(iq);
@@ -560,10 +574,6 @@ mlx5e_tls_rx_work(struct work_struct *work)
 		if (ptag->flow_rule != NULL)
 			mlx5e_accel_fs_del_inpcb(ptag->flow_rule);
 
-		/* wait for all refs to go away */
-		while (ptag->refs != 0)
-			msleep(1);
-
 		/* try to destroy DEK context by ID */
 		if (ptag->dek_index_ok)
 			mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);
@@ -808,12 +818,7 @@ mlx5e_tls_rx_snd_tag_alloc(struct ifnet *ifp,
 	return (0);
 
 cleanup:
-	MLX5E_TLS_RX_TAG_LOCK(ptag);
-	ptag->state = MLX5E_TLS_RX_ST_FREED;
-	MLX5E_TLS_RX_TAG_UNLOCK(ptag);
-
-	queue_work(priv->tls_rx.wq, &ptag->work);
-	flush_work(&ptag->work);
+	m_snd_tag_rele(&ptag->tag);
 	return (error);
 
 failure:
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 16488ace5a74..ccd87f1cb9d8 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -1582,14 +1583,14 @@ mlx5e_free_sq_db(struct mlx5e_sq *sq)
 	int x;
 
 	for (x = 0; x != wq_sz; x++) {
-		if (unlikely(sq->mbuf[x].p_refcount != NULL)) {
-			atomic_add_int(sq->mbuf[x].p_refcount, -1);
-			sq->mbuf[x].p_refcount = NULL;
-		}
 		if (sq->mbuf[x].mbuf != NULL) {
 			bus_dmamap_unload(sq->dma_tag, sq->mbuf[x].dma_map);
 			m_freem(sq->mbuf[x].mbuf);
 		}
+		if (sq->mbuf[x].mst != NULL) {
+			m_snd_tag_rele(sq->mbuf[x].mst);
+			sq->mbuf[x].mst = NULL;
+		}
 		bus_dmamap_destroy(sq->dma_tag, sq->mbuf[x].dma_map);
 	}
 	free(sq->mbuf, M_MLX5EN);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
index ea7f63efceae..9e0837a76393 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_tx.c
@@ -1,5 +1,6 @@
 /*-
  * Copyright (c) 2015-2021 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2022 NVIDIA corporation & affiliates.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -665,8 +666,7 @@ mlx5e_sq_dump_xmit(struct mlx5e_sq *sq, struct mlx5e_xmit_args *parg, struct mbu
 
 	/* store pointer to mbuf */
 	sq->mbuf[pi].mbuf = mb;
-	sq->mbuf[pi].p_refcount = parg->pref;
-	atomic_add_int(parg->pref, 1);
+	sq->mbuf[pi].mst = m_snd_tag_ref(parg->mst);
 
 	/* count all traffic going out */
 	sq->stats.packets++;
@@ -996,9 +996,11 @@ top:
 	/* Store pointer to mbuf */
 	sq->mbuf[pi].mbuf = mb;
 	sq->mbuf[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
-	sq->mbuf[pi].p_refcount = args.pref;
-	if (unlikely(args.pref != NULL))
-		atomic_add_int(args.pref, 1);
+	if (unlikely(args.mst != NULL))
+		sq->mbuf[pi].mst = m_snd_tag_ref(args.mst);
+	else
+		MPASS(sq->mbuf[pi].mst == NULL);
+
 	sq->pc += sq->mbuf[pi].num_wqebbs;
 
 	/* Count all traffic going out */
@@ -1028,6 +1030,7 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
 
 	while (budget > 0) {
 		struct mlx5_cqe64 *cqe;
+		struct m_snd_tag *mst;
 		struct mbuf *mb;
 		bool match;
 		u16 sqcc_this;
@@ -1065,13 +1068,10 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
 			match = (delta < sq->mbuf[ci].num_wqebbs);
 			mb = sq->mbuf[ci].mbuf;
 			sq->mbuf[ci].mbuf = NULL;
+			mst = sq->mbuf[ci].mst;
+			sq->mbuf[ci].mst = NULL;
 
-			if (unlikely(sq->mbuf[ci].p_refcount != NULL)) {
-				atomic_add_int(sq->mbuf[ci].p_refcount, -1);
-				sq->mbuf[ci].p_refcount = NULL;
-			}
-
-			if (mb == NULL) {
+			if (unlikely(mb == NULL)) {
 				if (unlikely(sq->mbuf[ci].num_bytes == 0))
 					sq->stats.nop++;
 			} else {
@@ -1082,6 +1082,10 @@ mlx5e_poll_tx_cq(struct mlx5e_sq *sq, int budget)
 				/* Free transmitted mbuf */
 				m_freem(mb);
 			}
+
+			if (unlikely(mst != NULL))
+				m_snd_tag_rele(mst);
+
 			sqcc += sq->mbuf[ci].num_wqebbs;
 		}
 	}
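
The net effect on teardown: the destroy work no longer has to poll a private
per-tag counter. A minimal before/after sketch (illustrative only, condensed
from the hunks above):

    /* before: the destroy work busy-waited on pending references */
    while (ptag->refs != 0)
        msleep(1);

    /* after: every pending mbuf and WQE command holds a real send tag
     * reference, so the destroy work only runs once the final
     * m_snd_tag_rele() drops the count to zero. This also keeps an SQ
     * shared by TX-TLS and rate limiting alive until all of its
     * completions have drained. */
    m_snd_tag_rele(mst);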