svn commit: r352989 - head/sys/dev/mlx5/mlx5_en
Hans Petter Selasky
hselasky at FreeBSD.org
Wed Oct 2 10:43:50 UTC 2019
Author: hselasky
Date: Wed Oct 2 10:43:49 2019
New Revision: 352989
URL: https://svnweb.freebsd.org/changeset/base/352989
Log:
Seal transmit path with regards to using destroyed mutex in mlx5en(4).
It may happen during link down that the running state may be observed
non-zero in the transmit routine, right before the running state is
cleared. This may end up using a destroyed mutex.
Make all channel mutexes and callouts persistent.
Preserve receive and send queue statistics during link toggle.
MFC after: 3 days
Sponsored by: Mellanox Technologies
Modified:
head/sys/dev/mlx5/mlx5_en/en.h
head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
Modified: head/sys/dev/mlx5/mlx5_en/en.h
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/en.h Wed Oct 2 10:26:26 2019 (r352988)
+++ head/sys/dev/mlx5/mlx5_en/en.h Wed Oct 2 10:43:49 2019 (r352989)
@@ -139,6 +139,10 @@
#define MLX5E_100MB (100000)
#define MLX5E_1GB (1000000)
+#define MLX5E_ZERO(ptr, field) \
+ memset(&(ptr)->field, 0, \
+ sizeof(*(ptr)) - __offsetof(__typeof(*(ptr)), field))
+
MALLOC_DECLARE(M_MLX5EN);
struct mlx5_core_dev;
@@ -741,15 +745,18 @@ struct mlx5e_rq_mbuf {
};
struct mlx5e_rq {
+ /* persistant fields */
+ struct mtx mtx;
+ struct mlx5e_rq_stats stats;
+
/* data path */
+#define mlx5e_rq_zero_start wq
struct mlx5_wq_ll wq;
- struct mtx mtx;
bus_dma_tag_t dma_tag;
u32 wqe_sz;
u32 nsegs;
struct mlx5e_rq_mbuf *mbuf;
struct ifnet *ifp;
- struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
struct lro_ctrl lro;
volatile int enabled;
@@ -783,11 +790,15 @@ struct mlx5e_snd_tag {
};
struct mlx5e_sq {
- /* data path */
+ /* persistant fields */
struct mtx lock;
- bus_dma_tag_t dma_tag;
struct mtx comp_lock;
+ struct mlx5e_sq_stats stats;
+ /* data path */
+#define mlx5e_sq_zero_start dma_tag
+ bus_dma_tag_t dma_tag;
+
/* dirtied @completion */
u16 cc;
@@ -806,7 +817,6 @@ struct mlx5e_sq {
u32 d32[2];
u64 d64;
} doorbell;
- struct mlx5e_sq_stats stats;
struct mlx5e_cq cq;
@@ -859,13 +869,9 @@ mlx5e_sq_queue_level(struct mlx5e_sq *sq)
}
struct mlx5e_channel {
- /* data path */
struct mlx5e_rq rq;
struct mlx5e_snd_tag tag;
struct mlx5e_sq sq[MLX5E_MAX_TX_NUM_TC];
- u8 num_tc;
-
- /* control */
struct mlx5e_priv *priv;
int ix;
} __aligned(MLX5E_CACHELINE_SIZE);
Modified: head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
==============================================================================
--- head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Wed Oct 2 10:26:26 2019 (r352988)
+++ head/sys/dev/mlx5/mlx5_en/mlx5_en_main.c Wed Oct 2 10:43:49 2019 (r352989)
@@ -1481,8 +1481,6 @@ mlx5e_close_rq(struct mlx5e_rq *rq)
callout_stop(&rq->watchdog);
mtx_unlock(&rq->mtx);
- callout_drain(&rq->watchdog);
-
mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
}
@@ -1564,7 +1562,7 @@ mlx5e_refresh_sq_inline_sub(struct mlx5e_priv *priv, s
{
int i;
- for (i = 0; i != c->num_tc; i++) {
+ for (i = 0; i != priv->num_tc; i++) {
mtx_lock(&c->sq[i].lock);
mlx5e_update_sq_inline(&c->sq[i]);
mtx_unlock(&c->sq[i].lock);
@@ -1751,6 +1749,12 @@ mlx5e_open_sq(struct mlx5e_channel *c,
{
int err;
+ sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
+
+ /* ensure the TX completion event factor is not zero */
+ if (sq->cev_factor == 0)
+ sq->cev_factor = 1;
+
err = mlx5e_create_sq(c, tc, param, sq);
if (err)
return (err);
@@ -1862,9 +1866,6 @@ mlx5e_drain_sq(struct mlx5e_sq *sq)
mlx5e_sq_send_nops_locked(sq, 1);
mtx_unlock(&sq->lock);
- /* make sure it is safe to free the callout */
- callout_drain(&sq->cev_callout);
-
/* wait till SQ is empty or link is down */
mtx_lock(&sq->lock);
while (sq->cc != sq->pc &&
@@ -2049,7 +2050,7 @@ mlx5e_open_tx_cqs(struct mlx5e_channel *c,
int err;
int tc;
- for (tc = 0; tc < c->num_tc; tc++) {
+ for (tc = 0; tc < c->priv->num_tc; tc++) {
/* open completion queue */
err = mlx5e_open_cq(c->priv, &cparam->tx_cq, &c->sq[tc].cq,
&mlx5e_tx_cq_comp, c->ix);
@@ -2070,7 +2071,7 @@ mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
int tc;
- for (tc = 0; tc < c->num_tc; tc++)
+ for (tc = 0; tc < c->priv->num_tc; tc++)
mlx5e_close_cq(&c->sq[tc].cq);
}
@@ -2081,7 +2082,7 @@ mlx5e_open_sqs(struct mlx5e_channel *c,
int err;
int tc;
- for (tc = 0; tc < c->num_tc; tc++) {
+ for (tc = 0; tc < c->priv->num_tc; tc++) {
err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
if (err)
goto err_close_sqs;
@@ -2101,20 +2102,28 @@ mlx5e_close_sqs_wait(struct mlx5e_channel *c)
{
int tc;
- for (tc = 0; tc < c->num_tc; tc++)
+ for (tc = 0; tc < c->priv->num_tc; tc++)
mlx5e_close_sq_wait(&c->sq[tc]);
}
static void
-mlx5e_chan_mtx_init(struct mlx5e_channel *c)
+mlx5e_chan_static_init(struct mlx5e_priv *priv, struct mlx5e_channel *c, int ix)
{
int tc;
+ /* setup priv and channel number */
+ c->priv = priv;
+ c->ix = ix;
+
+ /* setup send tag */
+ c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
+ m_snd_tag_init(&c->tag.m_snd_tag, c->priv->ifp);
+
mtx_init(&c->rq.mtx, "mlx5rx", MTX_NETWORK_LOCK, MTX_DEF);
callout_init_mtx(&c->rq.watchdog, &c->rq.mtx, 0);
- for (tc = 0; tc < c->num_tc; tc++) {
+ for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
struct mlx5e_sq *sq = c->sq + tc;
mtx_init(&sq->lock, "mlx5tx",
@@ -2123,46 +2132,40 @@ mlx5e_chan_mtx_init(struct mlx5e_channel *c)
MTX_NETWORK_LOCK " TX", MTX_DEF);
callout_init_mtx(&sq->cev_callout, &sq->lock, 0);
-
- sq->cev_factor = c->priv->params_ethtool.tx_completion_fact;
-
- /* ensure the TX completion event factor is not zero */
- if (sq->cev_factor == 0)
- sq->cev_factor = 1;
}
}
static void
-mlx5e_chan_mtx_destroy(struct mlx5e_channel *c)
+mlx5e_chan_static_destroy(struct mlx5e_channel *c)
{
int tc;
+ /* drop our reference */
+ m_snd_tag_rele(&c->tag.m_snd_tag);
+
+ callout_drain(&c->rq.watchdog);
+
mtx_destroy(&c->rq.mtx);
- for (tc = 0; tc < c->num_tc; tc++) {
+ for (tc = 0; tc != MLX5E_MAX_TX_NUM_TC; tc++) {
+ callout_drain(&c->sq[tc].cev_callout);
mtx_destroy(&c->sq[tc].lock);
mtx_destroy(&c->sq[tc].comp_lock);
}
}
static int
-mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+mlx5e_open_channel(struct mlx5e_priv *priv,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel *c)
{
- int err;
+ int i, err;
- memset(c, 0, sizeof(*c));
+ /* zero non-persistant data */
+ MLX5E_ZERO(&c->rq, mlx5e_rq_zero_start);
+ for (i = 0; i != priv->num_tc; i++)
+ MLX5E_ZERO(&c->sq[i], mlx5e_sq_zero_start);
- c->priv = priv;
- c->ix = ix;
- /* setup send tag */
- c->tag.type = IF_SND_TAG_TYPE_UNLIMITED;
- c->num_tc = priv->num_tc;
-
- /* init mutexes */
- mlx5e_chan_mtx_init(c);
-
/* open transmit completion queue */
err = mlx5e_open_tx_cqs(c, cparam);
if (err)
@@ -2197,8 +2200,6 @@ err_close_tx_cqs:
mlx5e_close_tx_cqs(c);
err_free:
- /* destroy mutexes */
- mlx5e_chan_mtx_destroy(c);
return (err);
}
@@ -2214,8 +2215,6 @@ mlx5e_close_channel_wait(struct mlx5e_channel *c)
mlx5e_close_rq_wait(&c->rq);
mlx5e_close_sqs_wait(c);
mlx5e_close_tx_cqs(c);
- /* destroy mutexes */
- mlx5e_chan_mtx_destroy(c);
}
static int
@@ -2411,14 +2410,16 @@ mlx5e_build_channel_param(struct mlx5e_priv *priv,
static int
mlx5e_open_channels(struct mlx5e_priv *priv)
{
- struct mlx5e_channel_param cparam;
+ struct mlx5e_channel_param *cparam;
int err;
int i;
int j;
- mlx5e_build_channel_param(priv, &cparam);
+ cparam = malloc(sizeof(*cparam), M_MLX5EN, M_WAITOK);
+
+ mlx5e_build_channel_param(priv, cparam);
for (i = 0; i < priv->params.num_channels; i++) {
- err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ err = mlx5e_open_channel(priv, cparam, &priv->channel[i]);
if (err)
goto err_close_channels;
}
@@ -2428,6 +2429,7 @@ mlx5e_open_channels(struct mlx5e_priv *priv)
if (err)
goto err_close_channels;
}
+ free(cparam, M_MLX5EN);
return (0);
err_close_channels:
@@ -2435,6 +2437,7 @@ err_close_channels:
mlx5e_close_channel(&priv->channel[i]);
mlx5e_close_channel_wait(&priv->channel[i]);
}
+ free(cparam, M_MLX5EN);
return (err);
}
@@ -2544,7 +2547,7 @@ mlx5e_refresh_channel_params_sub(struct mlx5e_priv *pr
if (err)
goto done;
- for (i = 0; i != c->num_tc; i++) {
+ for (i = 0; i != priv->num_tc; i++) {
err = mlx5e_refresh_sq_params(priv, &c->sq[i]);
if (err)
goto done;
@@ -3601,17 +3604,26 @@ static const char *mlx5e_pport_stats_desc[] = {
};
static void
-mlx5e_priv_mtx_init(struct mlx5e_priv *priv)
+mlx5e_priv_static_init(struct mlx5e_priv *priv, const uint32_t channels)
{
+ uint32_t x;
+
mtx_init(&priv->async_events_mtx, "mlx5async", MTX_NETWORK_LOCK, MTX_DEF);
sx_init(&priv->state_lock, "mlx5state");
callout_init_mtx(&priv->watchdog, &priv->async_events_mtx, 0);
MLX5_INIT_DOORBELL_LOCK(&priv->doorbell_lock);
+ for (x = 0; x != channels; x++)
+ mlx5e_chan_static_init(priv, &priv->channel[x], x);
}
static void
-mlx5e_priv_mtx_destroy(struct mlx5e_priv *priv)
+mlx5e_priv_static_destroy(struct mlx5e_priv *priv, const uint32_t channels)
{
+ uint32_t x;
+
+ for (x = 0; x != channels; x++)
+ mlx5e_chan_static_destroy(&priv->channel[x]);
+ callout_drain(&priv->watchdog);
mtx_destroy(&priv->async_events_mtx);
sx_destroy(&priv->state_lock);
}
@@ -3641,7 +3653,7 @@ mlx5e_disable_tx_dma(struct mlx5e_channel *ch)
{
int i;
- for (i = 0; i < ch->num_tc; i++)
+ for (i = 0; i < ch->priv->num_tc; i++)
mlx5e_drain_sq(&ch->sq[i]);
}
@@ -3693,7 +3705,7 @@ mlx5e_enable_tx_dma(struct mlx5e_channel *ch)
{
int i;
- for (i = 0; i < ch->num_tc; i++)
+ for (i = 0; i < ch->priv->num_tc; i++)
mlx5e_resume_sq(&ch->sq[i]);
}
@@ -3708,8 +3720,6 @@ mlx5e_disable_rx_dma(struct mlx5e_channel *ch)
callout_stop(&rq->watchdog);
mtx_unlock(&rq->mtx);
- callout_drain(&rq->watchdog);
-
err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
if (err != 0) {
mlx5_en_err(rq->ifp,
@@ -4175,13 +4185,15 @@ mlx5e_create_ifp(struct mlx5_core_dev *mdev)
priv = malloc(sizeof(*priv) +
(sizeof(priv->channel[0]) * mdev->priv.eq_table.num_comp_vectors),
M_MLX5EN, M_WAITOK | M_ZERO);
- mlx5e_priv_mtx_init(priv);
ifp = priv->ifp = if_alloc_dev(IFT_ETHER, mdev->pdev->dev.bsddev);
if (ifp == NULL) {
mlx5_core_err(mdev, "if_alloc() failed\n");
goto err_free_priv;
}
+ /* setup all static fields */
+ mlx5e_priv_static_init(priv, mdev->priv.eq_table.num_comp_vectors);
+
ifp->if_softc = priv;
if_initname(ifp, "mce", device_get_unit(mdev->pdev->dev.bsddev));
ifp->if_mtu = ETHERMTU;
@@ -4417,10 +4429,10 @@ err_free_sysctl:
sysctl_ctx_free(&priv->sysctl_ctx);
if (priv->sysctl_debug)
sysctl_ctx_free(&priv->stats.port_stats_debug.ctx);
+ mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
if_free(ifp);
err_free_priv:
- mlx5e_priv_mtx_destroy(priv);
free(priv, M_MLX5EN);
return (NULL);
}
@@ -4480,7 +4492,6 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vp
/* unregister device */
ifmedia_removeall(&priv->media);
ether_ifdetach(ifp);
- if_free(ifp);
#ifdef RATELIMIT
mlx5e_rl_cleanup(priv);
@@ -4498,7 +4509,8 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vp
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
mlx5e_disable_async_events(priv);
flush_workqueue(priv->wq);
- mlx5e_priv_mtx_destroy(priv);
+ mlx5e_priv_static_destroy(priv, mdev->priv.eq_table.num_comp_vectors);
+ if_free(ifp);
free(priv, M_MLX5EN);
}
More information about the svn-src-head
mailing list