git: 9fdb683d92b3 - main - cxgbe/iw_cxgbe: Fail early in some callbacks when the RNIC is stopped.
Date: Fri, 30 Aug 2024 15:33:06 UTC
The branch main has been updated by np:

URL: https://cgit.FreeBSD.org/src/commit/?id=9fdb683d92b36cbd20bbd8d61f0c1138f8348dd4

commit 9fdb683d92b36cbd20bbd8d61f0c1138f8348dd4
Author:     Navdeep Parhar <np@FreeBSD.org>
AuthorDate: 2024-08-30 00:23:16 +0000
Commit:     Navdeep Parhar <np@FreeBSD.org>
CommitDate: 2024-08-30 15:31:45 +0000

    cxgbe/iw_cxgbe: Fail early in some callbacks when the RNIC is stopped.

    Stop allocating new resources when the RNIC is stopped but continue to
    allow previously allocated resources to be freed.  Note that t4_tom's
    uld_stop tears down all TOE connections, including those being used
    for iWARP, and that triggers the cleanup of iWARP resources.

    Fail post_send/post_recv early too to avoid the SQ doorbell.

    MFC after:      1 week
    Sponsored by:   Chelsio Communications
---
 sys/dev/cxgbe/iw_cxgbe/cm.c       | 2 ++
 sys/dev/cxgbe/iw_cxgbe/cq.c       | 4 ++++
 sys/dev/cxgbe/iw_cxgbe/mem.c      | 3 +++
 sys/dev/cxgbe/iw_cxgbe/provider.c | 2 ++
 sys/dev/cxgbe/iw_cxgbe/qp.c       | 7 +++++++
 5 files changed, 18 insertions(+)

diff --git a/sys/dev/cxgbe/iw_cxgbe/cm.c b/sys/dev/cxgbe/iw_cxgbe/cm.c
index d8def446d6f0..d291eeeb4f40 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cm.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cm.c
@@ -2602,6 +2602,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
 
+	if (__predict_false(c4iw_stopped(&dev->rdev)))
+		return -EIO;
 
 	if ((conn_param->ord > c4iw_max_read_depth) ||
 	    (conn_param->ird > c4iw_max_read_depth)) {
diff --git a/sys/dev/cxgbe/iw_cxgbe/cq.c b/sys/dev/cxgbe/iw_cxgbe/cq.c
index 9339d083cae3..197f2bcf8af0 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cq.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cq.c
@@ -106,6 +106,8 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	struct wrqe *wr;
 	u64 cq_bar2_qoffset = 0;
 
+	if (__predict_false(c4iw_stopped(rdev)))
+		return -EIO;
 	cq->cqid = c4iw_get_cqid(rdev, uctx);
 	if (!cq->cqid) {
 		ret = -ENOMEM;
@@ -1037,6 +1039,8 @@ int c4iw_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
 	unsigned long flag;
 
 	chp = to_c4iw_cq(ibcq);
+	if (__predict_false(c4iw_stopped(chp->cq.rdev)))
+		return -EIO;
 	spin_lock_irqsave(&chp->lock, flag);
 	t4_arm_cq(&chp->cq,
 	    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED);
diff --git a/sys/dev/cxgbe/iw_cxgbe/mem.c b/sys/dev/cxgbe/iw_cxgbe/mem.c
index 348cd3985e87..4a1adc118b7c 100644
--- a/sys/dev/cxgbe/iw_cxgbe/mem.c
+++ b/sys/dev/cxgbe/iw_cxgbe/mem.c
@@ -621,6 +621,9 @@ struct ib_mr *c4iw_alloc_mr(struct ib_pd *pd,
 	php = to_c4iw_pd(pd);
 	rhp = php->rhp;
 
+	if (__predict_false(c4iw_stopped(&rhp->rdev)))
+		return ERR_PTR(-EIO);
+
 	if (mr_type != IB_MR_TYPE_MEM_REG ||
 	    max_num_sg > t4_max_fr_depth(&rhp->rdev, use_dsgl))
 		return ERR_PTR(-EINVAL);
diff --git a/sys/dev/cxgbe/iw_cxgbe/provider.c b/sys/dev/cxgbe/iw_cxgbe/provider.c
index 729733a040d5..511caa436969 100644
--- a/sys/dev/cxgbe/iw_cxgbe/provider.c
+++ b/sys/dev/cxgbe/iw_cxgbe/provider.c
@@ -231,6 +231,8 @@ c4iw_allocate_pd(struct ib_pd *pd, struct ib_udata *udata)
 	CTR4(KTR_IW_CXGBE, "%s: ibdev %p, pd %p, data %p", __func__, ibdev,
 	    pd, udata);
 	rhp = (struct c4iw_dev *) ibdev;
+	if (__predict_false(c4iw_stopped(&rhp->rdev)))
+		return -EIO;
 	pdid = c4iw_get_resource(&rhp->rdev.resource.pdid_table);
 	if (!pdid)
 		return -EINVAL;
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index 3aab07755101..0e374bc961c4 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -138,6 +138,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
 	struct wrqe *wr;
 	u64 sq_bar2_qoffset = 0, rq_bar2_qoffset = 0;
 
+	if (__predict_false(c4iw_stopped(rdev)))
+		return -EIO;
+
 	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
 	if (!wq->sq.qid)
 		return -ENOMEM;
@@ -785,6 +788,8 @@ int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
 
 	qhp = to_c4iw_qp(ibqp);
 	rdev = &qhp->rhp->rdev;
+	if (__predict_false(c4iw_stopped(rdev)))
+		return -EIO;
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
@@ -920,6 +925,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
 	u16 idx = 0;
 
 	qhp = to_c4iw_qp(ibqp);
+	if (__predict_false(c4iw_stopped(&qhp->rhp->rdev)))
+		return -EIO;
 	spin_lock_irqsave(&qhp->lock, flag);
 	if (t4_wq_in_error(&qhp->wq)) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
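
The change itself is mechanical: each allocating callback gains a branch-predicted early return before it touches hardware or queue state, while the free paths are left alone so teardown can still make progress. The standalone C sketch below only models that idea under stated assumptions; fake_rdev, fake_alloc, and fake_free are hypothetical names, not part of iw_cxgbe, and the __predict_false fallback simply mirrors the FreeBSD sys/cdefs.h macro.

/*
 * Minimal sketch (not driver code): allocating callbacks fail early with
 * EIO once the device is stopped, while frees of existing resources are
 * still allowed.  All names here are illustrative.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define __predict_false(x) __builtin_expect(!!(x), 0)

struct fake_rdev {
	bool stopped;	/* set when the RNIC is stopped */
	int nres;	/* resources currently allocated */
};

/* Allocation path: refuse new work once the device is stopped. */
static int
fake_alloc(struct fake_rdev *rdev)
{
	if (__predict_false(rdev->stopped))
		return (-EIO);	/* fail early, before touching hardware */
	rdev->nres++;
	return (0);
}

/* Free path: always permitted so existing resources can be cleaned up. */
static void
fake_free(struct fake_rdev *rdev)
{
	if (rdev->nres > 0)
		rdev->nres--;
}

int
main(void)
{
	struct fake_rdev rdev = { .stopped = false, .nres = 0 };

	fake_alloc(&rdev);	/* succeeds while the device is running */
	rdev.stopped = true;	/* e.g. after the uld_stop teardown begins */
	printf("alloc after stop: %d\n", fake_alloc(&rdev));	/* -EIO */
	fake_free(&rdev);	/* frees still go through */
	printf("resources left: %d\n", rdev.nres);		/* 0 */
	return (0);
}

The same shape applies to post_send/post_recv in the diff above: returning -EIO before taking the QP lock keeps the stopped path from ringing the SQ doorbell at all.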