git: 5b7aa6c7bc9d - main - irdma(4): update irdma to version 1.3.56-k
Date: Fri, 13 Mar 2026 12:16:31 UTC
The branch main has been updated by kgalazka:
URL: https://cgit.FreeBSD.org/src/commit/?id=5b7aa6c7bc9db19e8bd34a5b7892fb5df2a3068b
commit 5b7aa6c7bc9db19e8bd34a5b7892fb5df2a3068b
Author: Bartosz Sobczak <bartosz.sobczak@intel.com>
AuthorDate: 2026-03-13 11:56:25 +0000
Commit: Krzysztof Galazka <kgalazka@FreeBSD.org>
CommitDate: 2026-03-13 12:00:55 +0000
irdma(4): update irdma to version 1.3.56-k
Update Intel irdma driver to version 1.3.56-k
Notable changes:
- adding E830 support
- adding E835 support
Signed-off-by: Sobczak, Bartosz <bartosz.sobczak@intel.com>
Reviewed by: Andrew Zhu <anzhu@netapp.com>
Tested by: Mateusz Moga <mateusz.moga@intel.com>
MFC after: 2 weeks
Sponsored by: Intel Corporation
Differential Revision: https://reviews.freebsd.org/D55479
---
contrib/ofed/libirdma/ice_devids.h | 26 +-
contrib/ofed/libirdma/irdma.h | 3 +-
contrib/ofed/libirdma/irdma_defs.h | 100 +++----
contrib/ofed/libirdma/irdma_uk.c | 252 ++++++++++--------
contrib/ofed/libirdma/irdma_umain.c | 18 +-
contrib/ofed/libirdma/irdma_user.h | 20 +-
contrib/ofed/libirdma/irdma_uverbs.c | 110 ++++----
sys/dev/irdma/fbsd_kcompat.c | 160 +++++++++++-
sys/dev/irdma/fbsd_kcompat.h | 19 +-
sys/dev/irdma/ice_devids.h | 26 +-
sys/dev/irdma/icrdma.c | 32 ++-
sys/dev/irdma/icrdma_hw.c | 59 +++--
sys/dev/irdma/icrdma_hw.h | 17 +-
sys/dev/irdma/irdma.h | 4 +-
sys/dev/irdma/irdma_cm.c | 493 +++++++++++++++++++++++++----------
sys/dev/irdma/irdma_cm.h | 3 +-
sys/dev/irdma/irdma_ctrl.c | 273 +++++++++----------
sys/dev/irdma/irdma_defs.h | 136 +++++-----
sys/dev/irdma/irdma_hmc.c | 12 +-
sys/dev/irdma/irdma_hw.c | 151 ++++++-----
sys/dev/irdma/irdma_kcompat.c | 301 ++++++++++++++++-----
sys/dev/irdma/irdma_main.h | 41 ++-
sys/dev/irdma/irdma_pble.c | 10 +-
sys/dev/irdma/irdma_protos.h | 7 +-
sys/dev/irdma/irdma_puda.c | 25 +-
sys/dev/irdma/irdma_puda.h | 14 +-
sys/dev/irdma/irdma_type.h | 51 ++--
sys/dev/irdma/irdma_uda.h | 2 +
sys/dev/irdma/irdma_uda_d.h | 4 +-
sys/dev/irdma/irdma_uk.c | 250 ++++++++++--------
sys/dev/irdma/irdma_user.h | 26 +-
sys/dev/irdma/irdma_utils.c | 376 +++++++++++++++++---------
sys/dev/irdma/irdma_verbs.c | 245 +++++++++--------
sys/dev/irdma/irdma_verbs.h | 25 +-
sys/dev/irdma/irdma_ws.c | 157 ++++++-----
sys/dev/irdma/osdep.h | 9 +-
sys/modules/irdma/Makefile | 2 +-
37 files changed, 2225 insertions(+), 1234 deletions(-)
diff --git a/contrib/ofed/libirdma/ice_devids.h b/contrib/ofed/libirdma/ice_devids.h
index 57a7f2f7c2af..0cf7aa6aee22 100644
--- a/contrib/ofed/libirdma/ice_devids.h
+++ b/contrib/ofed/libirdma/ice_devids.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2019 - 2020 Intel Corporation
+ * Copyright (c) 2019 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -88,4 +88,28 @@
#define ICE_DEV_ID_E822L_10G_BASE_T 0x1899
/* Intel(R) Ethernet Connection E822-L 1GbE */
#define ICE_DEV_ID_E822L_SGMII 0x189A
+/* Intel(R) Ethernet Controller E830-CC for backplane */
+#define ICE_DEV_ID_E830_BACKPLANE 0x12D1
+/* Intel(R) Ethernet Controller E830-CC for QSFP */
+#define ICE_DEV_ID_E830_QSFP56 0x12D2
+/* Intel(R) Ethernet Controller E830-CC for SFP */
+#define ICE_DEV_ID_E830_SFP 0x12D3
+/* Intel(R) Ethernet Controller E830-CC for SFP-DD */
+#define ICE_DEV_ID_E830_SFP_DD 0x12D4
+/* Intel(R) Ethernet Controller E830-C for backplane */
+#define ICE_DEV_ID_E830C_BACKPLANE 0x12D5
+/* Intel(R) Ethernet Controller E830-XXV for backplane */
+#define ICE_DEV_ID_E830_XXV_BACKPLANE 0x12DC
+/* Intel(R) Ethernet Controller E830-C for QSFP */
+#define ICE_DEV_ID_E830C_QSFP 0x12D8
+/* Intel(R) Ethernet Controller E830-XXV for QSFP */
+#define ICE_DEV_ID_E830_XXV_QSFP 0x12DD
+/* Intel(R) Ethernet Controller E830-C for SFP */
+#define ICE_DEV_ID_E830C_SFP 0x12DA
+/* Intel(R) Ethernet Controller E830-XXV for SFP */
+#define ICE_DEV_ID_E830_XXV_SFP 0x12DE
+/* Intel(R) Ethernet Controller E835-XXV for SFP */
+#define ICE_DEV_ID_E835_XXV_SFP 0x124A
+/* Intel(R) Ethernet Controller E835-CC for QSFP */
+#define ICE_DEV_ID_E835_QSFP 0x1249
#endif /* ICE_DEVIDS_H */
diff --git a/contrib/ofed/libirdma/irdma.h b/contrib/ofed/libirdma/irdma.h
index f4a5a4796f82..6b85ff1a7105 100644
--- a/contrib/ofed/libirdma/irdma.h
+++ b/contrib/ofed/libirdma/irdma.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2017 - 2022 Intel Corporation
+ * Copyright (c) 2017 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -57,6 +57,7 @@ struct irdma_uk_attrs {
u32 max_hw_wq_quanta;
u32 min_hw_cq_size;
u32 max_hw_cq_size;
+ u16 max_hw_push_len;
u16 max_hw_sq_chunk;
u16 min_hw_wq_size;
u8 hw_rev;
diff --git a/contrib/ofed/libirdma/irdma_defs.h b/contrib/ofed/libirdma/irdma_defs.h
index 39d4e7772c31..7deaf762c204 100644
--- a/contrib/ofed/libirdma/irdma_defs.h
+++ b/contrib/ofed/libirdma/irdma_defs.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -63,6 +63,27 @@
#define IRDMA_BYTE_200 200
#define IRDMA_BYTE_208 208
#define IRDMA_BYTE_216 216
+#define IRDMA_BYTE_224 224
+#define IRDMA_BYTE_232 232
+#define IRDMA_BYTE_240 240
+#define IRDMA_BYTE_248 248
+#define IRDMA_BYTE_256 256
+#define IRDMA_BYTE_264 264
+#define IRDMA_BYTE_272 272
+#define IRDMA_BYTE_280 280
+#define IRDMA_BYTE_288 288
+#define IRDMA_BYTE_296 296
+#define IRDMA_BYTE_304 304
+#define IRDMA_BYTE_312 312
+#define IRDMA_BYTE_320 320
+#define IRDMA_BYTE_328 328
+#define IRDMA_BYTE_336 336
+#define IRDMA_BYTE_344 344
+#define IRDMA_BYTE_352 352
+#define IRDMA_BYTE_360 360
+#define IRDMA_BYTE_368 368
+#define IRDMA_BYTE_376 376
+#define IRDMA_BYTE_384 384
#define IRDMA_QP_TYPE_IWARP 1
#define IRDMA_QP_TYPE_UDA 2
@@ -81,6 +102,8 @@
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN1 2
#define IRDMA_MAX_RQ_WQE_SHIFT_GEN2 3
+#define IRDMA_DEFAULT_MAX_PUSH_LEN 8192
+
#define IRDMA_SQ_RSVD 258
#define IRDMA_RQ_RSVD 1
@@ -241,7 +264,7 @@
#define IRDMAQPSQ_DESTQPN_S 32
#define IRDMAQPSQ_DESTQPN GENMASK_ULL(55, 32)
#define IRDMAQPSQ_AHID_S 0
-#define IRDMAQPSQ_AHID GENMASK_ULL(16, 0)
+#define IRDMAQPSQ_AHID GENMASK_ULL(24, 0)
#define IRDMAQPSQ_INLINEDATAFLAG_S 57
#define IRDMAQPSQ_INLINEDATAFLAG BIT_ULL(57)
@@ -338,9 +361,9 @@
#define IRDMA_RING_MOVE_HEAD(_ring, _retcode) \
{ \
u32 size; \
- size = (_ring).size; \
+ size = IRDMA_RING_SIZE(_ring); \
if (!IRDMA_RING_FULL_ERR(_ring)) { \
- (_ring).head = ((_ring).head + 1) % size; \
+ IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + 1) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
@@ -349,79 +372,40 @@
#define IRDMA_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
{ \
u32 size; \
- size = (_ring).size; \
+ size = IRDMA_RING_SIZE(_ring); \
if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < size) { \
- (_ring).head = ((_ring).head + (_count)) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = ENOSPC; \
- } \
- }
-#define IRDMA_SQ_RING_MOVE_HEAD(_ring, _retcode) \
- { \
- u32 size; \
- size = (_ring).size; \
- if (!IRDMA_SQ_RING_FULL_ERR(_ring)) { \
- (_ring).head = ((_ring).head + 1) % size; \
- (_retcode) = 0; \
- } else { \
- (_retcode) = ENOSPC; \
- } \
- }
-#define IRDMA_SQ_RING_MOVE_HEAD_BY_COUNT(_ring, _count, _retcode) \
- { \
- u32 size; \
- size = (_ring).size; \
- if ((IRDMA_RING_USED_QUANTA(_ring) + (_count)) < (size - 256)) { \
- (_ring).head = ((_ring).head + (_count)) % size; \
+ IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + (_count)) % size; \
(_retcode) = 0; \
} else { \
(_retcode) = ENOSPC; \
} \
}
-#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
- (_ring).head = ((_ring).head + (_count)) % (_ring).size
-#define IRDMA_RING_MOVE_TAIL(_ring) \
- (_ring).tail = ((_ring).tail + 1) % (_ring).size
+#define IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, _count) \
+ (IRDMA_RING_CURRENT_HEAD(_ring) = (IRDMA_RING_CURRENT_HEAD(_ring) + (_count)) % IRDMA_RING_SIZE(_ring))
#define IRDMA_RING_MOVE_HEAD_NOCHECK(_ring) \
- (_ring).head = ((_ring).head + 1) % (_ring).size
+ IRDMA_RING_MOVE_HEAD_BY_COUNT_NOCHECK(_ring, 1)
#define IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, _count) \
- (_ring).tail = ((_ring).tail + (_count)) % (_ring).size
+ IRDMA_RING_CURRENT_TAIL(_ring) = (IRDMA_RING_CURRENT_TAIL(_ring) + (_count)) % IRDMA_RING_SIZE(_ring)
+
+#define IRDMA_RING_MOVE_TAIL(_ring) \
+ IRDMA_RING_MOVE_TAIL_BY_COUNT(_ring, 1)
#define IRDMA_RING_SET_TAIL(_ring, _pos) \
- (_ring).tail = (_pos) % (_ring).size
+ IRDMA_RING_CURRENT_TAIL(_ring) = (_pos) % IRDMA_RING_SIZE(_ring)
#define IRDMA_RING_FULL_ERR(_ring) \
( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 1)) \
- )
-
-#define IRDMA_ERR_RING_FULL2(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 2)) \
- )
-
-#define IRDMA_ERR_RING_FULL3(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 3)) \
+ (IRDMA_RING_USED_QUANTA(_ring) == (IRDMA_RING_SIZE(_ring) - 1)) \
)
#define IRDMA_SQ_RING_FULL_ERR(_ring) \
( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 257)) \
+ (IRDMA_RING_USED_QUANTA(_ring) == (IRDMA_RING_SIZE(_ring) - 257)) \
)
-#define IRDMA_ERR_SQ_RING_FULL2(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 258)) \
- )
-#define IRDMA_ERR_SQ_RING_FULL3(_ring) \
- ( \
- (IRDMA_RING_USED_QUANTA(_ring) == ((_ring).size - 259)) \
- )
#define IRDMA_RING_MORE_WORK(_ring) \
( \
(IRDMA_RING_USED_QUANTA(_ring) != 0) \
@@ -429,17 +413,17 @@
#define IRDMA_RING_USED_QUANTA(_ring) \
( \
- (((_ring).head + (_ring).size - (_ring).tail) % (_ring).size) \
+ ((IRDMA_RING_CURRENT_HEAD(_ring) + IRDMA_RING_SIZE(_ring) - IRDMA_RING_CURRENT_TAIL(_ring)) % IRDMA_RING_SIZE(_ring)) \
)
#define IRDMA_RING_FREE_QUANTA(_ring) \
( \
- ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 1) \
+ (IRDMA_RING_SIZE(_ring) - IRDMA_RING_USED_QUANTA(_ring) - 1) \
)
#define IRDMA_SQ_RING_FREE_QUANTA(_ring) \
( \
- ((_ring).size - IRDMA_RING_USED_QUANTA(_ring) - 257) \
+ (IRDMA_RING_SIZE(_ring) - IRDMA_RING_USED_QUANTA(_ring) - 257) \
)
#define IRDMA_ATOMIC_RING_MOVE_HEAD(_ring, index, _retcode) \
diff --git a/contrib/ofed/libirdma/irdma_uk.c b/contrib/ofed/libirdma/irdma_uk.c
index 115c5f0a27f0..c42d0f3e9673 100644
--- a/contrib/ofed/libirdma/irdma_uk.c
+++ b/contrib/ofed/libirdma/irdma_uk.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -133,16 +133,18 @@ irdma_nop_1(struct irdma_qp_uk *qp)
void
irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx)
{
- __le64 *wqe;
+ struct irdma_qp_quanta *sq;
u32 wqe_idx;
if (!(qp_wqe_idx & 0x7F)) {
wqe_idx = (qp_wqe_idx + 128) % qp->sq_ring.size;
- wqe = qp->sq_base[wqe_idx].elem;
+ sq = qp->sq_base + wqe_idx;
if (wqe_idx)
- memset(wqe, qp->swqe_polarity ? 0 : 0xFF, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0 : 0xFF,
+ 128 * sizeof(*sq));
else
- memset(wqe, qp->swqe_polarity ? 0xFF : 0, 0x1000);
+ memset(sq, qp->swqe_polarity ? 0xFF : 0,
+ 128 * sizeof(*sq));
}
}
@@ -200,22 +202,65 @@ irdma_qp_ring_push_db(struct irdma_qp_uk *qp, u32 wqe_idx)
qp->push_dropped = false;
}
+/**
+ * irdma_qp_push_wqe - setup push wqe and ring db
+ * @qp: hw qp ptr
+ * @wqe: wqe ptr
+ * @quanta: numbers of quanta in wqe
+ * @wqe_idx: wqe index
+ * @push_wqe: if to use push for the wqe
+ */
void
irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 * wqe, u16 quanta,
- u32 wqe_idx, bool post_sq)
+ u32 wqe_idx, bool push_wqe)
{
__le64 *push;
- if (IRDMA_RING_CURRENT_HEAD(qp->initial_ring) !=
- IRDMA_RING_CURRENT_TAIL(qp->sq_ring) &&
- !qp->push_mode) {
- irdma_uk_qp_post_wr(qp);
- } else {
+ if (push_wqe) {
push = (__le64 *) ((uintptr_t)qp->push_wqe +
(wqe_idx & 0x7) * 0x20);
irdma_memcpy(push, wqe, quanta * IRDMA_QP_WQE_MIN_SIZE);
irdma_qp_ring_push_db(qp, wqe_idx);
+ qp->last_push_db = true;
+ } else if (qp->last_push_db) {
+ qp->last_push_db = false;
+ db_wr32(qp->qp_id, qp->wqe_alloc_db);
+ } else {
+ irdma_uk_qp_post_wr(qp);
+ }
+}
+
+/**
+ * irdma_push_ring_free - check if sq ring is free to post push wqe
+ * @qp: hw qp ptr
+ */
+static inline bool
+irdma_push_ring_free(struct irdma_qp_uk *qp)
+{
+ u32 head, tail;
+
+ head = IRDMA_RING_CURRENT_HEAD(qp->initial_ring);
+ tail = IRDMA_RING_CURRENT_TAIL(qp->sq_ring);
+
+ if (head == tail || head == (tail + 1))
+ return true;
+
+ return false;
+}
+
+/**
+ * irdma_enable_push_wqe - decide whether to use push, based on sq ring state and total size
+ * @qp: hw qp ptr
+ * @total_size: total data size
+ */
+static inline bool
+irdma_enable_push_wqe(struct irdma_qp_uk *qp, u32 total_size)
+{
+ if (irdma_push_ring_free(qp) &&
+ total_size <= qp->uk_attrs->max_hw_push_len) {
+ return true;
}
+ return false;
}
/**
@@ -234,7 +279,8 @@ irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx,
__le64 *wqe;
__le64 *wqe_0 = NULL;
u32 nop_wqe_idx;
- u16 avail_quanta, wqe_quanta = *quanta;
+ u16 wqe_quanta = *quanta;
+ u16 avail_quanta;
u16 i;
avail_quanta = qp->uk_attrs->max_hw_sq_chunk -
@@ -330,7 +376,7 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe = false;
op_info = &info->op.rdma_write;
if (op_info->num_lo_sges > qp->max_sq_frag_cnt)
@@ -350,11 +396,13 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
+ if (qp->push_db)
+ info->push_wqe = irdma_enable_push_wqe(qp, total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
@@ -399,8 +447,8 @@ irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -429,7 +477,7 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
u16 quanta;
u64 hdr;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe &= qp->push_db ? true : false;
op_info = &info->op.rdma_read;
if (qp->max_sq_frag_cnt < op_info->num_lo_sges)
@@ -451,7 +499,6 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
qp->ord_cnt = 0;
}
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
addl_frag_cnt = op_info->num_lo_sges > 1 ?
(op_info->num_lo_sges - 1) : 0;
local_fence |= info->local_fence;
@@ -490,8 +537,8 @@ irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -517,7 +564,7 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
bool read_fence = false;
u16 quanta;
- info->push_wqe = qp->push_db ? true : false;
+ info->push_wqe = false;
op_info = &info->op.send;
if (qp->max_sq_frag_cnt < op_info->num_sges)
@@ -534,6 +581,9 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
if (ret_code)
return ret_code;
+ if (qp->push_db)
+ info->push_wqe = irdma_enable_push_wqe(qp, total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
@@ -587,8 +637,8 @@ irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
udma_to_device_barrier(); /* make sure WQE is populated before valid bit is set */
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -780,11 +830,11 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
return EINVAL;
quanta = qp->wqe_ops.iw_inline_data_size_to_quanta(total_size);
+
wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, &quanta, total_size, info);
if (!wqe)
return ENOSPC;
- qp->sq_wrtrk_array[wqe_idx].signaled = info->signaled;
read_fence |= info->read_fence;
set_64bit_val(wqe, IRDMA_BYTE_16,
FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
@@ -812,8 +862,8 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -886,8 +936,8 @@ irdma_uk_inline_send(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -937,8 +987,8 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -989,8 +1039,8 @@ irdma_uk_mw_bind(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info,
set_64bit_val(wqe, IRDMA_BYTE_24, hdr);
- if (info->push_wqe)
- irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, post_sq);
+ if (qp->push_db)
+ irdma_qp_push_wqe(qp, wqe, quanta, wqe_idx, info->push_wqe);
else if (post_sq)
irdma_uk_qp_post_wr(qp);
@@ -1226,26 +1276,25 @@ irdma_check_rq_cqe(struct irdma_qp_uk *qp, u32 *array_idx)
}
/**
- * irdma_skip_duplicate_flush_cmpl - check last cmpl and update wqe if needed
- *
- * @ring: sq/rq ring
- * @flush_seen: information if flush for specific ring was already seen
- * @comp_status: completion status
- * @wqe_idx: new value of WQE index returned if there is more work on ring
+ * irdma_uk_cq_empty - Check if CQ is empty
+ * @cq: hw cq
*/
-static inline int
-irdma_skip_duplicate_flush_cmpl(struct irdma_ring ring, u8 flush_seen,
- enum irdma_cmpl_status comp_status,
- u32 *wqe_idx)
+bool
+irdma_uk_cq_empty(struct irdma_cq_uk *cq)
{
- if (flush_seen) {
- if (IRDMA_RING_MORE_WORK(ring))
- *wqe_idx = ring.tail;
- else
- return ENOENT;
- }
+ __le64 *cqe;
+ u8 polarity;
+ u64 qword3;
- return 0;
+ if (cq->avoid_mem_cflct)
+ cqe = IRDMA_GET_CURRENT_EXTENDED_CQ_ELEM(cq);
+ else
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(cq);
+
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != cq->polarity;
}
/**
@@ -1338,6 +1387,10 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ipv4 = (bool)FIELD_GET(IRDMACQ_IPV4, qword3);
get_64bit_val(cqe, IRDMA_BYTE_8, &comp_ctx);
qp = (struct irdma_qp_uk *)(irdma_uintptr) comp_ctx;
+ if (!qp || qp->destroy_pending) {
+ ret_code = EFAULT;
+ goto exit;
+ }
if (info->error) {
info->major_err = FIELD_GET(IRDMA_CQ_MAJERR, qword3);
info->minor_err = FIELD_GET(IRDMA_CQ_MINERR, qword3);
@@ -1367,10 +1420,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
info->ud_src_qpn = (u32)FIELD_GET(IRDMACQ_UDSRCQPN, qword2);
info->solicited_event = (bool)FIELD_GET(IRDMACQ_SOEVENT, qword3);
- if (!qp || qp->destroy_pending) {
- ret_code = EFAULT;
- goto exit;
- }
wqe_idx = (u32)FIELD_GET(IRDMA_CQ_WQEIDX, qword3);
info->qp_handle = (irdma_qp_handle) (irdma_uintptr) qp;
info->op_type = (u8)FIELD_GET(IRDMACQ_OP, qword3);
@@ -1378,51 +1427,44 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (info->q_type == IRDMA_CQE_QTYPE_RQ) {
u32 array_idx;
- ret_code = irdma_skip_duplicate_flush_cmpl(qp->rq_ring,
- qp->rq_flush_seen,
- info->comp_status,
- &wqe_idx);
- if (ret_code != 0)
- goto exit;
-
array_idx = wqe_idx / qp->rq_wqe_size_multiplier;
+ info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
+ info->signaled = 1;
+
+ if (qword3 & IRDMACQ_STAG) {
+ info->stag_invalid_set = true;
+ info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
+ } else {
+ info->stag_invalid_set = false;
+ }
if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED ||
info->comp_status == IRDMA_COMPL_STATUS_UNKNOWN) {
+ ret_code = pthread_spin_lock(qp->lock);
+ if (ret_code)
+ return ret_code;
if (!IRDMA_RING_MORE_WORK(qp->rq_ring)) {
ret_code = ENOENT;
+ pthread_spin_unlock(qp->lock);
goto exit;
}
info->wr_id = qp->rq_wrid_array[qp->rq_ring.tail];
- info->signaled = 1;
- array_idx = qp->rq_ring.tail;
+ IRDMA_RING_SET_TAIL(qp->rq_ring, qp->rq_ring.tail + 1);
+ if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
+ qp->rq_flush_complete = true;
+ else
+ move_cq_head = false;
+ pthread_spin_unlock(qp->lock);
} else {
info->wr_id = qp->rq_wrid_array[array_idx];
- info->signaled = 1;
if (irdma_check_rq_cqe(qp, &array_idx)) {
info->wr_id = qp->rq_wrid_array[array_idx];
info->comp_status = IRDMA_COMPL_STATUS_UNKNOWN;
IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
return 0;
}
- }
-
- info->bytes_xfered = (u32)FIELD_GET(IRDMACQ_PAYLDLEN, qword0);
-
- if (qword3 & IRDMACQ_STAG) {
- info->stag_invalid_set = true;
- info->inv_stag = (u32)FIELD_GET(IRDMACQ_INVSTAG, qword2);
- } else {
- info->stag_invalid_set = false;
- }
- IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
- if (info->comp_status == IRDMA_COMPL_STATUS_FLUSHED) {
- qp->rq_flush_seen = true;
- if (!IRDMA_RING_MORE_WORK(qp->rq_ring))
- qp->rq_flush_complete = true;
- else
- move_cq_head = false;
+ IRDMA_RING_SET_TAIL(qp->rq_ring, array_idx + 1);
}
pring = &qp->rq_ring;
} else { /* q_type is IRDMA_CQE_QTYPE_SQ */
@@ -1444,12 +1486,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
qp->push_mode = false;
qp->push_dropped = true;
}
- ret_code = irdma_skip_duplicate_flush_cmpl(qp->sq_ring,
- qp->sq_flush_seen,
- info->comp_status,
- &wqe_idx);
- if (ret_code != 0)
- goto exit;
if (info->comp_status != IRDMA_COMPL_STATUS_FLUSHED) {
info->wr_id = qp->sq_wrtrk_array[wqe_idx].wrid;
info->signaled = qp->sq_wrtrk_array[wqe_idx].signaled;
@@ -1459,10 +1495,9 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
IRDMA_RING_SET_TAIL(qp->sq_ring,
wqe_idx + qp->sq_wrtrk_array[wqe_idx].quanta);
} else {
- if (pthread_spin_lock(qp->lock)) {
- ret_code = ENOENT;
- goto exit;
- }
+ ret_code = pthread_spin_lock(qp->lock);
+ if (ret_code)
+ return ret_code;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring)) {
pthread_spin_unlock(qp->lock);
ret_code = ENOENT;
@@ -1493,7 +1528,6 @@ irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
if (info->op_type == IRDMA_OP_TYPE_BIND_MW &&
info->minor_err == FLUSH_PROT_ERR)
info->minor_err = FLUSH_MW_BIND_ERR;
- qp->sq_flush_seen = true;
if (!IRDMA_RING_MORE_WORK(qp->sq_ring))
qp->sq_flush_complete = true;
pthread_spin_unlock(qp->lock);
@@ -1508,6 +1542,7 @@ exit:
if (pring && IRDMA_RING_MORE_WORK(*pring))
move_cq_head = false;
}
+
if (move_cq_head) {
IRDMA_RING_MOVE_HEAD_NOCHECK(cq->cq_ring);
if (!IRDMA_RING_CURRENT_HEAD(cq->cq_ring))
@@ -1522,8 +1557,9 @@ exit:
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
if (!cq->avoid_mem_cflct && ext_valid)
IRDMA_RING_MOVE_TAIL(cq->cq_ring);
- set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
- IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
+ if (IRDMA_RING_CURRENT_HEAD(cq->cq_ring) & 0x3F || irdma_uk_cq_empty(cq))
+ set_64bit_val(cq->shadow_area, IRDMA_BYTE_0,
+ IRDMA_RING_CURRENT_HEAD(cq->cq_ring));
} else {
qword3 &= ~IRDMA_CQ_WQEIDX;
qword3 |= FIELD_PREP(IRDMA_CQ_WQEIDX, pring->tail);
@@ -1537,9 +1573,7 @@ exit:
* irdma_round_up_wq - return round up qp wq depth
* @wqdepth: wq depth in quanta to round up
*/
-static int
-irdma_round_up_wq(u32 wqdepth)
-{
+static u64 irdma_round_up_wq(u64 wqdepth) {
int scount = 1;
for (wqdepth--; scount <= 16; scount *= 2)
@@ -1588,15 +1622,16 @@ irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
int
irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)sq_size << shift) + IRDMA_SQ_RSVD);
- *sqdepth = irdma_round_up_wq((sq_size << shift) + IRDMA_SQ_RSVD);
-
- if (*sqdepth < min_size)
- *sqdepth = min_size;
- else if (*sqdepth > uk_attrs->max_hw_wq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_wq_quanta)
return EINVAL;
+ *sqdepth = hw_quanta;
return 0;
}
@@ -1607,15 +1642,16 @@ irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *s
int
irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth)
{
- u32 min_size = (u32)uk_attrs->min_hw_wq_size << shift;
-
- *rqdepth = irdma_round_up_wq((rq_size << shift) + IRDMA_RQ_RSVD);
+ u32 min_hw_quanta = (u32)uk_attrs->min_hw_wq_size << shift;
+ u64 hw_quanta =
+ irdma_round_up_wq(((u64)rq_size << shift) + IRDMA_RQ_RSVD);
- if (*rqdepth < min_size)
- *rqdepth = min_size;
- else if (*rqdepth > uk_attrs->max_hw_rq_quanta)
+ if (hw_quanta < min_hw_quanta)
+ hw_quanta = min_hw_quanta;
+ else if (hw_quanta > uk_attrs->max_hw_rq_quanta)
return EINVAL;
+ *rqdepth = hw_quanta;
return 0;
}
diff --git a/contrib/ofed/libirdma/irdma_umain.c b/contrib/ofed/libirdma/irdma_umain.c
index e8d27c31a0dc..63b082a5aa2b 100644
--- a/contrib/ofed/libirdma/irdma_umain.c
+++ b/contrib/ofed/libirdma/irdma_umain.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2021 - 2022 Intel Corporation
+ * Copyright (c) 2021 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -48,7 +48,7 @@
/**
* Driver version
*/
-char libirdma_version[] = "1.2.36-k";
+char libirdma_version[] = "1.3.56-k";
unsigned int irdma_dbg;
@@ -87,6 +87,18 @@ static const struct hca_info hca_table[] = {
INTEL_HCA(ICE_DEV_ID_E822L_SFP),
INTEL_HCA(ICE_DEV_ID_E822L_10G_BASE_T),
INTEL_HCA(ICE_DEV_ID_E822L_SGMII),
+ INTEL_HCA(ICE_DEV_ID_E830_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E830_QSFP56),
+ INTEL_HCA(ICE_DEV_ID_E830_SFP),
+ INTEL_HCA(ICE_DEV_ID_E830_SFP_DD),
+ INTEL_HCA(ICE_DEV_ID_E830C_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E830_XXV_BACKPLANE),
+ INTEL_HCA(ICE_DEV_ID_E830C_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E830_XXV_QSFP),
+ INTEL_HCA(ICE_DEV_ID_E830C_SFP),
+ INTEL_HCA(ICE_DEV_ID_E830_XXV_SFP),
+ INTEL_HCA(ICE_DEV_ID_E835_XXV_SFP),
+ INTEL_HCA(ICE_DEV_ID_E835_QSFP),
};
static struct ibv_context_ops irdma_ctx_ops = {
@@ -239,7 +251,7 @@ irdma_driver_init(const char *uverbs_sys_path,
hca_size = sizeof(hca_table) / sizeof(struct hca_info);
while (i < hca_size && !device_found) {
- if (device_id != hca_table[i].device)
+ if (device_id == hca_table[i].device)
device_found = 1;
++i;
}
diff --git a/contrib/ofed/libirdma/irdma_user.h b/contrib/ofed/libirdma/irdma_user.h
index aeb6aa9feebd..c9f707380c59 100644
--- a/contrib/ofed/libirdma/irdma_user.h
+++ b/contrib/ofed/libirdma/irdma_user.h
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (c) 2015 - 2023 Intel Corporation
+ * Copyright (c) 2015 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -94,12 +94,10 @@ enum irdma_device_caps_const {
IRDMA_MIN_IW_QP_ID = 0,
IRDMA_QUERY_FPM_BUF_SIZE = 176,
IRDMA_COMMIT_FPM_BUF_SIZE = 176,
- IRDMA_MAX_IW_QP_ID = 262143,
IRDMA_MIN_CEQID = 0,
IRDMA_MAX_CEQID = 1023,
IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
IRDMA_MIN_CQID = 0,
- IRDMA_MAX_CQID = 524287,
IRDMA_MIN_AEQ_ENTRIES = 1,
IRDMA_MAX_AEQ_ENTRIES = 524287,
IRDMA_MIN_CEQ_ENTRIES = 1,
@@ -188,7 +186,7 @@ struct irdma_cq_uk_init_info;
struct irdma_ring {
volatile u32 head;
- volatile u32 tail; /* effective tail */
+ volatile u32 tail;
u32 size;
};
@@ -327,6 +325,7 @@ struct irdma_wqe_uk_ops {
struct irdma_bind_window *op_info);
};
+bool irdma_uk_cq_empty(struct irdma_cq_uk *cq);
int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq,
struct irdma_cq_poll_info *info);
void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
@@ -364,6 +363,8 @@ struct irdma_qp_uk {
__le64 *shadow_area;
__le32 *push_db;
__le64 *push_wqe;
+ void *push_db_map;
+ void *push_wqe_map;
struct irdma_ring sq_ring;
struct irdma_ring sq_sig_ring;
struct irdma_ring rq_ring;
@@ -393,12 +394,11 @@ struct irdma_qp_uk {
bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
bool destroy_pending:1; /* Indicates the QP is being destroyed */
+ bool last_push_db:1; /* Indicates last DB was push DB */
void *back_qp;
pthread_spinlock_t *lock;
u8 dbg_rq_flushed;
u16 ord_cnt;
- u8 sq_flush_seen;
- u8 rq_flush_seen;
u8 rd_fence_rate;
};
@@ -462,9 +462,11 @@ int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta);
int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size);
void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge,
u32 inline_data, u8 *shift);
-int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, u32 *sqdepth);
-int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, u32 *rqdepth);
+int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size,
+ u8 shift, u32 *sqdepth);
+int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size,
+ u8 shift, u32 *rqdepth);
void irdma_qp_push_wqe(struct irdma_qp_uk *qp, __le64 *wqe, u16 quanta,
- u32 wqe_idx, bool post_sq);
+ u32 wqe_idx, bool push_wqe);
void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx);
#endif /* IRDMA_USER_H */
diff --git a/contrib/ofed/libirdma/irdma_uverbs.c b/contrib/ofed/libirdma/irdma_uverbs.c
index e52ce1cfa229..aee904a087bf 100644
--- a/contrib/ofed/libirdma/irdma_uverbs.c
+++ b/contrib/ofed/libirdma/irdma_uverbs.c
@@ -1,7 +1,7 @@
/*-
* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
*
- * Copyright (C) 2019 - 2023 Intel Corporation
+ * Copyright (C) 2019 - 2026 Intel Corporation
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -221,7 +221,7 @@ irdma_urereg_mr(struct verbs_mr *vmr, int flags, struct ibv_pd *pd,
void *addr, size_t length, int access)
{
struct irdma_urereg_mr cmd = {};
- struct ibv_rereg_mr_resp resp;
+ struct ibv_rereg_mr_resp resp = {};
cmd.reg_type = IRDMA_MEMREG_TYPE_MEM;
return ibv_cmd_rereg_mr(&vmr->ibv_mr, flags, addr, length, (uintptr_t)addr,
@@ -258,7 +258,7 @@ irdma_ualloc_mw(struct ibv_pd *pd, enum ibv_mw_type type)
{
struct ibv_mw *mw;
struct ibv_alloc_mw cmd;
*** 6932 LINES SKIPPED ***