git: cf5e6370f15c - main - cxgbe(4): Changes to ULD list management.
Date: Fri, 19 Jul 2024 16:18:23 UTC
The branch main has been updated by np:
URL: https://cgit.FreeBSD.org/src/commit/?id=cf5e6370f15cffabbbf508083ba7d48ec8abfa79
commit cf5e6370f15cffabbbf508083ba7d48ec8abfa79
Author: Navdeep Parhar <np@FreeBSD.org>
AuthorDate: 2024-07-06 20:43:17 +0000
Commit: Navdeep Parhar <np@FreeBSD.org>
CommitDate: 2024-07-19 16:12:49 +0000
cxgbe(4): Changes to ULD list management.
* Convert t4_uld_list to an array. There will be at most 3 items in the
list and it's simpler to track them in an array with a fixed slot for
each ULD.
* There is no need to refcount ULDs, so stop doing that.
* Add uld_ prefix to all members of uld_info.
* Rename async_event to uld_stop to match its actual purpose. Call it
for all ULDs and not just ULD_IWARP.
Reviewed by: jhb
MFC after: 1 week
Sponsored by: Chelsio Communications
Differential Revision: https://reviews.freebsd.org/D46029
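
For orientation, here is a minimal sketch of how an upper layer driver would register
against the reworked API. The example_* names are hypothetical and the declarations
(struct uld_info, t4_register_uld(), t4_unregister_uld(), ULD_TOM) are assumed to be
those from sys/dev/cxgbe/offload.h as changed in the diff below; a real driver would
pass its own ULD id.

/* Hypothetical ULD using the new array-based registration API. */
static int example_activate(struct adapter *);
static int example_deactivate(struct adapter *);
static int example_stop(struct adapter *);

static struct uld_info example_uld_info = {
	/* All members now carry the uld_ prefix; there is no uld_id member. */
	.uld_activate = example_activate,
	.uld_deactivate = example_deactivate,
	.uld_stop = example_stop,	/* optional; skipped if left NULL */
};

static int
example_mod_load(void)
{
	/* The ULD id is now passed explicitly and selects a fixed array slot. */
	return (t4_register_uld(&example_uld_info, ULD_TOM));
}

static int
example_mod_unload(void)
{
	if (t4_unregister_uld(&example_uld_info, ULD_TOM) == EBUSY)
		return (EBUSY);
	return (0);
}

With this shape, uld_stop is invoked for every active ULD from the fatal error path
(stop_all_uld() in t4_main.c below), not just for ULD_IWARP as async_event was.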
---
sys/dev/cxgbe/cxgbei/cxgbei.c | 9 +-
sys/dev/cxgbe/iw_cxgbe/device.c | 19 +++--
sys/dev/cxgbe/offload.h | 13 ++-
sys/dev/cxgbe/t4_main.c | 177 +++++++++++++++++-----------------------
sys/dev/cxgbe/tom/t4_tom.c | 9 +-
5 files changed, 97 insertions(+), 130 deletions(-)
diff --git a/sys/dev/cxgbe/cxgbei/cxgbei.c b/sys/dev/cxgbe/cxgbei/cxgbei.c
index 193d58f9eda4..04454a98e247 100644
--- a/sys/dev/cxgbe/cxgbei/cxgbei.c
+++ b/sys/dev/cxgbe/cxgbei/cxgbei.c
@@ -901,9 +901,8 @@ cxgbei_deactivate_all(struct adapter *sc, void *arg __unused)
}
static struct uld_info cxgbei_uld_info = {
- .uld_id = ULD_ISCSI,
- .activate = cxgbei_activate,
- .deactivate = cxgbei_deactivate,
+ .uld_activate = cxgbei_activate,
+ .uld_deactivate = cxgbei_deactivate,
};
static int
@@ -916,7 +915,7 @@ cxgbei_mod_load(void)
t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp);
t4_register_cpl_handler(CPL_RX_ISCSI_CMP, do_rx_iscsi_cmp);
- rc = t4_register_uld(&cxgbei_uld_info);
+ rc = t4_register_uld(&cxgbei_uld_info, ULD_ISCSI);
if (rc != 0)
return (rc);
@@ -931,7 +930,7 @@ cxgbei_mod_unload(void)
t4_iterate(cxgbei_deactivate_all, NULL);
- if (t4_unregister_uld(&cxgbei_uld_info) == EBUSY)
+ if (t4_unregister_uld(&cxgbei_uld_info, ULD_ISCSI) == EBUSY)
return (EBUSY);
t4_register_cpl_handler(CPL_ISCSI_HDR, NULL);
diff --git a/sys/dev/cxgbe/iw_cxgbe/device.c b/sys/dev/cxgbe/iw_cxgbe/device.c
index fa886766e383..28d6d0f2d00c 100644
--- a/sys/dev/cxgbe/iw_cxgbe/device.c
+++ b/sys/dev/cxgbe/iw_cxgbe/device.c
@@ -259,13 +259,12 @@ static int c4iw_mod_load(void);
static int c4iw_mod_unload(void);
static int c4iw_activate(struct adapter *);
static int c4iw_deactivate(struct adapter *);
-static void c4iw_async_event(struct adapter *);
+static int c4iw_stop(struct adapter *);
static struct uld_info c4iw_uld_info = {
- .uld_id = ULD_IWARP,
- .activate = c4iw_activate,
- .deactivate = c4iw_deactivate,
- .async_event = c4iw_async_event,
+ .uld_activate = c4iw_activate,
+ .uld_deactivate = c4iw_deactivate,
+ .uld_stop = c4iw_stop,
};
static int
@@ -326,8 +325,8 @@ c4iw_deactivate(struct adapter *sc)
return (0);
}
-static void
-c4iw_async_event(struct adapter *sc)
+static int
+c4iw_stop(struct adapter *sc)
{
struct c4iw_dev *iwsc = sc->iwarp_softc;
@@ -341,6 +340,8 @@ c4iw_async_event(struct adapter *sc)
event.device = &iwsc->ibdev;
ib_dispatch_event(&event);
}
+
+ return (0);
}
static void
@@ -379,7 +380,7 @@ c4iw_mod_load(void)
if (rc != 0)
return (rc);
- rc = t4_register_uld(&c4iw_uld_info);
+ rc = t4_register_uld(&c4iw_uld_info, ULD_IWARP);
if (rc != 0) {
c4iw_cm_term();
return (rc);
@@ -398,7 +399,7 @@ c4iw_mod_unload(void)
c4iw_cm_term();
- if (t4_unregister_uld(&c4iw_uld_info) == EBUSY)
+ if (t4_unregister_uld(&c4iw_uld_info, ULD_IWARP) == EBUSY)
return (EBUSY);
return (0);
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index a8b243b764c8..a55d6f45cabf 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -209,12 +209,9 @@ enum {
struct adapter;
struct port_info;
struct uld_info {
- SLIST_ENTRY(uld_info) link;
- int refcount;
- int uld_id;
- int (*activate)(struct adapter *);
- int (*deactivate)(struct adapter *);
- void (*async_event)(struct adapter *);
+ int (*uld_activate)(struct adapter *);
+ int (*uld_deactivate)(struct adapter *);
+ int (*uld_stop)(struct adapter *);
};
struct tom_tunables {
@@ -242,8 +239,8 @@ struct tls_tunables {
};
#ifdef TCP_OFFLOAD
-int t4_register_uld(struct uld_info *);
-int t4_unregister_uld(struct uld_info *);
+int t4_register_uld(struct uld_info *, int);
+int t4_unregister_uld(struct uld_info *, int);
int t4_activate_uld(struct adapter *, int);
int t4_deactivate_uld(struct adapter *, int);
int uld_active(struct adapter *, int);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 11dd9e1d24a3..9ff56b93a0e6 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -263,7 +263,7 @@ static struct sx t4_list_lock;
SLIST_HEAD(, adapter) t4_list;
#ifdef TCP_OFFLOAD
static struct sx t4_uld_list_lock;
-SLIST_HEAD(, uld_info) t4_uld_list;
+struct uld_info *t4_uld_list[ULD_MAX + 1];
#endif
/*
@@ -864,7 +864,7 @@ static int release_clip_addr(struct adapter *, struct t4_clip_addr *);
#ifdef TCP_OFFLOAD
static int toe_capability(struct vi_info *, bool);
static int t4_deactivate_all_uld(struct adapter *);
-static void t4_async_event(struct adapter *);
+static void stop_all_uld(struct adapter *);
#endif
#ifdef KERN_TLS
static int ktls_capability(struct adapter *, bool);
@@ -3616,7 +3616,7 @@ fatal_error_task(void *arg, int pending)
int rc;
#ifdef TCP_OFFLOAD
- t4_async_event(sc);
+ stop_all_uld(sc);
#endif
if (atomic_testandclear_int(&sc->error_flags, ilog2(ADAP_CIM_ERR))) {
dump_cim_regs(sc);
@@ -12400,82 +12400,61 @@ toe_capability(struct vi_info *vi, bool enable)
* Add an upper layer driver to the global list.
*/
int
-t4_register_uld(struct uld_info *ui)
+t4_register_uld(struct uld_info *ui, int id)
{
- int rc = 0;
- struct uld_info *u;
+ int rc;
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
sx_xlock(&t4_uld_list_lock);
- SLIST_FOREACH(u, &t4_uld_list, link) {
- if (u->uld_id == ui->uld_id) {
- rc = EEXIST;
- goto done;
- }
+ if (t4_uld_list[id] != NULL)
+ rc = EEXIST;
+ else {
+ t4_uld_list[id] = ui;
+ rc = 0;
}
-
- SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
- ui->refcount = 0;
-done:
sx_xunlock(&t4_uld_list_lock);
return (rc);
}
int
-t4_unregister_uld(struct uld_info *ui)
+t4_unregister_uld(struct uld_info *ui, int id)
{
- int rc = EINVAL;
- struct uld_info *u;
+ if (id < 0 || id > ULD_MAX)
+ return (EINVAL);
sx_xlock(&t4_uld_list_lock);
-
- SLIST_FOREACH(u, &t4_uld_list, link) {
- if (u == ui) {
- if (ui->refcount > 0) {
- rc = EBUSY;
- goto done;
- }
-
- SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
- rc = 0;
- goto done;
- }
- }
-done:
+ MPASS(t4_uld_list[id] == ui);
+ t4_uld_list[id] = NULL;
sx_xunlock(&t4_uld_list_lock);
- return (rc);
+ return (0);
}
int
t4_activate_uld(struct adapter *sc, int id)
{
int rc;
- struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
if (id < 0 || id > ULD_MAX)
return (EINVAL);
- rc = EAGAIN; /* kldoad the module with this ULD and try again. */
-
- sx_slock(&t4_uld_list_lock);
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == id) {
- if (!(sc->flags & FULL_INIT_DONE)) {
- rc = adapter_init(sc);
- if (rc != 0)
- break;
- }
-
- rc = ui->activate(sc);
- if (rc == 0) {
- setbit(&sc->active_ulds, id);
- ui->refcount++;
- }
- break;
- }
+ /* Adapter needs to be initialized before any ULD can be activated. */
+ if (!(sc->flags & FULL_INIT_DONE)) {
+ rc = adapter_init(sc);
+ if (rc != 0)
+ return (rc);
}
+ sx_slock(&t4_uld_list_lock);
+ if (t4_uld_list[id] == NULL)
+ rc = EAGAIN; /* load the KLD with this ULD and try again. */
+ else {
+ rc = t4_uld_list[id]->uld_activate(sc);
+ if (rc == 0)
+ setbit(&sc->active_ulds, id);
+ }
sx_sunlock(&t4_uld_list_lock);
return (rc);
@@ -12485,27 +12464,20 @@ int
t4_deactivate_uld(struct adapter *sc, int id)
{
int rc;
- struct uld_info *ui;
ASSERT_SYNCHRONIZED_OP(sc);
if (id < 0 || id > ULD_MAX)
return (EINVAL);
- rc = ENXIO;
sx_slock(&t4_uld_list_lock);
-
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == id) {
- rc = ui->deactivate(sc);
- if (rc == 0) {
- clrbit(&sc->active_ulds, id);
- ui->refcount--;
- }
- break;
- }
+ if (t4_uld_list[id] == NULL)
+ rc = ENXIO;
+ else {
+ rc = t4_uld_list[id]->uld_deactivate(sc);
+ if (rc == 0)
+ clrbit(&sc->active_ulds, id);
}
-
sx_sunlock(&t4_uld_list_lock);
return (rc);
@@ -12514,25 +12486,20 @@ t4_deactivate_uld(struct adapter *sc, int id)
static int
t4_deactivate_all_uld(struct adapter *sc)
{
- int rc;
- struct uld_info *ui;
+ int i, rc;
rc = begin_synchronized_op(sc, NULL, SLEEP_OK, "t4detuld");
if (rc != 0)
return (ENXIO);
-
sx_slock(&t4_uld_list_lock);
-
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (isset(&sc->active_ulds, ui->uld_id)) {
- rc = ui->deactivate(sc);
- if (rc != 0)
- break;
- clrbit(&sc->active_ulds, ui->uld_id);
- ui->refcount--;
- }
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] == NULL || !uld_active(sc, i))
+ continue;
+ rc = t4_uld_list[i]->uld_deactivate(sc);
+ if (rc != 0)
+ break;
+ clrbit(&sc->active_ulds, i);
}
-
sx_sunlock(&t4_uld_list_lock);
end_synchronized_op(sc, 0);
@@ -12540,30 +12507,30 @@ t4_deactivate_all_uld(struct adapter *sc)
}
static void
-t4_async_event(struct adapter *sc)
+stop_all_uld(struct adapter *sc)
{
- struct uld_info *ui;
+ int i;
- if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4async") != 0)
+ if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4uldst") != 0)
return;
sx_slock(&t4_uld_list_lock);
- SLIST_FOREACH(ui, &t4_uld_list, link) {
- if (ui->uld_id == ULD_IWARP) {
- ui->async_event(sc);
- break;
- }
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] == NULL || !uld_active(sc, i) ||
+ t4_uld_list[i]->uld_stop == NULL)
+ continue;
+ (void) t4_uld_list[i]->uld_stop(sc);
}
sx_sunlock(&t4_uld_list_lock);
end_synchronized_op(sc, 0);
}
int
-uld_active(struct adapter *sc, int uld_id)
+uld_active(struct adapter *sc, int id)
{
- MPASS(uld_id >= 0 && uld_id <= ULD_MAX);
+ MPASS(id >= 0 && id <= ULD_MAX);
- return (isset(&sc->active_ulds, uld_id));
+ return (isset(&sc->active_ulds, id));
}
#endif
@@ -13111,7 +13078,6 @@ mod_event(module_t mod, int cmd, void *arg)
callout_init(&fatal_callout, 1);
#ifdef TCP_OFFLOAD
sx_init(&t4_uld_list_lock, "T4/T5 ULDs");
- SLIST_INIT(&t4_uld_list);
#endif
#ifdef INET6
t4_clip_modload();
@@ -13140,9 +13106,20 @@ mod_event(module_t mod, int cmd, void *arg)
case MOD_UNLOAD:
sx_xlock(&mlu);
if (--loaded == 0) {
+#ifdef TCP_OFFLOAD
+ int i;
+#endif
int tries;
taskqueue_free(reset_tq);
+
+ tries = 0;
+ while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
+ uprintf("%ju clusters with custom free routine "
+ "still is use.\n", t4_sge_extfree_refs());
+ pause("t4unload", 2 * hz);
+ }
+
sx_slock(&t4_list_lock);
if (!SLIST_EMPTY(&t4_list)) {
rc = EBUSY;
@@ -13151,20 +13128,14 @@ mod_event(module_t mod, int cmd, void *arg)
}
#ifdef TCP_OFFLOAD
sx_slock(&t4_uld_list_lock);
- if (!SLIST_EMPTY(&t4_uld_list)) {
- rc = EBUSY;
- sx_sunlock(&t4_uld_list_lock);
- sx_sunlock(&t4_list_lock);
- goto done_unload;
- }
-#endif
- tries = 0;
- while (tries++ < 5 && t4_sge_extfree_refs() != 0) {
- uprintf("%ju clusters with custom free routine "
- "still is use.\n", t4_sge_extfree_refs());
- pause("t4unload", 2 * hz);
+ for (i = 0; i <= ULD_MAX; i++) {
+ if (t4_uld_list[i] != NULL) {
+ rc = EBUSY;
+ sx_sunlock(&t4_uld_list_lock);
+ sx_sunlock(&t4_list_lock);
+ goto done_unload;
+ }
}
-#ifdef TCP_OFFLOAD
sx_sunlock(&t4_uld_list_lock);
#endif
sx_sunlock(&t4_list_lock);
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 645822b6f781..f9d8dcd706b7 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -91,9 +91,8 @@ static int t4_tom_activate(struct adapter *);
static int t4_tom_deactivate(struct adapter *);
static struct uld_info tom_uld_info = {
- .uld_id = ULD_TOM,
- .activate = t4_tom_activate,
- .deactivate = t4_tom_deactivate,
+ .uld_activate = t4_tom_activate,
+ .uld_deactivate = t4_tom_deactivate,
};
static void release_offload_resources(struct toepcb *);
@@ -2013,7 +2012,7 @@ t4_tom_mod_load(void)
toe6_protosw.pr_ctloutput = t4_ctloutput_tom;
toe6_protosw.pr_aio_queue = t4_aio_queue_tom;
- return (t4_register_uld(&tom_uld_info));
+ return (t4_register_uld(&tom_uld_info, ULD_TOM));
}
static void
@@ -2034,7 +2033,7 @@ t4_tom_mod_unload(void)
{
t4_iterate(tom_uninit, NULL);
- if (t4_unregister_uld(&tom_uld_info) == EBUSY)
+ if (t4_unregister_uld(&tom_uld_info, ULD_TOM) == EBUSY)
return (EBUSY);
t4_tls_mod_unload();