git: 305f85a3d6f2 - main - rman: Embed the mutex in struct rman instead of using a separate allocation
Date: Tue, 09 Dec 2025 15:59:55 UTC
The branch main has been updated by jhb:
URL: https://cgit.FreeBSD.org/src/commit/?id=305f85a3d6f2ec32ee8178413a716de7c0a73eaa
commit 305f85a3d6f2ec32ee8178413a716de7c0a73eaa
Author: John Baldwin <jhb@FreeBSD.org>
AuthorDate: 2025-12-09 15:59:30 +0000
Commit: John Baldwin <jhb@FreeBSD.org>
CommitDate: 2025-12-09 15:59:30 +0000
rman: Embed the mutex in struct rman instead of using a separate allocation
This used a separate allocation when rman was first imported (back
when the lock was a pre-SMPng "simplelock" instead of a mutex).
Reported by: des
Reviewed by: des
Differential Revision: https://reviews.freebsd.org/D54143
---
sys/kern/subr_rman.c | 68 +++++++++++++++++++++++++---------------------------
sys/sys/rman.h | 3 ++-
2 files changed, 34 insertions(+), 37 deletions(-)
diff --git a/sys/kern/subr_rman.c b/sys/kern/subr_rman.c
index b8528104ba28..e09ca10f0a2e 100644
--- a/sys/kern/subr_rman.c
+++ b/sys/kern/subr_rman.c
@@ -131,10 +131,7 @@ rman_init(struct rman *rm)
panic("implement RMAN_GAUGE");
TAILQ_INIT(&rm->rm_list);
- rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
- if (rm->rm_mtx == NULL)
- return ENOMEM;
- mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
+ mtx_init(&rm->rm_mtx, "rman", NULL, MTX_DEF);
mtx_lock(&rman_mtx);
TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
@@ -159,7 +156,7 @@ rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
r->r_end = end;
r->r_rm = rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
/* Skip entries before us. */
TAILQ_FOREACH(s, &rm->rm_list, r_link) {
@@ -216,7 +213,7 @@ rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
}
}
out:
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return rv;
}
@@ -235,10 +232,10 @@ rman_fini(struct rman *rm)
{
struct resource_i *r;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
if (r->r_flags & RF_ALLOCATED) {
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return EBUSY;
}
}
@@ -252,12 +249,11 @@ rman_fini(struct rman *rm)
TAILQ_REMOVE(&rm->rm_list, r, r_link);
free(r, M_RMAN);
}
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
mtx_lock(&rman_mtx);
TAILQ_REMOVE(&rman_head, rm, rm_link);
mtx_unlock(&rman_mtx);
- mtx_destroy(rm->rm_mtx);
- free(rm->rm_mtx, M_RMAN);
+ mtx_destroy(&rm->rm_mtx);
return 0;
}
@@ -267,16 +263,16 @@ rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
struct resource_i *r;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
TAILQ_FOREACH(r, &rm->rm_list, r_link) {
if (!(r->r_flags & RF_ALLOCATED)) {
*start = r->r_start;
*end = r->r_end;
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (0);
}
}
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (ENOENT);
}
@@ -285,16 +281,16 @@ rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
struct resource_i *r;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
if (!(r->r_flags & RF_ALLOCATED)) {
*start = r->r_start;
*end = r->r_end;
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (0);
}
}
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (ENOENT);
}
@@ -323,7 +319,7 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
* allocated resource.
*/
rm = r->r_rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
#ifdef INVARIANTS
TAILQ_FOREACH(s, &rm->rm_list, r_link) {
if (s == r)
@@ -345,12 +341,12 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
*/
if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
s->r_start > start)) {
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (EBUSY);
}
if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
t->r_end < end)) {
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (EBUSY);
}
@@ -380,7 +376,7 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
} else
t->r_start = end + 1;
}
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
/*
* Handle the shrinking cases that require allocating a new
@@ -392,7 +388,7 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
new->r_start = r->r_start;
new->r_end = start - 1;
new->r_rm = rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
r->r_start = start;
s = TAILQ_PREV(r, resource_head, r_link);
if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
@@ -400,14 +396,14 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
free(new, M_RMAN);
} else
TAILQ_INSERT_BEFORE(r, new, r_link);
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
}
if (end < r->r_end) {
new = int_alloc_resource(M_WAITOK);
new->r_start = end + 1;
new->r_end = r->r_end;
new->r_rm = rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
r->r_end = end;
t = TAILQ_NEXT(r, r_link);
if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
@@ -415,7 +411,7 @@ rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
free(new, M_RMAN);
} else
TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
}
return (0);
}
@@ -441,7 +437,7 @@ rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
("invalid flags %#x", flags));
new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
r = TAILQ_FIRST(&rm->rm_list);
if (r == NULL)
@@ -628,7 +624,7 @@ rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
*/
out:
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (rv == NULL ? NULL : &rv->r_r);
}
@@ -640,9 +636,9 @@ rman_activate_resource(struct resource *re)
r = re->__r_i;
rm = r->r_rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
r->r_flags |= RF_ACTIVE;
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return 0;
}
@@ -652,9 +648,9 @@ rman_deactivate_resource(struct resource *r)
struct rman *rm;
rm = r->__r_i->r_rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
r->__r_i->r_flags &= ~RF_ACTIVE;
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return 0;
}
@@ -761,9 +757,9 @@ rman_release_resource(struct resource *re)
r = re->__r_i;
rm = r->r_rm;
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
rv = int_rman_release_resource(rm, r);
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (rv);
}
@@ -991,7 +987,7 @@ sysctl_rman(SYSCTL_HANDLER_ARGS)
/*
* Find the indexed resource and return it.
*/
- mtx_lock(rm->rm_mtx);
+ mtx_lock(&rm->rm_mtx);
TAILQ_FOREACH(res, &rm->rm_list, r_link) {
if (res->r_sharehead != NULL) {
LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
@@ -1003,7 +999,7 @@ sysctl_rman(SYSCTL_HANDLER_ARGS)
else if (res_idx-- == 0)
goto found;
}
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
return (ENOENT);
found:
@@ -1028,7 +1024,7 @@ found:
ures.r_size = res->r_end - res->r_start + 1;
ures.r_flags = res->r_flags;
- mtx_unlock(rm->rm_mtx);
+ mtx_unlock(&rm->rm_mtx);
error = SYSCTL_OUT(req, &ures, sizeof(ures));
return (error);
}
diff --git a/sys/sys/rman.h b/sys/sys/rman.h
index 323da4a62201..2479942c3217 100644
--- a/sys/sys/rman.h
+++ b/sys/sys/rman.h
@@ -35,6 +35,7 @@
#ifndef _KERNEL
#include <sys/queue.h>
#else
+#include <sys/_mutex.h>
#include <machine/_bus.h>
#include <machine/resource.h>
#endif
@@ -112,7 +113,7 @@ TAILQ_HEAD(resource_head, resource_i);
struct rman {
struct resource_head rm_list;
- struct mtx *rm_mtx; /* mutex used to protect rm_list */
+ struct mtx rm_mtx; /* mutex used to protect rm_list */
TAILQ_ENTRY(rman) rm_link; /* link in list of all rmans */
rman_res_t rm_start; /* index of globally first entry */
rman_res_t rm_end; /* index of globally last entry */
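
For readers less familiar with the pattern, here is a minimal userspace sketch of what the commit does, using pthread mutexes and hypothetical names (struct manager, manager_init, manager_fini) rather than rman's own mtx(9)-based code: with the lock embedded in the structure by value, initialization no longer has a separate allocation that can fail, and teardown no longer needs a matching free().

/*
 * Minimal sketch (hypothetical names, pthread instead of the kernel's
 * mtx(9)) of the pattern this commit applies: embed the lock in the
 * structure instead of allocating it separately.
 */
#include <pthread.h>
#include <sys/queue.h>

struct region;
TAILQ_HEAD(region_head, region);

struct manager {
	struct region_head	list;
	pthread_mutex_t		lock;	/* embedded, not a pointer */
};

static int
manager_init(struct manager *m)
{
	TAILQ_INIT(&m->list);
	/* No separate allocation that could fail with ENOMEM. */
	pthread_mutex_init(&m->lock, NULL);
	return (0);
}

static void
manager_fini(struct manager *m)
{
	pthread_mutex_destroy(&m->lock);
	/* Nothing left to free for the lock itself. */
}

The header hunk adds <sys/_mutex.h>, which supplies the struct mtx definition so that the mutex can be embedded by value in struct rman without pulling the full mutex API into sys/rman.h.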