git: 69b2217cde24 - stable/13 - nfscl: Fix a use after free in nfscl_cleanupkext()
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Fri, 04 Mar 2022 16:10:50 UTC
The branch stable/13 has been updated by rmacklem:
URL: https://cgit.FreeBSD.org/src/commit/?id=69b2217cde24b3691f4cea14682baa75f48225ea
commit 69b2217cde24b3691f4cea14682baa75f48225ea
Author: Rick Macklem <rmacklem@FreeBSD.org>
AuthorDate: 2022-02-25 15:27:03 +0000
Commit: Rick Macklem <rmacklem@FreeBSD.org>
CommitDate: 2022-03-04 16:08:36 +0000
nfscl: Fix a use after free in nfscl_cleanupkext()
ler@, markj@ reported a use after free in nfscl_cleanupkext().
They also provided two possible causes:
- In nfscl_cleanup_common(), "own" is the owner string
owp->nfsow_owner. If we free that particular
owner structure, then in subsequent comparisons
"own" will point to freed memory.
- nfscl_cleanup_common() can free more than one owner, so the use
of LIST_FOREACH_SAFE() in nfscl_cleanupkext() is not sufficient.
I also believe there is a 3rd:
- If nfscl_freeopenowner() or nfscl_freelockowner() is called
without the NFSCLSTATE mutex held, this could race with
nfscl_cleanupkext().
This could happen when the exclusive lock is held
on the client, such as when delegations are being returned
or when recovering from NFSERR_EXPIRED.
This patch fixes them as follows:
1 - Copy the owner string to a local variable before the
nfscl_cleanup_common() call.
2 - Modify nfscl_cleanup_common() so that it will never free more
than the first matching element. Normally there should only
be one element in each list with a matching open/lock owner
anyhow (but there might be a bug that results in a duplicate).
This should guarantee that the FOREACH_SAFE loops in
nfscl_cleanupkext() are adequate.
3 - Acquire the NFSCLSTATE mutex in nfscl_freeopenowner()
and nfscl_freelockowner(), if it is not already held.
This serializes all of these calls with the ones done in
nfscl_cleanup_common().
(cherry picked from commit 1cedb4ea1a790f976ec6211c938dfaa23874b497)
---
sys/fs/nfsclient/nfs_clstate.c | 48 ++++++++++++++++++++++++++++++++++++------
1 file changed, 42 insertions(+), 6 deletions(-)
diff --git a/sys/fs/nfsclient/nfs_clstate.c b/sys/fs/nfsclient/nfs_clstate.c
index dc86d1fded12..a4704a04ac22 100644
--- a/sys/fs/nfsclient/nfs_clstate.c
+++ b/sys/fs/nfsclient/nfs_clstate.c
@@ -1644,8 +1644,22 @@ nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{
+ int owned;
+ /*
+ * Make sure the NFSCLSTATE mutex is held, to avoid races with
+ * calls in nfscl_renewthread() that do not hold a reference
+ * count on the nfsclclient and just the mutex.
+ * The mutex will not be held for calls done with the exclusive
+ * nfsclclient lock held, in particular, nfscl_hasexpired()
+ * and nfscl_recalldeleg() might do this.
+ */
+ owned = mtx_owned(NFSCLSTATEMUTEXPTR);
+ if (owned == 0)
+ NFSLOCKCLSTATE();
LIST_REMOVE(owp, nfsow_list);
+ if (owned == 0)
+ NFSUNLOCKCLSTATE();
free(owp, M_NFSCLOWNER);
if (local)
nfsstatsv1.cllocalopenowners--;
@@ -1660,8 +1674,22 @@ void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
struct nfscllock *lop, *nlop;
+ int owned;
+ /*
+ * Make sure the NFSCLSTATE mutex is held, to avoid races with
+ * calls in nfscl_renewthread() that do not hold a reference
+ * count on the nfsclclient and just the mutex.
+ * The mutex will not be held for calls done with the exclusive
+ * nfsclclient lock held, in particular, nfscl_hasexpired()
+ * and nfscl_recalldeleg() might do this.
+ */
+ owned = mtx_owned(NFSCLSTATEMUTEXPTR);
+ if (owned == 0)
+ NFSLOCKCLSTATE();
LIST_REMOVE(lp, nfsl_list);
+ if (owned == 0)
+ NFSUNLOCKCLSTATE();
LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
nfscl_freelock(lop, local);
}
@@ -1851,16 +1879,17 @@ static void
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
struct nfsclowner *owp, *nowp;
- struct nfscllockowner *lp, *nlp;
+ struct nfscllockowner *lp;
struct nfscldeleg *dp;
/* First, get rid of local locks on delegations. */
TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
- LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
+ LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
if (!NFSBCMP(lp->nfsl_owner, own, NFSV4CL_LOCKNAMELEN)) {
if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
panic("nfscllckw");
nfscl_freelockowner(lp, 1);
+ break;
}
}
}
@@ -1879,6 +1908,7 @@ nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
nfscl_freeopenowner(owp, 0);
else
owp->nfsow_defunct = 1;
+ break;
}
owp = nowp;
}
@@ -1894,6 +1924,7 @@ nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
struct nfsclopen *op;
struct nfscllockowner *lp, *nlp;
struct nfscldeleg *dp;
+ uint8_t own[NFSV4CL_LOCKNAMELEN];
/*
* All the pidhash locks must be acquired, since they are sx locks
@@ -1910,8 +1941,10 @@ nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
nfscl_emptylockowner(lp, lhp);
}
}
- if (nfscl_procdoesntexist(owp->nfsow_owner))
- nfscl_cleanup_common(clp, owp->nfsow_owner);
+ if (nfscl_procdoesntexist(owp->nfsow_owner)) {
+ memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+ nfscl_cleanup_common(clp, own);
+ }
}
/*
@@ -1923,8 +1956,11 @@ nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
*/
TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
- if (nfscl_procdoesntexist(lp->nfsl_owner))
- nfscl_cleanup_common(clp, lp->nfsl_owner);
+ if (nfscl_procdoesntexist(lp->nfsl_owner)) {
+ memcpy(own, lp->nfsl_owner,
+ NFSV4CL_LOCKNAMELEN);
+ nfscl_cleanup_common(clp, own);
+ }
}
}
NFSUNLOCKCLSTATE();