git: dd08b84e35b6 - main - nfscl: Fix a use after free in nfscl_cleanupkext()
Date: Tue, 22 Feb 2022 22:24:20 UTC
The branch main has been updated by rmacklem:
URL: https://cgit.FreeBSD.org/src/commit/?id=dd08b84e35b6fdee0df5fd0e0533cd361dec71cb
commit dd08b84e35b6fdee0df5fd0e0533cd361dec71cb
Author: Rick Macklem <rmacklem@FreeBSD.org>
AuthorDate: 2022-02-22 22:21:43 +0000
Commit: Rick Macklem <rmacklem@FreeBSD.org>
CommitDate: 2022-02-22 22:21:43 +0000
nfscl: Fix a use after free in nfscl_cleanupkext()
ler@, markj@ reported a use after free in nfscl_cleanupkext().
They also provided two possible causes:
- In nfscl_cleanup_common(), "own" is the owner string
owp->nfsow_owner. If we free that particular owner structure,
then in subsequent comparisons "own" will point to freed memory.
- nfscl_cleanup_common() can free more than one owner, so the use
of LIST_FOREACH_SAFE() in nfscl_cleanupkext() is not sufficient
(see the sketch below this list).
I also believe there is a 3rd:
- If nfscl_freeopenowner() or nfscl_freelockowner() is called
without the NFSCLSTATE mutex held, this could race with
nfscl_cleanupkext().
This could happen when the exclusive lock is held
on the client, such as when delegations are being returned.
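For illustration, here is a minimal userland sketch of the first two
problems, using FreeBSD's <sys/queue.h> macros and simplified stand-in
types (struct owner, cleanup_all()) rather than the real nfscl
structures; it shows the shape of the bug, not the code being patched:

#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>

#define OWNERLEN 8	/* stands in for NFSV4CL_LOCKNAMELEN */

struct owner {
	LIST_ENTRY(owner)	link;
	char			name[OWNERLEN];
};
static LIST_HEAD(, owner) ownhead = LIST_HEAD_INITIALIZER(ownhead);

/* Frees every entry whose name matches "own". */
static void
cleanup_all(const char *own)
{
	struct owner *o, *no;

	LIST_FOREACH_SAFE(o, &ownhead, link, no) {
		/* After the first free, "own" may already be dangling. */
		if (memcmp(o->name, own, OWNERLEN) == 0) {
			LIST_REMOVE(o, link);
			free(o);
		}
	}
}

static void
broken_caller(void)
{
	struct owner *o, *no;

	LIST_FOREACH_SAFE(o, &ownhead, link, no) {
		/*
		 * Problem 1: the string passed as "own" is storage inside
		 * the very entry cleanup_all() is about to free, yet it is
		 * still dereferenced for the later comparisons.
		 * Problem 2: cleanup_all() may also free "no", the single
		 * next pointer LIST_FOREACH_SAFE saved, so the _SAFE
		 * variant does not make this iteration safe.
		 */
		cleanup_all(o->name);
	}
}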
This patch fixes them as follows:
1 - Copy the owner string to a local variable before the
nfscl_cleanup_common() call.
2 - Modify nfscl_cleanup_common() to return whether or not a
free was done. When a free was done, do a goto to restart the
loop instead of using LIST_FOREACH_SAFE(), which was not safe in
this case (distilled in the sketch after this list).
3 - Acquire the NFSCLSTATE mutex in nfscl_freeopenowner()
and nfscl_freelockowner(), if it is not already held.
This serializes all of these calls with the ones done in
nfscl_cleanup_common().
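Distilling fixes 1 and 2 onto the stand-in types from the sketch above
(again an illustration of the pattern, not the patch itself): copy the
key out of the entry before the call, have the helper report whether it
freed anything, and rescan from the list head when it did. The real
change, including fix 3's conditional mtx_owned()/NFSLOCKCLSTATE()
handling in nfscl_freeopenowner() and nfscl_freelockowner(), is in the
diff below.

/* Frees every matching entry and reports whether anything was freed. */
static int
cleanup_common(const char *own)
{
	struct owner *o, *no;
	int didfree = 0;

	LIST_FOREACH_SAFE(o, &ownhead, link, no) {
		if (memcmp(o->name, own, OWNERLEN) == 0) {
			LIST_REMOVE(o, link);
			free(o);
			didfree = 1;
		}
	}
	return (didfree);
}

static void
fixed_caller(void)
{
	struct owner *o;
	char own[OWNERLEN];

tryagain:
	LIST_FOREACH(o, &ownhead, link) {
		/* Fix 1: compare against a local copy, not entry storage. */
		memcpy(own, o->name, OWNERLEN);
		/* Fix 2: if anything was freed, restart from the head. */
		if (cleanup_common(own) != 0)
			goto tryagain;
	}
}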
Reported by: ler
Reviewed by: markj
MFC after: 1 week
Differential Revision: https://reviews.freebsd.org/D34334
---
sys/fs/nfsclient/nfs_clstate.c | 70 +++++++++++++++++++++++++++++++++++-------
1 file changed, 59 insertions(+), 11 deletions(-)
diff --git a/sys/fs/nfsclient/nfs_clstate.c b/sys/fs/nfsclient/nfs_clstate.c
index 5262986645cd..c37ff8a8ab11 100644
--- a/sys/fs/nfsclient/nfs_clstate.c
+++ b/sys/fs/nfsclient/nfs_clstate.c
@@ -158,7 +158,7 @@ static int nfsrpc_reopen(struct nfsmount *, u_int8_t *, int, u_int32_t,
static void nfscl_freedeleg(struct nfscldeleghead *, struct nfscldeleg *,
bool);
static int nfscl_errmap(struct nfsrv_descript *, u_int32_t);
-static void nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
+static u_int nfscl_cleanup_common(struct nfsclclient *, u_int8_t *);
static int nfscl_recalldeleg(struct nfsclclient *, struct nfsmount *,
struct nfscldeleg *, vnode_t, struct ucred *, NFSPROC_T *, int,
vnode_t *);
@@ -1642,8 +1642,21 @@ nfscl_expireopen(struct nfsclclient *clp, struct nfsclopen *op,
static void
nfscl_freeopenowner(struct nfsclowner *owp, int local)
{
+ int owned;
+ /*
+ * Make sure the NFSCLSTATE mutex is held, to avoid races with
+ * calls in nfscl_renewthread() that do not hold a reference
+ * count on the nfsclclient and just the mutex.
+ * The mutex will not be held for calls done with the exclusive
+ * nfsclclient lock held.
+ */
+ owned = mtx_owned(NFSCLSTATEMUTEXPTR);
+ if (owned == 0)
+ NFSLOCKCLSTATE();
LIST_REMOVE(owp, nfsow_list);
+ if (owned == 0)
+ NFSUNLOCKCLSTATE();
free(owp, M_NFSCLOWNER);
if (local)
nfsstatsv1.cllocalopenowners--;
@@ -1658,8 +1671,21 @@ void
nfscl_freelockowner(struct nfscllockowner *lp, int local)
{
struct nfscllock *lop, *nlop;
+ int owned;
+ /*
+ * Make sure the NFSCLSTATE mutex is held, to avoid races with
+ * calls in nfscl_renewthread() that do not hold a reference
+ * count on the nfsclclient and just the mutex.
+ * The mutex will not be held for calls done with the exclusive
+ * nfsclclient lock held.
+ */
+ owned = mtx_owned(NFSCLSTATEMUTEXPTR);
+ if (owned == 0)
+ NFSLOCKCLSTATE();
LIST_REMOVE(lp, nfsl_list);
+ if (owned == 0)
+ NFSUNLOCKCLSTATE();
LIST_FOREACH_SAFE(lop, &lp->nfsl_lock, nfslo_list, nlop) {
nfscl_freelock(lop, local);
}
@@ -1841,17 +1867,24 @@ nfscl_expireclient(struct nfsclclient *clp, struct nfsmount *nmp,
}
}
+#define FREED_OPENOWNER 0x1
+#define FREED_LOCKOWNER 0x2
+
/*
* This function must be called after the process represented by "own" has
* exited. Must be called with CLSTATE lock held.
+ * Return the FREED_xxx flag bits, since the caller needs to know if either
+ * the open owner or lock owner lists have changed.
*/
-static void
+static u_int
nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
{
struct nfsclowner *owp, *nowp;
struct nfscllockowner *lp, *nlp;
struct nfscldeleg *dp;
+ u_int didfree;
+ didfree = 0;
/* First, get rid of local locks on delegations. */
TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
@@ -1859,6 +1892,7 @@ nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
if ((lp->nfsl_rwlock.nfslock_lock & NFSV4LOCK_WANTED))
panic("nfscllckw");
nfscl_freelockowner(lp, 1);
+ didfree |= FREED_LOCKOWNER;
}
}
}
@@ -1873,13 +1907,15 @@ nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
* here. For that case, let the renew thread clear
* out the OpenOwner later.
*/
- if (LIST_EMPTY(&owp->nfsow_open))
+ if (LIST_EMPTY(&owp->nfsow_open)) {
nfscl_freeopenowner(owp, 0);
- else
+ didfree |= FREED_OPENOWNER;
+ } else
owp->nfsow_defunct = 1;
}
owp = nowp;
}
+ return (didfree);
}
/*
@@ -1888,10 +1924,11 @@ nfscl_cleanup_common(struct nfsclclient *clp, u_int8_t *own)
static void
nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
{
- struct nfsclowner *owp, *nowp;
+ struct nfsclowner *owp;
struct nfsclopen *op;
struct nfscllockowner *lp, *nlp;
struct nfscldeleg *dp;
+ uint8_t own[NFSV4CL_LOCKNAMELEN];
/*
* All the pidhash locks must be acquired, since they are sx locks
@@ -1901,15 +1938,20 @@ nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
*/
pidhash_slockall();
NFSLOCKCLSTATE();
- LIST_FOREACH_SAFE(owp, &clp->nfsc_owner, nfsow_list, nowp) {
+tryagain:
+ LIST_FOREACH(owp, &clp->nfsc_owner, nfsow_list) {
LIST_FOREACH(op, &owp->nfsow_open, nfso_list) {
LIST_FOREACH_SAFE(lp, &op->nfso_lock, nfsl_list, nlp) {
if (LIST_EMPTY(&lp->nfsl_lock))
nfscl_emptylockowner(lp, lhp);
}
}
- if (nfscl_procdoesntexist(owp->nfsow_owner))
- nfscl_cleanup_common(clp, owp->nfsow_owner);
+ if (nfscl_procdoesntexist(owp->nfsow_owner)) {
+ memcpy(own, owp->nfsow_owner, NFSV4CL_LOCKNAMELEN);
+ if ((nfscl_cleanup_common(clp, own) &
+ FREED_OPENOWNER) != 0)
+ goto tryagain;
+ }
}
/*
@@ -1920,9 +1962,15 @@ nfscl_cleanupkext(struct nfsclclient *clp, struct nfscllockownerfhhead *lhp)
* nfscl_cleanup_common().
*/
TAILQ_FOREACH(dp, &clp->nfsc_deleg, nfsdl_list) {
- LIST_FOREACH_SAFE(lp, &dp->nfsdl_lock, nfsl_list, nlp) {
- if (nfscl_procdoesntexist(lp->nfsl_owner))
- nfscl_cleanup_common(clp, lp->nfsl_owner);
+tryagain2:
+ LIST_FOREACH(lp, &dp->nfsdl_lock, nfsl_list) {
+ if (nfscl_procdoesntexist(lp->nfsl_owner)) {
+ memcpy(own, lp->nfsl_owner,
+ NFSV4CL_LOCKNAMELEN);
+ if ((nfscl_cleanup_common(clp, own) &
+ FREED_LOCKOWNER) != 0)
+ goto tryagain2;
+ }
}
}
NFSUNLOCKCLSTATE();