git: 3364fdf16a00 - stable/13 - ufs: handle LoR between snap lock and vnode lock
- Go to: [ bottom of page ] [ top of archives ] [ this month ]
Date: Wed, 16 Feb 2022 00:25:16 UTC
The branch stable/13 has been updated by mckusick:
URL: https://cgit.FreeBSD.org/src/commit/?id=3364fdf16a006eb3546e3192f0d4803f88fc4e9a
commit 3364fdf16a006eb3546e3192f0d4803f88fc4e9a
Author: Kirk McKusick <mckusick@FreeBSD.org>
AuthorDate: 2022-01-28 07:00:51 +0000
Commit: Kirk McKusick <mckusick@FreeBSD.org>
CommitDate: 2022-02-16 00:01:23 +0000
ufs: handle LoR between snap lock and vnode lock
(cherry picked from commit ddf162d1d15f63e871fa1e44334c9461772b7f7a)
Differential Revision: https://reviews.freebsd.org/D33946
---
sys/ufs/ffs/ffs_snapshot.c | 86 ++++++++++++++++++++++++++++------------------
1 file changed, 53 insertions(+), 33 deletions(-)
diff --git a/sys/ufs/ffs/ffs_snapshot.c b/sys/ufs/ffs/ffs_snapshot.c
index cb5c4f5e2481..486288c7e659 100644
--- a/sys/ufs/ffs/ffs_snapshot.c
+++ b/sys/ufs/ffs/ffs_snapshot.c
@@ -175,6 +175,7 @@ static int mapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
struct fs *, ufs_lbn_t, int);
static int readblock(struct vnode *vp, struct buf *, ufs2_daddr_t);
static void try_free_snapdata(struct vnode *devvp);
+static void revert_snaplock(struct vnode *, struct vnode *, struct snapdata *);
static struct snapdata *ffs_snapdata_acquire(struct vnode *devvp);
static int ffs_bp_snapblk(struct vnode *, struct buf *);
@@ -1649,7 +1650,7 @@ ffs_snapremove(vp)
struct buf *ibp;
struct fs *fs;
ufs2_daddr_t numblks, blkno, dblk;
- int error, i, last, loc;
+ int error, last, loc;
struct snapdata *sn;
ip = VTOI(vp);
@@ -1667,20 +1668,10 @@ ffs_snapremove(vp)
sn = devvp->v_rdev->si_snapdata;
TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap);
ip->i_nextsnap.tqe_prev = 0;
- VI_UNLOCK(devvp);
- lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
- for (i = 0; i < sn->sn_lock.lk_recurse; i++)
- lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
- KASSERT(vp->v_vnlock == &sn->sn_lock,
- ("ffs_snapremove: lost lock mutation"));
- vp->v_vnlock = &vp->v_lock;
- VI_LOCK(devvp);
- while (sn->sn_lock.lk_recurse > 0)
- lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
- lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
+ revert_snaplock(vp, devvp, sn);
try_free_snapdata(devvp);
- } else
- VI_UNLOCK(devvp);
+ }
+ VI_UNLOCK(devvp);
/*
* Clear all BLK_NOCOPY fields. Pass any block claims to other
* snapshots that want them (see ffs_snapblkfree below).
@@ -2156,27 +2147,18 @@ ffs_snapshot_unmount(mp)
xp->i_nextsnap.tqe_prev = 0;
lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE,
VI_MTX(devvp));
- /*
- * Avoid LOR with above snapshot lock. The LK_NOWAIT should
- * never fail as the lock is currently unused. Rather than
- * panic, we recover by doing the blocking lock.
- */
- if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
- printf("ffs_snapshot_unmount: Unexpected LK_NOWAIT "
- "failure\n");
- lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
- }
- KASSERT(vp->v_vnlock == &sn->sn_lock,
- ("ffs_snapshot_unmount: lost lock mutation"));
- vp->v_vnlock = &vp->v_lock;
+ VI_LOCK(devvp);
+ revert_snaplock(vp, devvp, sn);
lockmgr(&vp->v_lock, LK_RELEASE, NULL);
- lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
- if (xp->i_effnlink > 0)
+ if (xp->i_effnlink > 0) {
+ VI_UNLOCK(devvp);
vrele(vp);
- VI_LOCK(devvp);
+ VI_LOCK(devvp);
+ }
sn = devvp->v_rdev->si_snapdata;
}
try_free_snapdata(devvp);
+ VI_UNLOCK(devvp);
}
/*
@@ -2680,10 +2662,8 @@ try_free_snapdata(struct vnode *devvp)
sn = devvp->v_rdev->si_snapdata;
if (sn == NULL || TAILQ_FIRST(&sn->sn_head) != NULL ||
- (devvp->v_vflag & VV_COPYONWRITE) == 0) {
- VI_UNLOCK(devvp);
+ (devvp->v_vflag & VV_COPYONWRITE) == 0)
return;
- }
devvp->v_rdev->si_snapdata = NULL;
devvp->v_vflag &= ~VV_COPYONWRITE;
@@ -2695,6 +2675,46 @@ try_free_snapdata(struct vnode *devvp)
if (snapblklist != NULL)
free(snapblklist, M_UFSMNT);
ffs_snapdata_free(sn);
+ VI_LOCK(devvp);
+}
+
+/*
+ * Revert a vnode lock from using the snapshot lock back to its own lock.
+ *
+ * Acquire a lock on the vnode's own lock and release the lock on the
+ * snapshot lock. If there are any recursions on the snapshot lock
+ * get the same number of recursions on the vnode's own lock.
+ */
+static void
+revert_snaplock(vp, devvp, sn)
+ struct vnode *vp;
+ struct vnode *devvp;
+ struct snapdata *sn;
+{
+ int i;
+
+ ASSERT_VI_LOCKED(devvp, "revert_snaplock");
+ /*
+ * Avoid LOR with snapshot lock. The LK_NOWAIT should
+ * never fail as the lock is currently unused. Rather than
+ * panic, we recover by doing the blocking lock.
+ */
+ for (i = 0; i <= sn->sn_lock.lk_recurse; i++) {
+ if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT |
+ LK_INTERLOCK, VI_MTX(devvp)) != 0) {
+ printf("revert_snaplock: Unexpected LK_NOWAIT "
+ "failure\n");
+ lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
+ VI_MTX(devvp));
+ }
+ VI_LOCK(devvp);
+ }
+ KASSERT(vp->v_vnlock == &sn->sn_lock,
+ ("revert_snaplock: lost lock mutation"));
+ vp->v_vnlock = &vp->v_lock;
+ while (sn->sn_lock.lk_recurse > 0)
+ lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
+ lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
}
static struct snapdata *