Possible kqueue-related issue on STABLE/RC.

Patrick Lamaiziere patfbsd at davenulle.org
Tue Sep 24 09:47:41 UTC 2013


On Tue, 24 Sep 2013 11:29:09 +0300,
Konstantin Belousov <kostikbel at gmail.com> wrote:

Hello,

...

> > > > OK, this has been MFCed to 9.2-STABLE, but I still see the panic
> > > > with today's 9.2-STABLE (revision 255811). It may be an
> > > > improvement: before, the box panicked within minutes; now it
> > > > takes hours (still running poudriere).
> > > > 
> > > > panic:
> > > > fault code              = supervisor read data, page not present
> > > > instruction pointer     = 0x20:0xffffffff808ebfcd
> > > > stack pointer           = 0x28:0xffffff824c2e0630
> > > > frame pointer           = 0x28:0xffffff824c2e06a0
> > > > code segment            = base 0x0, limit 0xfffff, type 0x1b
> > > >                         = DPL 0, pres 1, long 1, def32 0, gran 1
> > > > processor eflags        = interrupt enabled, resume, IOPL = 0
> > > > current process         = 54243 (gvfsd-trash)
> > > > trap number             = 12
> > > > panic: page fault
> > > > cpuid = 2
> > > > KDB: stack backtrace:
> > > > #0 0xffffffff80939ad6 at kdb_backtrace+0x66
> > > > #1 0xffffffff808ffacd at panic+0x1cd
> > > > #2 0xffffffff80cdfbe9 at trap_fatal+0x289
> > > > #3 0xffffffff80cdff4f at trap_pfault+0x20f
> > > > #4 0xffffffff80ce0504 at trap+0x344
> > > > #5 0xffffffff80cc9b43 at calltrap+0x8
> > > > #6 0xffffffff8099d043 at filt_vfsvnode+0xf3
> > > > #7 0xffffffff808c4793 at kqueue_register+0x3e3
> > > > #8 0xffffffff808c4de8 at kern_kevent+0x108
> > > > #9 0xffffffff808c5950 at sys_kevent+0x90
> > > > #10 0xffffffff80cdf3a8 at amd64_syscall+0x5d8
> > > > #11 0xffffffff80cc9e27 at Xfast_syscall+0xf7
> > > > 
> > > > Full core.txt:
> > > > http://user.lamaiziere.net/patrick/public/vfs_vnode-core.txt.0
> > > 
> > > To start, please load the core into kgdb and, for
> > > frame 8
> > > p *kn
> > 
> > (kgdb) frame 8
> > #8  0xffffffff8099d043 in filt_vfsvnode (kn=0xfffffe0147a7f000,
> > hint=0) at /usr/src/sys/kern/vfs_subr.c:4600
> > 4600		VI_LOCK(vp);
> > (kgdb) p *kn
> > $1 = {kn_link = {sle_next = 0x0}, kn_selnext = {sle_next = 0x0}, 
> >   kn_knlist = 0x0, kn_tqe = {tqe_next = 0x0, tqe_prev = 0x0}, 
> >   kn_kq = 0xfffffe01079a6200, kn_kevent = {ident = 62, filter = -4, 
> >     flags = 32784, fflags = 0, data = 0, udata = 0x0}, kn_status =
> > 24, kn_sfflags = 47, kn_sdata = 0, kn_ptr = {p_fp =
> > 0xfffffe016949e190, p_proc = 0xfffffe016949e190, p_aio =
> > 0xfffffe016949e190, p_lio = 0xfffffe016949e190}, kn_fop =
> > 0xffffffff812f0, kn_hook = 0xfffffe0119d0b1f8, kn_hookid = 0}
> From kgdb, please also do
> p *(struct vnode *)0xfffffe0119d0b1f8

With a kernel built with debugging info, this panic shows up as an
mtx_lock() of a destroyed mutex:

http://user.lamaiziere.net/patrick/public/panic_mtx_lock.txt

panic: mtx_lock() of destroyed mutex @ /usr/src/sys/kern/vfs_subr.c:4600
cpuid = 2
KDB: stack backtrace:
#0 0xffffffff80920286 at kdb_backtrace+0x66
#1 0xffffffff808e738d at panic+0x1cd
#2 0xffffffff808d58d6 at _mtx_lock_flags+0x116
#3 0xffffffff8098143b at filt_vfsvnode+0x3b
#4 0xffffffff808b213a at kqueue_register+0x4ca
#5 0xffffffff808b2688 at kern_kevent+0x108
#6 0xffffffff808b3190 at sys_kevent+0x90
#7 0xffffffff80cbd975 at amd64_syscall+0x2f5
#8 0xffffffff80ca8557 at Xfast_syscall+0xf7

(kgdb) frame 5
#5  0xffffffff808b213a in kqueue_register (kq=0xfffffe00ddc98900, kev=0xffffff824bb5f880, td=0xfffffe00b1e7f000, waitok=1) at /usr/src/sys/kern/kern_event.c:1136
1136		event = kn->kn_fop->f_event(kn, 0);

(kgdb) p *kn
$1 = {kn_link = {sle_next = 0x0}, kn_selnext = {sle_next = 0xfffffe011c232b00}, kn_knlist = 0x0, kn_tqe = {tqe_next = 0x0, tqe_prev = 0x0}, kn_kq = 0xfffffe00ddc98900, 
  kn_kevent = {ident = 62, filter = -4, flags = 32784, fflags = 0, data = 0, udata = 0x0}, kn_status = 24, kn_sfflags = 47, kn_sdata = 0, kn_ptr = {
    p_fp = 0xfffffe00ddd4d870, p_proc = 0xfffffe00ddd4d870, p_aio = 0xfffffe00ddd4d870, p_lio = 0xfffffe00ddd4d870}, kn_fop = 0xffffffff812fcca0, 
  kn_hook = 0xfffffe02064a6000, kn_hookid = 0}

(kgdb) p *(struct vnode *)0xfffffe02064a6000
$2 = {v_type = VBAD, v_tag = 0xffffffff80d89084 "none", v_op = 0x0, v_data = 0x0, v_mount = 0x0, v_nmntvnodes = {tqe_next = 0xfffffe020d3e6000, 
    tqe_prev = 0xfffffe0086625a68}, v_un = {vu_mount = 0x0, vu_socket = 0x0, vu_cdev = 0x0, vu_fifoinfo = 0x0}, v_hashlist = {le_next = 0x0, 
    le_prev = 0xffffff8000de9698}, v_hash = 238022, v_cache_src = {lh_first = 0x0}, v_cache_dst = {tqh_first = 0x0, tqh_last = 0xfffffe02064a6060}, v_cache_dd = 0x0, 
  v_cstart = 0, v_lasta = 0, v_lastw = 0, v_clen = 0, v_lock = {lock_object = {lo_name = 0xffffffff80f56e48 "ufs", lo_flags = 91881472, lo_data = 0, 
      lo_witness = 0xffffff80006c3400}, lk_lock = 1, lk_exslpfail = 0, lk_timo = 51, lk_pri = 96, lk_stack = {depth = 12, pcs = {18446744071571296822, 
        18446744071573768556, 18446744071576111075, 18446744071606114523, 18446744071576111075, 18446744071572113927, 18446744071572067653, 18446744071606111219, 
        18446744071572016126, 18446744071572018094, 18446744071575427445, 18446744071575340375, 0, 0, 0, 0, 0, 0}}}, v_interlock = {lock_object = {
      lo_name = 0xffffffff80f6f8a3 "vnode interlock", lo_flags = 16908288, lo_data = 0, lo_witness = 0xffffff80006a9600}, mtx_lock = 6}, v_vnlock = 0xfffffe02064a6098, 
  v_holdcnt = 0, v_usecount = 0, v_iflag = 128, v_vflag = 0, v_writecount = 0, v_actfreelist = {tqe_next = 0xfffffe0086625a40, tqe_prev = 0xfffffe020d3e61a8}, 
  v_bufobj = {bo_mtx = {lock_object = {lo_name = 0xffffffff80f781ec "bufobj interlock", lo_flags = 16908288, lo_data = 0, lo_witness = 0xffffff80006b1680}, 
      mtx_lock = 6}, bo_clean = {bv_hd = {tqh_first = 0x0, tqh_last = 0xfffffe02064a61d8}, bv_root = 0x0, bv_cnt = 0}, bo_dirty = {bv_hd = {tqh_first = 0x0, 
        tqh_last = 0xfffffe02064a61f8}, bv_root = 0x0, bv_cnt = 0}, bo_numoutput = 0, bo_flag = 0, bo_ops = 0xffffffff812f9960, bo_bsize = 32768, bo_object = 0x0, 
    bo_synclist = {le_next = 0xfffffe001d797bf8, le_prev = 0xfffffe005f632240}, bo_private = 0xfffffe02064a6000, __bo_vnode = 0xfffffe02064a6000}, 
  v_pollinfo = 0xfffffe00b15dc540, v_label = 0x0, v_lockf = 0x0, v_rl =
  {rl_waiters = {tqh_first = 0x0, tqh_last = 0xfffffe02064a6278},
  rl_currdep = 0x0}}
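
For what it's worth, the dump looks conclusive: v_type = VBAD with
v_op = 0 and v_data = 0 means the vnode has already been reclaimed, and
v_interlock shows mtx_lock = 6, which (if I read sys/mutex.h right) is
MTX_DESTROYED (MTX_CONTESTED | MTX_UNOWNED), exactly the destroyed mutex
the panic complains about. So the knote's kn_hook still points at a
vnode whose interlock has been torn down. The backtrace is the EV_ADD
path, sys_kevent -> kqueue_register -> f_event: re-registering an
existing EVFILT_VNODE knote re-runs filt_vfsvnode(). A minimal userland
sketch of that pattern (the file name is made up; fflags 0x2f matches
kn_sfflags = 47 in the dump) - by itself it will not panic, it just
exercises the window while vnodes are being reclaimed under load:

/*
 * Sketch only, not from this thread: loop EV_ADD on an EVFILT_VNODE
 * filter so that kqueue_register() keeps calling
 * kn->kn_fop->f_event(kn, 0), i.e. filt_vfsvnode(), on an existing
 * knote (the call site shown at kern_event.c:1136 below).
 */
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	struct kevent kev;
	int fd, kq;

	if ((kq = kqueue()) == -1)
		err(1, "kqueue");
	for (;;) {
		fd = open("/tmp/knote-test", O_RDWR | O_CREAT, 0644);
		if (fd == -1)
			err(1, "open");
		/* First EV_ADD attaches the knote to the vnode's knlist. */
		EV_SET(&kev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
		    NOTE_DELETE | NOTE_WRITE | NOTE_EXTEND | NOTE_ATTRIB |
		    NOTE_RENAME, 0, NULL);
		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
			err(1, "kevent (add)");
		/* A second EV_ADD re-runs f_event on the existing knote. */
		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
			err(1, "kevent (re-add)");
		(void)unlink("/tmp/knote-test");
		(void)close(fd);
	}
}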


-----------

> Please try the following.
> 
> diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
> index aa165a0..5715f35 100644
> --- a/sys/kern/vfs_subr.c
> +++ b/sys/kern/vfs_subr.c
> @@ -4421,10 +4421,14 @@ filt_vfsdetach(struct knote *kn)
>  static int
>  filt_vfsread(struct knote *kn, long hint)
>  {
> -	struct vnode *vp = (struct vnode *)kn->kn_hook;
> +	struct vnode *vp;
>  	struct vattr va;
>  	int res;
>  
> +	if ((kn->kn_status & KN_DETACHED) != 0)
> +		return (0);
> +	vp = (struct vnode *)kn->kn_hook;
> +
>  	/*
>  	 * filesystem is gone, so set the EOF flag and schedule
>  	 * the knote for deletion.
> @@ -4450,8 +4454,11 @@ filt_vfsread(struct knote *kn, long hint)
>  static int
>  filt_vfswrite(struct knote *kn, long hint)
>  {
> -	struct vnode *vp = (struct vnode *)kn->kn_hook;
> +	struct vnode *vp;
>  
> +	if ((kn->kn_status & KN_DETACHED) != 0)
> +		return (0);
> +	vp = (struct vnode *)kn->kn_hook;
>  	VI_LOCK(vp);
>  
>  	/*
> @@ -4469,9 +4476,12 @@ filt_vfswrite(struct knote *kn, long hint)
>  static int
>  filt_vfsvnode(struct knote *kn, long hint)
>  {
> -	struct vnode *vp = (struct vnode *)kn->kn_hook;
> +	struct vnode *vp;
>  	int res;
>  
> +	if ((kn->kn_status & KN_DETACHED) != 0)
> +		return (0);
> +	vp = (struct vnode *)kn->kn_hook;
>  	VI_LOCK(vp);
>  	if (kn->kn_sfflags & hint)
>  		kn->kn_fflags |= hint;
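
A side note on the knote dumps above: both show kn_status = 24, which
(if I decode sys/event.h correctly) is KN_DETACHED | KN_INFLUX
(0x08 | 0x10). The knote had already been detached from the vnode's
knlist when f_event was re-run, so kn_hook was stale. That is exactly
the case this patch guards: each VFS filter now returns early for a
detached knote instead of dereferencing kn_hook. A toy model of the
guard (plain userland C, not kernel code; the struct and flag value only
mirror the kernel's):

/*
 * Toy model of the KN_DETACHED guard added by the patch; the real
 * filter lives in vfs_subr.c and takes the vnode interlock after the
 * check.
 */
#include <stdio.h>

#define	TOY_KN_DETACHED	0x08	/* mirrors KN_DETACHED in sys/event.h */

struct toy_knote {
	int	 kn_status;
	void	*kn_hook;	/* possibly stale once detached */
};

static int
toy_filt_vfsvnode(struct toy_knote *kn)
{

	if ((kn->kn_status & TOY_KN_DETACHED) != 0)
		return (0);	/* never touch a possibly-freed hook */
	/* ... VI_LOCK(vp), fflags handling, VI_UNLOCK(vp) ... */
	return (1);
}

int
main(void)
{
	struct toy_knote kn = { TOY_KN_DETACHED, NULL };

	printf("event = %d\n", toy_filt_vfsvnode(&kn));
	return (0);
}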

With this patch I get another panic, 'vn_lock: zero hold count' (an assertion in vfs_vnops.c):

http://user.lamaiziere.net/patrick/public/panic_zero_hold_count.txt

panic: vn_lock 0xfffffe018afd0000: zero hold count
cpuid = 4
KDB: stack backtrace:
#0 0xffffffff80920286 at kdb_backtrace+0x66
#1 0xffffffff808e738d at panic+0x1cd
#2 0xffffffff80994abd at _vn_lock+0xcd
#3 0xffffffff808aff57 at knlist_remove_kq+0x67
#4 0xffffffff808b1617 at knote_fdclose+0x177
#5 0xffffffff808ab1a9 at kern_close+0xe9
#6 0xffffffff80cbd9b5 at amd64_syscall+0x2f5
#7 0xffffffff80ca8597 at Xfast_syscall+0xf7

(kgdb) #0  doadump (textdump=<value optimized out>) at pcpu.h:234
#1  0xffffffff808e792e in kern_reboot (howto=260)
    at /usr/src/sys/kern/kern_shutdown.c:449
#2  0xffffffff808e7364 in panic (fmt=0x1 <Address 0x1 out of bounds>)
    at /usr/src/sys/kern/kern_shutdown.c:637
#3  0xffffffff80994abd in _vn_lock (vp=0xfffffe018afd0000, flags=0, file=0x0, 
    line=0) at /usr/src/sys/kern/vfs_vnops.c:1398
#4  0xffffffff808aff57 in knlist_remove_kq (knl=0xfffffe0056b053b0, 
    kn=0xfffffe018a9c0b80, knlislocked=0, kqislocked=0)
    at /usr/src/sys/kern/kern_event.c:1843
#5  0xffffffff808b1617 in knote_fdclose (td=0xfffffe0056c2a000, fd=62)
    at /usr/src/sys/kern/kern_event.c:2065
#6  0xffffffff808ab1a9 in kern_close (td=0xfffffe0056c2a000, fd=62)
    at /usr/src/sys/kern/kern_descrip.c:1250
#7  0xffffffff80cbd9b5 in amd64_syscall (td=0xfffffe0056c2a000, traced=0)
    at subr_syscall.c:135
#8  0xffffffff80ca8597 in Xfast_syscall ()
    at /usr/src/sys/amd64/amd64/exception.S:391
#9  0x00000008032f3bcc in ?? ()
Previous frame inner to this frame (corrupt stack?)

The code at frame 3 is:
(kgdb) frame 3
#3  0xffffffff80994abd in _vn_lock (vp=0xfffffe018afd0000, flags=0, file=0x0, line=0) at /usr/src/sys/kern/vfs_vnops.c:1398
1398			KASSERT(vp->v_holdcnt != 0,
(kgdb) l
1393	
1394		VNASSERT((flags & LK_TYPE_MASK) != 0, vp,
1395		    ("vn_lock called with no locktype."));
1396		do {
1397	#ifdef DEBUG_VFS_LOCKS
1398			KASSERT(vp->v_holdcnt != 0,
1399			    ("vn_lock %p: zero hold count", vp));
1400	#endif
1401			error = VOP_LOCK1(vp, flags, file, line);
1402			flags &= ~LK_INTERLOCK;	/* Interlock is always dropped. */

The vp is:
(kgdb) p *(struct vnode *) 0xfffffe018afd0000
$2 = {v_type = VDIR, v_tag = 0xffffffff80f56e88 "ufs", v_op = 0xffffffff8132c800, v_data = 0xfffffe013a2a7348, v_mount = 0xfffffe0009c00b70, v_nmntvnodes = {
    tqe_next = 0xfffffe018ace3520, tqe_prev = 0xfffffe018afd02b8}, v_un = {vu_mount = 0x0, vu_socket = 0x0, vu_cdev = 0x0, vu_fifoinfo = 0x0}, v_hashlist = {
    le_next = 0x0, le_prev = 0xffffff8000e29f00}, v_hash = 236846, v_cache_src = {lh_first = 0x0}, v_cache_dst = {tqh_first = 0x0, tqh_last = 0xfffffe018afd0060}, 
  v_cache_dd = 0x0, v_cstart = 0, v_lasta = 0, v_lastw = 0, v_clen = 0, v_lock = {lock_object = {lo_name = 0xffffffff80f56e88 "ufs", lo_flags = 91947008, lo_data = 0, 
      lo_witness = 0xffffff80006c3400}, lk_lock = 1, lk_exslpfail = 0, lk_timo = 51, lk_pri = 96, lk_stack = {depth = 12, pcs = {18446744071571296822, 
        18446744071573768620, 18446744071576111139, 18446744071606114523, 18446744071576111139, 18446744071572113991, 18446744071572067715, 18446744071606111219, 
        18446744071572016126, 18446744071572018094, 18446744071575427509, 18446744071575340439, 0, 0, 0, 0, 0, 0}}}, v_interlock = {lock_object = {
      lo_name = 0xffffffff80f6f8e3 "vnode interlock", lo_flags = 16973824, lo_data = 0, lo_witness = 0xffffff80006a9600}, mtx_lock = 4}, v_vnlock = 0xfffffe018afd0098, 
  v_holdcnt = 0, v_usecount = 0, v_iflag = 256, v_vflag = 0, v_writecount = 0, v_actfreelist = {tqe_next = 0xfffffe018ace3cd0, tqe_prev = 0xfffffe00aefe6e78}, 
  v_bufobj = {bo_mtx = {lock_object = {lo_name = 0xffffffff80f7822c "bufobj interlock", lo_flags = 16973824, lo_data = 0, lo_witness = 0xffffff80006b1680}, 
      mtx_lock = 4}, bo_clean = {bv_hd = {tqh_first = 0x0, tqh_last = 0xfffffe018afd01d8}, bv_root = 0x0, bv_cnt = 0}, bo_dirty = {bv_hd = {tqh_first = 0x0, 
        tqh_last = 0xfffffe018afd01f8}, bv_root = 0x0, bv_cnt = 0}, bo_numoutput = 0, bo_flag = 0, bo_ops = 0xffffffff812f99a0, bo_bsize = 32768, 
    bo_object = 0xfffffe01f8a6c570, bo_synclist = {le_next = 0xfffffe018afd0448, le_prev = 0xfffffe00096d5ca0}, bo_private = 0xfffffe018afd0000, 
    __bo_vnode = 0xfffffe018afd0000}, v_pollinfo = 0xfffffe0056b05380, v_label = 0x0, v_lockf = 0x0, v_rl = {rl_waiters = {tqh_first = 0x0, 
      tqh_last = 0xfffffe018afd0278}, rl_currdep = 0x0}}
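
For context on the assertion: knlist_remove_kq() must take the knlist
lock, and for EVFILT_VNODE knotes that lock is, as far as I can tell,
implemented with vn_lock() on the vnode itself (the
vfs_knllock()/vfs_knlunlock() callbacks in vfs_subr.c). With
DEBUG_VFS_LOCKS, vn_lock() insists on v_holdcnt > 0, and the dump shows
v_holdcnt = 0 and v_usecount = 0: nothing holds the vnode anymore, yet
the knote is still on its list. A sketch of the invariant the assertion
enforces (FreeBSD 9 kernel API; the wrapper function is hypothetical):

/*
 * Sketch: any code that locks a vnode it does not otherwise reference
 * must bracket the lock with a hold reference.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

static void
example_lock_vnode(struct vnode *vp)
{

	vhold(vp);		/* v_holdcnt++: vp cannot be recycled */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	/* ... use the locked vnode ... */
	VOP_UNLOCK(vp, 0);
	vdrop(vp);		/* v_holdcnt--: vp may go to the free list */
}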

-------

Thanks a lot, regards.

