ZFS triggered 9-STABLE r246646 panic "vdrop: holdcnt 0"

Lawrence Stewart lstewart at freebsd.org
Tue Mar 12 02:10:39 UTC 2013


Hi all,

I got this panic yesterday. I haven't seen it before (or since), but I
still have the crashdump and kernel on hand, so I can provide additional
information if that would be useful in finding the cause.

The machine runs ZFS exclusively and was under quite heavy CPU and I/O
load at the time of the crash: I was compiling both in a VirtualBox VM
and on the host itself, as well as running a full KDE desktop
environment. I'm fairly certain the machine was not swapping at the time
of the crash.

lstewart at lstewart> uname -a
FreeBSD lstewart 9.1-STABLE FreeBSD 9.1-STABLE #8 r246646M: Mon Feb 11
14:57:13 EST 2013
root at lstewart:/usr/obj/usr/src/sys/LSTEWART-DESKTOP  amd64

lstewart at lstewart> sudo kgdb /boot/kernel/kernel /var/crash/vmcore.0

[...]

(kgdb) bt
#0  doadump (textdump=<value optimized out>) at pcpu.h:229
#1  0xffffffff808e5824 in kern_reboot (howto=260) at
/usr/src/sys/kern/kern_shutdown.c:448
#2  0xffffffff808e5d27 in panic (fmt=0x1 <Address 0x1 out of bounds>) at
/usr/src/sys/kern/kern_shutdown.c:636
#3  0xffffffff8097a71e in vdropl (vp=<value optimized out>) at
/usr/src/sys/kern/vfs_subr.c:2465
#4  0xffffffff80b4da2b in vm_page_alloc (object=0xffffffff8132c000,
pindex=143696, req=32) at /usr/src/sys/vm/vm_page.c:1569
#5  0xffffffff80b3f312 in kmem_back (map=0xfffffe00020000e8,
addr=18446743524542296064, size=131072, flags=705200752)
    at /usr/src/sys/vm/vm_kern.c:361
#6  0xffffffff80b3fc8b in kmem_malloc (map=0xfffffe00020000e8,
size=131072, flags=2) at /usr/src/sys/vm/vm_kern.c:312
#7  0xffffffff80b3685a in uma_large_malloc (size=131072, wait=2) at
/usr/src/sys/vm/uma_core.c:3068
#8  0xffffffff808d0539 in malloc (size=131072, mtp=0xffffffff817f4ce0,
flags=2) at /usr/src/sys/kern/kern_malloc.c:492
#9  0xffffffff816696e2 in zio_write_bp_init (zio=0xfffffe016b70a000)
    at
/usr/src/sys/modules/zfs/../../cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c:1060
#10 0xffffffff81668e23 in zio_execute (zio=0xfffffe016b70a000)
    at
/usr/src/sys/modules/zfs/../../cddl/contrib/opensolaris/uts/common/fs/zfs/zio.c:1256
#11 0xffffffff80928474 in taskqueue_run_locked
(queue=0xfffffe0010484280) at /usr/src/sys/kern/subr_taskqueue.c:312
#12 0xffffffff80929426 in taskqueue_thread_loop (arg=<value optimized
out>) at /usr/src/sys/kern/subr_taskqueue.c:501
#13 0xffffffff808b67af in fork_exit (callout=0xffffffff809293e0
<taskqueue_thread_loop>, arg=0xfffffe00103869d0,
    frame=0xffffff823df70b00) at /usr/src/sys/kern/kern_fork.c:988
#14 0xffffffff80c4ddee in fork_trampoline () at
/usr/src/sys/amd64/amd64/exception.S:602
#15 0x0000000000000000 in ?? ()

(kgdb) frame 4
#4  0xffffffff80b4da2b in vm_page_alloc (object=0xffffffff8132c000,
pindex=143696, req=32) at /usr/src/sys/vm/vm_page.c:1569
1569                    vdrop(vp);

(kgdb) p *vp
$3 = {v_type = VREG, v_tag = 0xffffffff816f7842 "zfs", v_op =
0xffffffff816ff7a0, v_data = 0xfffffe00784e42e0,
  v_mount = 0xfffffe0010890000, v_nmntvnodes = {tqe_next =
0xfffffe00a95281f8, tqe_prev = 0xfffffe0091d09220}, v_un = {
    vu_mount = 0x0, vu_socket = 0x0, vu_cdev = 0x0, vu_fifoinfo = 0x0},
v_hashlist = {le_next = 0x0, le_prev = 0x0},
  v_hash = 19896209, v_cache_src = {lh_first = 0x0}, v_cache_dst =
{tqh_first = 0x0, tqh_last = 0xfffffe012f979258},
  v_cache_dd = 0x0, v_cstart = 0, v_lasta = 0, v_lastw = 0, v_clen = 0,
v_lock = {lock_object = {
      lo_name = 0xffffffff816f7842 "zfs", lo_flags = 91947008, lo_data =
0, lo_witness = 0x0}, lk_lock = 1, lk_exslpfail = 0,
    lk_timo = 51, lk_pri = 96}, v_interlock = {lock_object = {lo_name =
0xffffffff80ec2790 "vnode interlock", lo_flags = 16973824,
      lo_data = 0, lo_witness = 0x0}, mtx_lock = 18446741874964127744},
v_vnlock = 0xfffffe012f979290, v_holdcnt = 0,
  v_usecount = 0, v_iflag = 256, v_vflag = 0, v_writecount = 0,
v_actfreelist = {tqe_next = 0xfffffe00a95281f8,
    tqe_prev = 0xfffffe0091d09308}, v_bufobj = {bo_mtx = {lock_object =
{lo_name = 0xffffffff80ec27a0 "bufobj interlock",
        lo_flags = 16973824, lo_data = 0, lo_witness = 0x0}, mtx_lock =
4}, bo_clean = {bv_hd = {tqh_first = 0x0,
        tqh_last = 0xfffffe012f979338}, bv_root = 0x0, bv_cnt = 0},
bo_dirty = {bv_hd = {tqh_first = 0x0,
        tqh_last = 0xfffffe012f979358}, bv_root = 0x0, bv_cnt = 0},
bo_numoutput = 0, bo_flag = 0, bo_ops = 0xffffffff81253920,
    bo_bsize = 131072, bo_object = 0xfffffe0070ba5910, bo_synclist =
{le_next = 0x0, le_prev = 0x0},
    bo_private = 0xfffffe012f9791f8, __bo_vnode = 0xfffffe012f9791f8},
v_pollinfo = 0x0, v_label = 0x0, v_lockf = 0x0, v_rl = {
    rl_waiters = {tqh_first = 0x0, tqh_last = 0xfffffe012f9793d8},
rl_currdep = 0x0}}

(kgdb) p *object
$6 = {mtx = {lock_object = {lo_name = 0xffffffff80ee61ad "vm object",
lo_flags = 21168128, lo_data = 0, lo_witness = 0x0},
    mtx_lock = 18446741874964127744}, object_list = {tqe_next =
0xffffffff8132bcc0, tqe_prev = 0xffffffff8132bf20}, shadow_head = {
    lh_first = 0x0}, shadow_list = {le_next = 0x0, le_prev = 0x0}, memq
= {tqh_first = 0xfffffe021eebb880,
    tqh_last = 0xfffffe022a0882f8}, root = 0xfffffe022a0882e8, size =
134217727, generation = 1, ref_count = 2659,
  shadow_count = 0, memattr = 6 '\006', type = 4 '\004', flags = 4096,
pg_color = 0, pad1 = 0, resident_page_count = 124507,
  backing_object = 0x0, backing_object_offset = 0, pager_object_list =
{tqe_next = 0x0, tqe_prev = 0x0}, rvq = {
    lh_first = 0xfffffe021df8a5c0}, cache = 0x0, handle = 0x0, un_pager
= {vnp = {vnp_size = 0, writemappings = 0}, devp = {
      devp_pglist = {tqh_first = 0x0, tqh_last = 0x0}, ops = 0x0}, sgp =
{sgp_pglist = {tqh_first = 0x0, tqh_last = 0x0}}, swp = {
      swp_bcount = 0}}, cred = 0x0, charge = 0, paging_in_progress = 0}


Cheers,
Lawrence


More information about the freebsd-fs mailing list