kern/78179: bus_dmamem_alloc() with BUS_DMA_NOWAIT can block
Mark Tinguely
tinguely at casselton.net
Thu Nov 17 22:10:28 GMT 2005
The following reply was made to PR kern/78179; it has been noted by GNATS.
From: Mark Tinguely <tinguely at casselton.net>
To: bug-followup at FreeBSD.org, PeterJeremy at optushome.com.au
Cc:
Subject: Re: kern/78179: bus_dmamem_alloc() with BUS_DMA_NOWAIT can block
Date: Thu, 17 Nov 2005 16:04:14 -0600 (CST)
I took a stab at the problem that NOWAIT is not being honored in
contigmalloc() by making the vm_contig_launder_page() honor
the flag.
Basically there are a few places where NOWAIT can sleep:
1) the page is busy - change, don't sleep, just return EWOULDBLOCK
2) the page is dirty and the object is the kernel object because
vm_pageout_flush() will force a synchronous write - change check for
kernel object and return EWOULDBLOCK
3) the page is dirty and the object is not the kernel object - change
send the NOWAIT information to the flushing routines.
A flags argument is added to vm_page_alloc_contig(), so the header file and
the routine in sys/arm that calls vm_page_alloc_contig() need to be
updated.
I put a temporary printf() statement before the EWOULDBLOCK just to see
if we can trip the code, so we know that a panic was averted. Since the problem
generates panics only with a combination of fragmented memory and an
allocation at interrupt time, I have not been able to trip the problem to prove
this patch.
-- patch from -current should work with 6.0-RELEASE too --
*** arm/arm/vm_machdep.c.orig Thu Nov 17 15:10:52 2005
--- arm/arm/vm_machdep.c Thu Nov 17 15:11:09 2005
***************
*** 415,421 ****
if (alloc_curaddr < 0xf0000000) {/* XXX */
mtx_lock(&Giant);
page_array = vm_page_alloc_contig(0x100000 / PAGE_SIZE,
! 0, 0xffffffff, 0x100000, 0);
mtx_unlock(&Giant);
}
if (page_array) {
--- 415,421 ----
if (alloc_curaddr < 0xf0000000) {/* XXX */
mtx_lock(&Giant);
page_array = vm_page_alloc_contig(0x100000 / PAGE_SIZE,
! 0, 0xffffffff, 0x100000, 0, 0);
mtx_unlock(&Giant);
}
if (page_array) {
*** vm/vm_contig.c.orig Thu Nov 17 15:10:10 2005
--- vm/vm_contig.c Thu Nov 17 13:59:47 2005
***************
*** 86,92 ****
#include <vm/vm_extern.h>
static int
! vm_contig_launder_page(vm_page_t m)
{
vm_object_t object;
vm_page_t m_tmp;
--- 86,92 ----
#include <vm/vm_extern.h>
static int
! vm_contig_launder_page(vm_page_t m, int flags)
{
vm_object_t object;
vm_page_t m_tmp;
***************
*** 95,100 ****
--- 95,105 ----
object = m->object;
if (!VM_OBJECT_TRYLOCK(object))
return (EAGAIN);
+ if (flags & M_NOWAIT && (m->flags & PG_BUSY || m->busy)) {
+ VM_OBJECT_UNLOCK(object);
+ printf("vm_contig_launder_page: would sleep (busy)\n");
+ return (EWOULDBLOCK);
+ }
if (vm_page_sleep_if_busy(m, TRUE, "vpctw0")) {
VM_OBJECT_UNLOCK(object);
vm_page_lock_queues();
***************
*** 104,126 ****
if (m->dirty == 0 && m->hold_count == 0)
pmap_remove_all(m);
if (m->dirty) {
if (object->type == OBJT_VNODE) {
vm_page_unlock_queues();
vp = object->handle;
VM_OBJECT_UNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
VM_OBJECT_LOCK(object);
! vm_object_page_clean(object, 0, 0, OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, curthread);
vm_page_lock_queues();
! return (0);
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
m_tmp = m;
! vm_pageout_flush(&m_tmp, 1, VM_PAGER_PUT_SYNC);
VM_OBJECT_UNLOCK(object);
! return (0);
}
} else if (m->hold_count == 0)
vm_page_cache(m);
--- 109,149 ----
if (m->dirty == 0 && m->hold_count == 0)
pmap_remove_all(m);
if (m->dirty) {
+ /* Both paths use vm_pageout_flush() which forces
+ * a syncronous putpage for the kernel_object.
+ */
+ if (flags & M_NOWAIT && object == kernel_object) {
+ VM_OBJECT_UNLOCK(object);
+ printf("vm_contig_launder_page: would sleep (kobject)\n");
+ return (EWOULDBLOCK);
+ }
if (object->type == OBJT_VNODE) {
vm_page_unlock_queues();
vp = object->handle;
VM_OBJECT_UNLOCK(object);
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
VM_OBJECT_LOCK(object);
! vm_object_page_clean(object, 0, 0,
! (flags & M_NOWAIT) ? 0 : OBJPC_SYNC);
VM_OBJECT_UNLOCK(object);
VOP_UNLOCK(vp, 0, curthread);
vm_page_lock_queues();
! if ((flags & M_NOWAIT) && m->dirty) {
! printf("vm_contig_launder_page: would sleep (dirty)\n");
! return (EWOULDBLOCK);
! } else
! return (0);
} else if (object->type == OBJT_SWAP ||
object->type == OBJT_DEFAULT) {
m_tmp = m;
! vm_pageout_flush(&m_tmp, 1,
! (flags & M_NOWAIT) ? 0 : VM_PAGER_PUT_SYNC);
VM_OBJECT_UNLOCK(object);
! if ((flags & M_NOWAIT) && m->dirty) {
! printf("vm_contig_launder_page: would sleep (dirty)\n");
! return (EWOULDBLOCK);
! } else
! return (0);
}
} else if (m->hold_count == 0)
vm_page_cache(m);
***************
*** 129,135 ****
}
static int
! vm_contig_launder(int queue)
{
vm_page_t m, next;
int error;
--- 152,158 ----
}
static int
! vm_contig_launder(int queue, int flags)
{
vm_page_t m, next;
int error;
***************
*** 143,149 ****
KASSERT(m->queue == queue,
("vm_contig_launder: page %p's queue is not %d", m, queue));
! error = vm_contig_launder_page(m);
if (error == 0)
return (TRUE);
if (error == EBUSY)
--- 166,172 ----
KASSERT(m->queue == queue,
("vm_contig_launder: page %p's queue is not %d", m, queue));
! error = vm_contig_launder_page(m, flags);
if (error == 0)
return (TRUE);
if (error == EBUSY)
***************
*** 224,235 ****
actmax = vm_page_queues[PQ_ACTIVE].lcnt;
again1:
if (inactl < inactmax &&
! vm_contig_launder(PQ_INACTIVE)) {
inactl++;
goto again1;
}
if (actl < actmax &&
! vm_contig_launder(PQ_ACTIVE)) {
actl++;
goto again1;
}
--- 247,258 ----
actmax = vm_page_queues[PQ_ACTIVE].lcnt;
again1:
if (inactl < inactmax &&
! vm_contig_launder(PQ_INACTIVE, flags)) {
inactl++;
goto again1;
}
if (actl < actmax &&
! vm_contig_launder(PQ_ACTIVE, flags)) {
actl++;
goto again1;
}
***************
*** 381,387 ****
vm_page_t
vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
! vm_offset_t alignment, vm_offset_t boundary)
{
vm_object_t object;
vm_offset_t size;
--- 404,410 ----
vm_page_t
vm_page_alloc_contig(vm_pindex_t npages, vm_paddr_t low, vm_paddr_t high,
! vm_offset_t alignment, vm_offset_t boundary, int flags)
{
vm_object_t object;
vm_offset_t size;
***************
*** 459,465 ****
switch (m->queue) {
case PQ_ACTIVE:
case PQ_INACTIVE:
! if (vm_contig_launder_page(m) != 0)
goto cleanup_freed;
pqtype = m->queue - m->pc;
if (pqtype == PQ_FREE ||
--- 482,488 ----
switch (m->queue) {
case PQ_ACTIVE:
case PQ_INACTIVE:
! if (vm_contig_launder_page(m, flags) != 0)
goto cleanup_freed;
pqtype = m->queue - m->pc;
if (pqtype == PQ_FREE ||
***************
*** 570,576 ****
boundary, kernel_map);
} else {
pages = vm_page_alloc_contig(npgs, low, high,
! alignment, boundary);
if (pages == NULL) {
ret = NULL;
} else {
--- 593,599 ----
boundary, kernel_map);
} else {
pages = vm_page_alloc_contig(npgs, low, high,
! alignment, boundary, flags);
if (pages == NULL) {
ret = NULL;
} else {
*** vm/vm_page.h.orig Thu Nov 17 15:12:56 2005
--- vm/vm_page.h Thu Nov 17 15:13:12 2005
***************
*** 344,350 ****
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig (vm_pindex_t, vm_paddr_t, vm_paddr_t,
! vm_offset_t, vm_offset_t);
void vm_page_release_contig (vm_page_t, vm_pindex_t);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (register vm_page_t);
--- 344,350 ----
void vm_page_activate (vm_page_t);
vm_page_t vm_page_alloc (vm_object_t, vm_pindex_t, int);
vm_page_t vm_page_alloc_contig (vm_pindex_t, vm_paddr_t, vm_paddr_t,
! vm_offset_t, vm_offset_t, int);
void vm_page_release_contig (vm_page_t, vm_pindex_t);
vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int);
void vm_page_cache (register vm_page_t);
More information about the freebsd-bugs
mailing list