svn commit: r263347 - projects/sendfile/sys/kern
Gleb Smirnoff
glebius at FreeBSD.org
Wed Mar 19 12:30:45 UTC 2014
Author: glebius
Date: Wed Mar 19 12:30:44 2014
New Revision: 263347
URL: http://svnweb.freebsd.org/changeset/base/263347
Log:
Implement optional (default to 0) readahead for the new sendfile.
If readahead is enabled, and vm_pager_has_page() hints that we
can do more pages in a single I/O than the socket can consume, then
we preload that many pages into memory.
Sponsored by: Netflix
Sponsored by: Nginx, Inc.
Modified:
projects/sendfile/sys/kern/uipc_syscalls.c
Modified: projects/sendfile/sys/kern/uipc_syscalls.c
==============================================================================
--- projects/sendfile/sys/kern/uipc_syscalls.c Wed Mar 19 12:29:20 2014 (r263346)
+++ projects/sendfile/sys/kern/uipc_syscalls.c Wed Mar 19 12:30:44 2014 (r263347)
@@ -137,6 +137,10 @@ static int sfpgrabnowait = 0;
SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, pgrabnowait, CTLFLAG_RW,
&sfpgrabnowait, 0, "Use VM_ALLOC_NOWAIT when SF_NODISKIO is requested");
+static int sfreadahead = 0;
+SYSCTL_INT(_kern_ipc_sendfile, OID_AUTO, readahead, CTLFLAG_RW,
+ &sfreadahead, 0, "Read this more pages than socket buffer can accept");
+
#ifdef SFSYNC_DEBUG
static int sf_sync_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, sf_sync_debug, CTLFLAG_RW,
@@ -2727,9 +2731,10 @@ sendfile_swapin(vm_object_t obj, struct
{
vm_page_t *pa = sfio->pa;
int npages = sfio->npages;
- int nios, rv;
+ int nios, readahead;
nios = 0;
+ readahead = sfreadahead;
if (sfpgrabnowait && (flags & SF_NODISKIO))
flags = VM_ALLOC_NOWAIT;
else
@@ -2741,12 +2746,13 @@ sendfile_swapin(vm_object_t obj, struct
VM_ALLOC_WIRED | VM_ALLOC_NORMAL | flags);
if (pa[i] == NULL) {
npages = sfio->npages = i;
+ readahead = 0;
break;
}
}
for (int i = 0; i < npages; i++) {
- int j, a;
+ int j, a, count, rv;
if (vm_page_is_valid(pa[i], vmoff(i, off) & PAGE_MASK,
xfsize(i, npages, off, len))) {
@@ -2770,9 +2776,19 @@ sendfile_swapin(vm_object_t obj, struct
if (i == j)
continue;
+ count = min(a + 1, npages + readahead - i);
+ for (j = npages; j < i + count; j++) {
+ pa[j] = vm_page_grab(obj, OFF_TO_IDX(vmoff(j, off)),
+ VM_ALLOC_NORMAL | VM_ALLOC_NOWAIT);
+ if (pa[j] == NULL) {
+ count = j - i;
+ break;
+ }
+ }
+
refcount_acquire(&sfio->nios);
- rv = vm_pager_get_pages_async(obj, pa + i,
- min(a + 1, npages - i), 0, &sf_io_done, sfio);
+ rv = vm_pager_get_pages_async(obj, pa + i, count, 0,
+ &sf_io_done, sfio);
KASSERT(rv == VM_PAGER_OK, ("%s: pager fail obj %p page %p",
__func__, obj, pa[i]));
@@ -2780,12 +2796,13 @@ sendfile_swapin(vm_object_t obj, struct
SFSTAT_INC(sf_iocnt);
nios++;
- i += a;
- for (j = i - a; a > 0 && j < npages; a--, j++)
+ for (j = i; j < i + count; j++)
KASSERT(pa[j] == vm_page_lookup(obj,
OFF_TO_IDX(vmoff(j, off))),
("pa[j] %p lookup %p\n", pa[j],
vm_page_lookup(obj, OFF_TO_IDX(vmoff(j, off)))));
+
+ i += count - 1;
}
VM_OBJECT_WUNLOCK(obj);
@@ -3064,7 +3081,8 @@ retry_space:
else
npages = howmany(space, PAGE_SIZE);
sfio = malloc(sizeof(struct sf_io) +
- npages * sizeof(vm_page_t), M_TEMP, M_WAITOK);
+ (sfreadahead + npages) * sizeof(vm_page_t),
+ M_TEMP, M_WAITOK);
refcount_init(&sfio->nios, 1);
sfio->npages = npages;
More information about the svn-src-projects
mailing list