svn commit: r330246 - user/markj/vm-playground/sys/vm
Mark Johnston
markj at FreeBSD.org
Thu Mar 1 18:11:04 UTC 2018
Author: markj
Date: Thu Mar 1 18:11:03 2018
New Revision: 330246
URL: https://svnweb.freebsd.org/changeset/base/330246
Log:
Revert changes to batch the insertion of pages into page queues.
The batching will be replaced by a more general mechanism in a future commit.
The reverted approach has a number of disadvantages:
- It bloats the per-domain structure quite a bit: we keep a batch
queue per page lock per page queue, for a total of
PQ_COUNT * PA_LOCK_COUNT batch queues per domain. We'd like to be
able to increase PA_LOCK_COUNT without incurring this bloat; a rough
footprint sketch follows this list.
- It only improves scalability for enqueue operations; threads that
wish to dequeue or requeue pages must still acquire the page queue
lock. Thus, the page queue lock remains a bottleneck in certain
workloads. Builds, for example, involve frequent removal of pages
from PQ_ACTIVE as short-lived VM objects are destroyed.
- The page daemon still needs to acquire the page queue lock once
per page during a queue scan.
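To make the first point concrete, here is a back-of-envelope estimate
of the per-domain footprint. All constants below are assumptions for a
typical amd64 configuration (PA_LOCK_COUNT of 256, four paging queues,
64-byte cache lines), not values taken from this commit:

    #include <stdio.h>

    /*
     * Rough per-domain cost of keeping one batch queue per page lock
     * per page queue, given that each struct vm_batchqueue is padded
     * out to a cache line by __aligned(CACHE_LINE_SIZE).
     */
    int
    main(void)
    {
            const size_t pa_lock_count = 256; /* assumed PA_LOCK_COUNT */
            const size_t pq_count = 4;  /* inactive, active, laundry, unswappable */
            const size_t cache_line = 64;     /* assumed CACHE_LINE_SIZE */

            printf("batch queues per domain: %zu\n",
                pq_count * pa_lock_count);
            printf("approximate bloat per domain: %zu KB\n",
                pq_count * pa_lock_count * cache_line / 1024);
            /* Doubling PA_LOCK_COUNT doubles this cost. */
            return (0);
    }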
Modified:
user/markj/vm-playground/sys/vm/vm_object.c
user/markj/vm-playground/sys/vm/vm_page.c
user/markj/vm-playground/sys/vm/vm_pageout.c
user/markj/vm-playground/sys/vm/vm_pagequeue.h
Modified: user/markj/vm-playground/sys/vm/vm_object.c
==============================================================================
--- user/markj/vm-playground/sys/vm/vm_object.c Thu Mar 1 17:47:28 2018 (r330245)
+++ user/markj/vm-playground/sys/vm/vm_object.c Thu Mar 1 18:11:03 2018 (r330246)
@@ -723,6 +723,7 @@ vm_object_terminate_pages(vm_object_t object)
vm_page_t p, p_next;
struct mtx *mtx, *mtx1;
struct vm_pagequeue *pq, *pq1;
+ int dequeued;
VM_OBJECT_ASSERT_WLOCKED(object);
@@ -747,6 +748,7 @@ vm_object_terminate_pages(vm_object_t object)
if (mtx != NULL)
mtx_unlock(mtx);
if (pq != NULL) {
+ vm_pagequeue_cnt_add(pq, dequeued);
vm_pagequeue_unlock(pq);
pq = NULL;
}
@@ -764,19 +766,27 @@ vm_object_terminate_pages(vm_object_t object)
"page %p is not queued", p));
pq1 = vm_page_pagequeue(p);
if (pq != pq1) {
- if (pq != NULL)
+ if (pq != NULL) {
+ vm_pagequeue_cnt_add(pq, dequeued);
vm_pagequeue_unlock(pq);
+ }
pq = pq1;
vm_pagequeue_lock(pq);
+ dequeued = 0;
}
+ p->queue = PQ_NONE;
+ TAILQ_REMOVE(&pq->pq_pl, p, plinks.q);
+ dequeued--;
}
if (vm_page_free_prep(p, true))
continue;
unlist:
TAILQ_REMOVE(&object->memq, p, listq);
}
- if (pq != NULL)
+ if (pq != NULL) {
+ vm_pagequeue_cnt_add(pq, dequeued);
vm_pagequeue_unlock(pq);
+ }
if (mtx != NULL)
mtx_unlock(mtx);
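The restored vm_object_terminate_pages() above batches counter updates
rather than queue insertions: while a page queue lock is held, each
dequeue only decrements the local "dequeued" accumulator, and
vm_pagequeue_cnt_add() publishes the sum once per lock hold. A minimal
userland sketch of the same pattern, with hypothetical names:

    #include <pthread.h>
    #include <stdio.h>

    /* A shared queue length guarded by a lock, as with pq->pq_cnt. */
    static pthread_mutex_t pq_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int pq_cnt = 100;

    /*
     * Remove 'n' items: accumulate the delta locally and apply it
     * once, instead of touching the shared counter on every removal.
     */
    static void
    drain_queue(int n)
    {
            int dequeued = 0;

            pthread_mutex_lock(&pq_mutex);
            for (int i = 0; i < n; i++) {
                    /* ... unlink one item from the queue ... */
                    dequeued--;
            }
            pq_cnt += dequeued;     /* one update per lock hold */
            pthread_mutex_unlock(&pq_mutex);
    }

    int
    main(void)
    {
            drain_queue(25);
            printf("pq_cnt is now %d\n", pq_cnt);   /* 75 */
            return (0);
    }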
Modified: user/markj/vm-playground/sys/vm/vm_page.c
==============================================================================
--- user/markj/vm-playground/sys/vm/vm_page.c Thu Mar 1 17:47:28 2018 (r330245)
+++ user/markj/vm-playground/sys/vm/vm_page.c Thu Mar 1 18:11:03 2018 (r330246)
@@ -74,13 +74,6 @@
* * The page daemon can acquire and hold any pair of page queue
* locks in any order.
*
- * * Batch queues are used to defer insertions of pages into the
- * main paging queues. The aim is to reduce contention at the
- * entry point of the queue by inserting multiple pages in an
- * O(1) operation. This comes at the expense of strict LRU.
- * Only a page lock is required to insert a page into a batch
- * queue.
- *
* - The object lock is required when inserting or removing
* pages from an object (vm_page_insert() or vm_page_remove()).
*
@@ -443,7 +436,7 @@ vm_page_domain_init(int domain)
{
struct vm_domain *vmd;
struct vm_pagequeue *pq;
- int i, j;
+ int i;
vmd = VM_DOMAIN(domain);
bzero(vmd, sizeof(*vmd));
@@ -465,15 +458,6 @@ vm_page_domain_init(int domain)
TAILQ_INIT(&pq->pq_pl);
mtx_init(&pq->pq_mutex, pq->pq_name, "vm pagequeue",
MTX_DEF | MTX_DUPOK);
-
- /*
- * The batch queue limits are set in vm_pageout_init() once
- * we've set the paging targets.
- */
- for (j = 0; j < BPQ_COUNT; j++) {
- TAILQ_INIT(&pq->pq_bpqs[j].bpq_pl);
- pq->pq_bpqs[j].bpq_lim = 1;
- }
}
mtx_init(&vmd->vmd_free_mtx, "vm page free queue", NULL, MTX_DEF);
mtx_init(&vmd->vmd_pageout_mtx, "vm pageout lock", NULL, MTX_DEF);
@@ -3040,30 +3024,6 @@ vm_page_pagequeue(vm_page_t m)
}
/*
- * vm_page_enqueue_batch:
- *
- * Concatenate the pages in a batch queue to their corresponding paging
- * queue.
- *
- * The pagequeue must be locked.
- */
-static void
-vm_page_enqueue_batch(struct vm_pagequeue *pq, u_int idx)
-{
- struct vm_batchqueue *bpq;
-
- KASSERT(idx < BPQ_COUNT, ("invalid batch queue index %u", idx));
- vm_pagequeue_assert_locked(pq);
-
- bpq = &pq->pq_bpqs[idx];
- if (bpq->bpq_cnt != 0) {
- TAILQ_CONCAT(&pq->pq_pl, &bpq->bpq_pl, plinks.q);
- vm_pagequeue_cnt_add(pq, bpq->bpq_cnt);
- bpq->bpq_cnt = 0;
- }
-}
-
-/*
* vm_page_dequeue:
*
* Remove the given page from its current page queue.
@@ -3081,7 +3041,6 @@ vm_page_dequeue(vm_page_t m)
pq = vm_page_pagequeue(m);
vm_pagequeue_lock(pq);
m->queue = PQ_NONE;
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_dec(pq);
vm_pagequeue_unlock(pq);
@@ -3102,7 +3061,6 @@ vm_page_dequeue_locked(vm_page_t m)
vm_page_lock_assert(m, MA_OWNED);
pq = vm_page_pagequeue(m);
vm_pagequeue_assert_locked(pq);
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
m->queue = PQ_NONE;
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
vm_pagequeue_cnt_dec(pq);
@@ -3118,7 +3076,6 @@ vm_page_dequeue_locked(vm_page_t m)
static void
vm_page_enqueue(uint8_t queue, vm_page_t m)
{
- struct vm_batchqueue *bpq;
struct vm_pagequeue *pq;
vm_page_lock_assert(m, MA_OWNED);
@@ -3126,14 +3083,11 @@ vm_page_enqueue(uint8_t queue, vm_page_t m)
("vm_page_enqueue: invalid queue %u request for page %p",
queue, m));
pq = &vm_pagequeue_domain(m)->vmd_pagequeues[queue];
+ vm_pagequeue_lock(pq);
m->queue = queue;
- bpq = &pq->pq_bpqs[BPQ_IDX(m)];
- TAILQ_INSERT_TAIL(&bpq->bpq_pl, m, plinks.q);
- if (bpq->bpq_cnt++ >= bpq->bpq_lim) {
- vm_pagequeue_lock(pq);
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
- vm_pagequeue_unlock(pq);
- }
+ TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
+ vm_pagequeue_cnt_inc(pq);
+ vm_pagequeue_unlock(pq);
}
/*
@@ -3153,7 +3107,6 @@ vm_page_requeue(vm_page_t m)
("vm_page_requeue: page %p is not queued", m));
pq = vm_page_pagequeue(m);
vm_pagequeue_lock(pq);
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
vm_pagequeue_unlock(pq);
@@ -3171,12 +3124,10 @@ vm_page_requeue_locked(vm_page_t m)
{
struct vm_pagequeue *pq;
- vm_page_lock_assert(m, MA_OWNED);
KASSERT(m->queue != PQ_NONE,
("vm_page_requeue_locked: page %p is not queued", m));
pq = vm_page_pagequeue(m);
vm_pagequeue_assert_locked(pq);
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
TAILQ_REMOVE(&pq->pq_pl, m, plinks.q);
TAILQ_INSERT_TAIL(&pq->pq_pl, m, plinks.q);
}
@@ -3481,7 +3432,6 @@ vm_page_unwire_noq(vm_page_t m)
static inline void
_vm_page_deactivate(vm_page_t m, boolean_t noreuse)
{
- struct vm_batchqueue *bpq;
struct vm_pagequeue *pq;
int queue;
@@ -3502,17 +3452,9 @@ _vm_page_deactivate(vm_page_t m, boolean_t noreuse)
} else {
if (queue != PQ_NONE)
vm_page_dequeue(m);
- bpq = &pq->pq_bpqs[BPQ_IDX(m)];
- if (bpq->bpq_cnt < bpq->bpq_lim) {
- bpq->bpq_cnt++;
- m->queue = PQ_INACTIVE;
- TAILQ_INSERT_TAIL(&bpq->bpq_pl, m, plinks.q);
- return;
- }
vm_pagequeue_lock(pq);
}
m->queue = PQ_INACTIVE;
- vm_page_enqueue_batch(pq, BPQ_IDX(m));
if (noreuse)
TAILQ_INSERT_BEFORE(
&vm_pagequeue_domain(m)->vmd_inacthead, m,
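The fast paths deleted above (in vm_page_enqueue() and
_vm_page_deactivate()) appended pages to a small per-lock batch and
spilled the whole batch into the main queue with a single O(1)
TAILQ_CONCAT once it filled, so the common enqueue took no page queue
lock at all. A self-contained userland sketch of that spill, using the
TAILQ macros from queue(3); the item and queue names are hypothetical:

    #include <sys/queue.h>
    #include <pthread.h>
    #include <stdio.h>

    struct item {
            TAILQ_ENTRY(item) links;
    };
    TAILQ_HEAD(itemq, item);

    static pthread_mutex_t pq_mutex = PTHREAD_MUTEX_INITIALIZER;
    static struct itemq pq = TAILQ_HEAD_INITIALIZER(pq);
    static int pq_cnt;

    /* A batch filled without taking pq_mutex, as with bpq_pl. */
    static struct itemq batch = TAILQ_HEAD_INITIALIZER(batch);
    static int batch_cnt;
    #define BATCH_LIM 8     /* cf. bpq_lim */

    static void
    enqueue(struct item *it)
    {
            TAILQ_INSERT_TAIL(&batch, it, links);
            if (++batch_cnt < BATCH_LIM)
                    return;         /* common case: no queue lock */

            /* Spill: one lock acquisition moves the batch in O(1). */
            pthread_mutex_lock(&pq_mutex);
            TAILQ_CONCAT(&pq, &batch, links);
            pq_cnt += batch_cnt;
            batch_cnt = 0;
            pthread_mutex_unlock(&pq_mutex);
    }

    int
    main(void)
    {
            static struct item items[20];

            for (int i = 0; i < 20; i++)
                    enqueue(&items[i]);
            printf("queued %d, still batched %d\n", pq_cnt, batch_cnt);
            return (0);
    }

The cost is strict LRU ordering, and every dequeue or requeue first had
to flush the relevant batch (the vm_page_enqueue_batch() calls removed
above) so that the page was guaranteed to be on the main queue.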
Modified: user/markj/vm-playground/sys/vm/vm_pageout.c
==============================================================================
--- user/markj/vm-playground/sys/vm/vm_pageout.c Thu Mar 1 17:47:28 2018 (r330245)
+++ user/markj/vm-playground/sys/vm/vm_pageout.c Thu Mar 1 18:11:03 2018 (r330246)
@@ -1952,7 +1952,6 @@ vm_pageout_init_domain(int domain)
{
struct vm_domain *vmd;
struct sysctl_oid *oid;
- int lim, i, j;
vmd = VM_DOMAIN(domain);
vmd->vmd_interrupt_free_min = 2;
@@ -1991,22 +1990,6 @@ vm_pageout_init_domain(int domain)
*/
vmd->vmd_background_launder_target = (vmd->vmd_free_target -
vmd->vmd_free_min) / 10;
-
- /*
- * Set batch queue limits for paging queues.
- *
- * We want these to be small relative to the amount of system memory.
- * Roughly v_page_count / PA_LOCK_COUNT pages are mapped to a given
- * batch queue; ensure that no more than 0.1% of them may be queued in
- * the batch queue for a particular page queue. Then no more than
- * 0.1% * PQ_COUNT can be queued across all page queues. This gives a
- * per-page queue batch limit of 1 page per GB of memory on amd64.
- */
-
- lim = MAX(vmd->vmd_page_count / 1000 / BPQ_COUNT, 8);
- for (i = 0; i < PQ_COUNT; i++)
- for (j = 0; j < BPQ_COUNT; j++)
- vmd->vmd_pagequeues[i].pq_bpqs[j].bpq_lim = lim;
/* Initialize the pageout daemon pid controller. */
pidctrl_init(&vmd->vmd_pid, hz / VM_INACT_SCAN_RATE,
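The comment removed above sizes bpq_lim at roughly
v_page_count / 1000 / BPQ_COUNT, with a floor of 8. A worked example of
that arithmetic, assuming 4 KB pages and a BPQ_COUNT (PA_LOCK_COUNT) of
256 as on amd64; the constants are assumptions, not from this commit:

    #include <stdio.h>

    #define MAX(a, b) (((a) > (b)) ? (a) : (b)) /* as in sys/param.h */

    int
    main(void)
    {
            const long bpq_count = 256;     /* assumed PA_LOCK_COUNT */
            const long pages_per_gb = (1024L * 1024 * 1024) / 4096;

            for (long gb = 1; gb <= 16; gb *= 4) {
                    long raw = gb * pages_per_gb / 1000 / bpq_count;
                    printf("%2ld GB: raw limit %ld, bpq_lim %ld\n",
                        gb, raw, MAX(raw, 8));
            }
            /* Roughly 1 page per GB; the floor of 8 governs below ~8 GB. */
            return (0);
    }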
Modified: user/markj/vm-playground/sys/vm/vm_pagequeue.h
==============================================================================
--- user/markj/vm-playground/sys/vm/vm_pagequeue.h Thu Mar 1 17:47:28 2018 (r330245)
+++ user/markj/vm-playground/sys/vm/vm_pagequeue.h Thu Mar 1 18:11:03 2018 (r330246)
@@ -66,23 +66,11 @@
#define _VM_PAGEQUEUE_
#ifdef _KERNEL
-
-#define BPQ_COUNT PA_LOCK_COUNT
-#define BPQ_IDX(m) (pa_index(VM_PAGE_TO_PHYS(m)) % BPQ_COUNT)
-
-struct vm_batchqueue {
- struct pglist bpq_pl;
- int bpq_cnt;
- int bpq_lim;
-} __aligned(CACHE_LINE_SIZE);
-
struct vm_pagequeue {
struct mtx pq_mutex;
struct pglist pq_pl;
int pq_cnt;
const char * const pq_name;
- char _pq_pad[0] __aligned(CACHE_LINE_SIZE);
- struct vm_batchqueue pq_bpqs[BPQ_COUNT];
} __aligned(CACHE_LINE_SIZE);
#include <vm/uma.h>
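For reference, the deleted BPQ_IDX() keyed a page's batch queue to
pa_index(VM_PAGE_TO_PHYS(m)), the same hash that selects the page's
lock, which is why insertion into a batch queue needed only the page
lock already held. A sketch of the mapping, assuming pa_index() works
at 2 MB superpage granularity as the amd64 page-lock hash does (the
constants here are assumptions):

    #include <stdio.h>
    #include <stdint.h>

    #define BPQ_COUNT   256  /* assumed PA_LOCK_COUNT */
    #define PDRSHIFT    21   /* 2 MB granularity, assumed amd64 value */
    #define pa_index(pa)    ((pa) >> PDRSHIFT)
    #define BPQ_IDX(pa)     (pa_index(pa) % BPQ_COUNT)

    int
    main(void)
    {
            /* Pages in the same 2 MB run share a lock and a batch queue. */
            for (uint64_t pa = 0; pa < (4 << 20); pa += 1 << 20)
                    printf("pa %#jx -> batch queue %ju\n",
                        (uintmax_t)pa, (uintmax_t)BPQ_IDX(pa));
            return (0);
    }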