PERFORCE change 213088 for review
John Baldwin
jhb at FreeBSD.org
Mon Jun 18 19:50:59 UTC 2012
http://p4web.freebsd.org/@@213088?ac=10
Change 213088 by jhb at jhb_jhbbsd on 2012/06/18 19:50:53
More hacking.
Affected files ...
.. //depot/projects/fadvise/sys/vm/vm_page.c#9 edit
.. //depot/projects/fadvise/sys/vm/vm_phys.c#7 edit
.. //depot/projects/fadvise/sys/vm/vm_reserv.c#5 edit
.. //depot/projects/fadvise/sys/vm/vm_reserv.h#4 edit
Differences ...
==== //depot/projects/fadvise/sys/vm/vm_page.c#9 (text+ko) ====
@@ -1187,6 +1187,17 @@
m->flags ^= PG_CACHED | PG_FREE;
KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE,
("vm_page_cache_free: page %p has inconsistent flags", m));
+#if 1
+ if (vm_phys_unfree_page(m)) {
+ vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, 0);
+ vm_phys_free_pages(m, 0);
+#if VM_NRESERVLEVEL > 0
+ } else if (!vm_reserv_cache_free(m))
+#else
+ } else
+#endif
+ panic("cache page is not free");
+#endif
cnt.v_cache_count--;
cnt.v_free_count++;
}
==== //depot/projects/fadvise/sys/vm/vm_phys.c#7 (text+ko) ====
@@ -36,6 +36,8 @@
* virtual memory system.
*/
+#define CACHE_SEPARATE
+
#include <sys/cdefs.h>
__FBSDID("$FreeBSD: src/sys/vm/vm_phys.c,v 1.28 2012/05/12 20:42:56 kib Exp $");
@@ -133,6 +135,9 @@
static int vm_phys_uc_alloc_pages;
SYSCTL_INT(_vm, OID_AUTO, phys_uc_alloc_pages, CTLFLAG_RD,
&vm_phys_uc_alloc_pages, 0, "");
+static int vm_phys_uc_alloc_pages2;
+SYSCTL_INT(_vm, OID_AUTO, phys_uc_alloc_pages2, CTLFLAG_RD,
+ &vm_phys_uc_alloc_pages2, 0, "");
static int vm_phys_uc_free_pages;
SYSCTL_INT(_vm, OID_AUTO, phys_uc_free_pages, CTLFLAG_RD,
&vm_phys_uc_free_pages, 0, "");
@@ -464,6 +469,11 @@
struct vm_freelist *alt;
int domain, oind, pind;
vm_page_t m;
+#ifdef CACHE_SEPARATE
+ struct vm_phys_seg *seg;
+ vm_paddr_t pa;
+ vm_page_t m_next, m_buddy;
+#endif
KASSERT(flind < VM_NFREELIST,
("vm_phys_alloc_freelist_pages: freelist %d is out of range", flind));
@@ -514,11 +524,68 @@
}
}
+#ifdef CACHE_SEPARATE
/*
* XXX: If we get here, do deferred merging of cache pages
* with pages from another pool to satisfy the request and
- * try again. This may be quite hard to do.
+ * try again. This may be quite hard to do, and certainly
+ * not very efficient.
*/
+	/*
+	 * Deferred merging: walk the cache-pool free lists at each order
+	 * below the request and try to coalesce a cache block with its
+	 * buddies (possibly from other pools) until a block of the
+	 * requested order is assembled.
+	 *
+	 * NOTE(review): order-0 cache pages are never considered
+	 * (oind > 0) -- confirm that skipping order 0 is intentional.
+	 */
+	alt = (*vm_phys_lookup_lists[domain][flind])[VM_FREEPOOL_CACHE];
+	for (oind = order - 1; oind > 0; oind--) {
+		TAILQ_FOREACH_SAFE(m, &alt[oind].pl, pageq, m_next) {
+			struct vm_freelist *fl2;
+			int newoind;
+
+			seg = &vm_phys_segs[m->segind];
+			pa = VM_PAGE_TO_PHYS(m);
+			/*
+			 * Unlink 'm' before merging: the success path
+			 * returns it and the failure path re-inserts it,
+			 * so it must not stay linked on 'alt[oind].pl'.
+			 */
+			TAILQ_REMOVE(&alt[oind].pl, m, pageq);
+			alt[oind].lcnt--;
+			m->order = VM_NFREEORDER;
+			newoind = oind;
+			do {
+				pa ^= ((vm_paddr_t)1 << (PAGE_SHIFT + newoind));
+				if (pa < seg->start || pa >= seg->end)
+					break;
+				m_buddy = &seg->first_page[atop(pa - seg->start)];
+				if (m_buddy->order != newoind)
+					break;
+				fl2 = (*seg->free_queues)[m_buddy->pool];
+				/*
+				 * Two same-sized free buddies in the cache
+				 * pool would already have been coalesced,
+				 * so the buddy cannot be the saved
+				 * iteration cursor.
+				 */
+				KASSERT(m_buddy != m_next,
+				    ("identical buddies"));
+				TAILQ_REMOVE(&fl2[newoind].pl, m_buddy, pageq);
+				/*
+				 * Decrement the count of the list the buddy
+				 * actually came from ('order' was a stale
+				 * index here).
+				 */
+				fl2[newoind].lcnt--;
+				m_buddy->order = VM_NFREEORDER;
+				if (m_buddy->pool != m->pool) {
+					if (m->pool == VM_FREEPOOL_CACHE) {
+						vm_phys_uc_alloc_pages2++;
+						vm_phys_set_pool(m_buddy->pool,
+						    m, newoind);
+					} else {
+						if (m_buddy->pool ==
+						    VM_FREEPOOL_CACHE)
+							vm_phys_uc_alloc_pages2++;
+						vm_phys_set_pool(m->pool,
+						    m_buddy, newoind);
+					}
+				}
+				newoind++;
+				pa &= ~(((vm_paddr_t)1 <<
+				    (PAGE_SHIFT + newoind)) - 1);
+				m = &seg->first_page[atop(pa - seg->start)];
+			} while (newoind < order);
+			if (newoind == order) {
+				/* Fully merged; hand the block out. */
+				if (m->pool != pool)
+					vm_phys_set_pool(pool, m, order);
+				return (m);
+			}
+			/*
+			 * Partial (or no) merge: return the block to the
+			 * free lists at its current order.  When no merge
+			 * occurred the block goes back onto the list being
+			 * iterated; insert it at the head so the saved
+			 * cursor never revisits it.
+			 */
+			m->order = newoind;
+			fl2 = (*seg->free_queues)[m->pool];
+			if (newoind == oind)
+				TAILQ_INSERT_HEAD(&fl2[newoind].pl, m, pageq);
+			else
+				TAILQ_INSERT_TAIL(&fl2[newoind].pl, m, pageq);
+			fl2[newoind].lcnt++;
+		}
+	}
+#endif
return (NULL);
}
@@ -695,17 +762,18 @@
m_buddy = &seg->first_page[atop(pa - seg->start)];
if (m_buddy->order != order)
break;
+#ifdef CACHE_SEPARATE
+ if (m_buddy->pool != m->pool &&
+ (m_buddy->pool == VM_FREEPOOL_CACHE ||
+ m->pool == VM_FREEPOOL_CACHE))
+ break;
+#endif
fl = (*seg->free_queues)[m_buddy->pool];
TAILQ_REMOVE(&fl[order].pl, m_buddy, pageq);
fl[order].lcnt--;
m_buddy->order = VM_NFREEORDER;
if (m_buddy->pool != m->pool) {
#if 1
-#if 1
- if (m_buddy->pool == VM_FREEPOOL_CACHE ||
- m->pool == VM_FREEPOOL_CACHE)
- break;
-#endif
if (m_buddy->pool == VM_FREEPOOL_CACHE)
vm_phys_uc_free_pages++;
vm_phys_set_pool(m->pool, m_buddy, order);
==== //depot/projects/fadvise/sys/vm/vm_reserv.c#5 (text+ko) ====
@@ -640,6 +640,24 @@
}
/*
+ * Notes that a cached page has been moved to the free lists.  Returns
+ * TRUE if the page belongs to a reservation.
+ *
+ * The free page queue lock must be held.
+ */
+boolean_t
+vm_reserv_cache_free(vm_page_t m)
+{
+
+	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
+	/* The page is covered by a reservation iff it has an object. */
+	return (vm_reserv_from_page(m)->object != NULL);
+}
+
+/*
* Frees the given page if it belongs to a reservation. Returns TRUE if the
* page is freed and FALSE otherwise.
*
==== //depot/projects/fadvise/sys/vm/vm_reserv.h#4 (text+ko) ====
@@ -50,6 +50,7 @@
u_long alignment, vm_paddr_t boundary);
vm_page_t vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex);
void vm_reserv_break_all(vm_object_t object);
+boolean_t vm_reserv_cache_free(vm_page_t m);
boolean_t vm_reserv_free_page(vm_page_t m);
void vm_reserv_init(void);
int vm_reserv_level_iffullpop(vm_page_t m);
More information about the p4-projects
mailing list