svn commit: r247360 - in head/sys: arm/arm powerpc/booke vm
Attilio Rao
attilio at FreeBSD.org
Tue Feb 26 23:35:29 UTC 2013
Author: attilio
Date: Tue Feb 26 23:35:27 2013
New Revision: 247360
URL: http://svnweb.freebsd.org/changeset/base/247360
Log:
Merge from vmc-playground branch:
Replace the sub-optimal uma_zone_set_obj() primitive with the more
modern uma_zone_reserve_kva(). The new primitive reserves the KVA
space necessary to satisfy the zone's allocations beforehand and
allocates pages with VM_ALLOC_NOOBJ. More specifically:
- uma_zone_reserve_kva() does not need a VM object to back the
allocator.
- uma_zone_reserve_kva() can satisfy M_WAITOK requests, so it can
serve zones which also need to do uma_prealloc().
- When possible, uma_zone_reserve_kva() uses the direct map via
uma_small_alloc() rather than relying on the KVA / offset
combination.
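A minimal sketch of the call-site conversion (using the pv-entry zone
from the pmap hunks below as the example):

	/* Before: the caller supplied a static VM object as backing store. */
	static struct vm_object pvzone_obj;	/* removed by this commit */

	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);

	/*
	 * After: the zone itself reserves KVA for up to pv_entry_max
	 * items (or, with UMA_MD_SMALL_ALLOC and items smaller than a
	 * page, uses the direct map instead).  Returns 0 on failure,
	 * 1 on success.
	 */
	uma_zone_reserve_kva(pvzone, pv_entry_max);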
The removal of the object attribute allows two further changes:
1) _vm_object_allocate() becomes static within vm_object.c.
2) VM_OBJECT_LOCK_INIT() is removed. The macro is replaced by direct
calls to mtx_init(), as there is no longer any need to export it and
the calls are no longer homogeneous: there are now small differences
between the arguments passed to mtx_init() (see the sketch below).
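For reference, a minimal sketch of the three call sites, taken
verbatim from the vm_object.c hunks below:

	/* Ordinary objects, initialized in vm_object_zinit(): */
	mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);

	/* The static kernel and kmem objects pass a distinct lock type: */
	mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
	mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);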
Sponsored by: EMC / Isilon storage division
Reviewed by: alc (who also offered almost all the comments)
Tested by: pho, jhb, davide
Modified:
head/sys/arm/arm/pmap-v6.c
head/sys/arm/arm/pmap.c
head/sys/powerpc/booke/pmap.c
head/sys/vm/swap_pager.c
head/sys/vm/uma.h
head/sys/vm/uma_core.c
head/sys/vm/uma_int.h
head/sys/vm/vm_map.c
head/sys/vm/vm_object.c
head/sys/vm/vm_object.h
Modified: head/sys/arm/arm/pmap-v6.c
==============================================================================
--- head/sys/arm/arm/pmap-v6.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/arm/arm/pmap-v6.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -392,7 +392,6 @@ static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
-static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;
@@ -1164,7 +1163,7 @@ pmap_init(void)
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
- uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+ uma_zone_reserve_kva(pvzone, pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
/*
Modified: head/sys/arm/arm/pmap.c
==============================================================================
--- head/sys/arm/arm/pmap.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/arm/arm/pmap.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -397,7 +397,6 @@ static uma_zone_t l2table_zone;
static vm_offset_t pmap_kernel_l2dtable_kva;
static vm_offset_t pmap_kernel_l2ptp_kva;
static vm_paddr_t pmap_kernel_l2ptp_phys;
-static struct vm_object pvzone_obj;
static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
static struct rwlock pvh_global_lock;
@@ -1828,7 +1827,7 @@ pmap_init(void)
NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
- uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+ uma_zone_reserve_kva(pvzone, pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
/*
Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/powerpc/booke/pmap.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -217,7 +217,6 @@ static struct rwlock_padalign pvh_global
/* Data for the pv entry allocation mechanism */
static uma_zone_t pvzone;
-static struct vm_object pvzone_obj;
static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
#define PV_ENTRY_ZONE_MIN 2048 /* min pv entries in uma zone */
@@ -1343,7 +1342,7 @@ mmu_booke_init(mmu_t mmu)
TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
pv_entry_high_water = 9 * (pv_entry_max / 10);
- uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+ uma_zone_reserve_kva(pvzone, pv_entry_max);
/* Pre-fill pvzone with initial number of pv entries. */
uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
Modified: head/sys/vm/swap_pager.c
==============================================================================
--- head/sys/vm/swap_pager.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/swap_pager.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -343,7 +343,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max
static struct mtx sw_alloc_mtx; /* protect list manipulation */
static struct pagerlst swap_pager_object_list[NOBJLISTS];
static uma_zone_t swap_zone;
-static struct vm_object swap_zone_obj;
/*
* pagerops for OBJT_SWAP - "swap pager". Some ops are also global procedure
@@ -554,7 +553,7 @@ swap_pager_swap_init(void)
if (swap_zone == NULL)
panic("failed to create swap_zone.");
do {
- if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
+ if (uma_zone_reserve_kva(swap_zone, n))
break;
/*
* if the allocation failed, try a zone two thirds the
Modified: head/sys/vm/uma.h
==============================================================================
--- head/sys/vm/uma.h Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/uma.h Tue Feb 26 23:35:27 2013 (r247360)
@@ -432,24 +432,23 @@ void uma_reclaim(void);
void uma_set_align(int align);
/*
- * Switches the backing object of a zone
+ * Reserves the maximum KVA space required by the zone and configures the zone
+ * to use a VM_ALLOC_NOOBJ-based backend allocator.
*
* Arguments:
* zone The zone to update.
- * obj The VM object to use for future allocations.
- * size The size of the object to allocate.
+ * nitems The upper limit on the number of items that can be allocated.
*
* Returns:
- * 0 if kva space can not be allocated
+ * 0 if KVA space can not be allocated
* 1 if successful
*
* Discussion:
- * A NULL object can be used and uma will allocate one for you. Setting
- * the size will limit the amount of memory allocated to this zone.
- *
+ * When the machine supports a direct map and the zone's items are smaller
+ * than a page, the zone will use the direct map instead of allocating KVA
+ * space.
*/
-struct vm_object;
-int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
+int uma_zone_reserve_kva(uma_zone_t zone, int nitems);
/*
* Sets a high limit on the number of items allowed in a zone
@@ -521,7 +520,7 @@ void uma_zone_set_zinit(uma_zone_t zone,
void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini);
/*
- * Replaces the standard page_alloc or obj_alloc functions for this zone
+ * Replaces the standard backend allocator for this zone.
*
* Arguments:
* zone The zone whose backend allocator is being changed.
Modified: head/sys/vm/uma_core.c
==============================================================================
--- head/sys/vm/uma_core.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/uma_core.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
@@ -213,7 +214,7 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, S
/* Prototypes.. */
-static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
+static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int);
static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
static void page_free(void *, int, u_int8_t);
@@ -1030,50 +1031,53 @@ page_alloc(uma_zone_t zone, int bytes, u
* NULL if M_NOWAIT is set.
*/
static void *
-obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
- vm_object_t object;
+ TAILQ_HEAD(, vm_page) alloctail;
+ u_long npages;
vm_offset_t retkva, zkva;
- vm_page_t p;
- int pages, startpages;
+ vm_page_t p, p_next;
uma_keg_t keg;
+ TAILQ_INIT(&alloctail);
keg = zone_first_keg(zone);
- object = keg->uk_obj;
- retkva = 0;
- /*
- * This looks a little weird since we're getting one page at a time.
- */
- VM_OBJECT_LOCK(object);
- p = TAILQ_LAST(&object->memq, pglist);
- pages = p != NULL ? p->pindex + 1 : 0;
- startpages = pages;
- zkva = keg->uk_kva + pages * PAGE_SIZE;
- for (; bytes > 0; bytes -= PAGE_SIZE) {
- p = vm_page_alloc(object, pages,
- VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
- if (p == NULL) {
- if (pages != startpages)
- pmap_qremove(retkva, pages - startpages);
- while (pages != startpages) {
- pages--;
- p = TAILQ_LAST(&object->memq, pglist);
- vm_page_unwire(p, 0);
- vm_page_free(p);
- }
- retkva = 0;
- goto done;
+ npages = howmany(bytes, PAGE_SIZE);
+ while (npages > 0) {
+ p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
+ VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+ if (p != NULL) {
+ /*
+ * Since the page does not belong to an object, its
+ * listq is unused.
+ */
+ TAILQ_INSERT_TAIL(&alloctail, p, listq);
+ npages--;
+ continue;
+ }
+ if (wait & M_WAITOK) {
+ VM_WAIT;
+ continue;
}
+
+ /*
+ * Page allocation failed, free intermediate pages and
+ * exit.
+ */
+ TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
+ vm_page_unwire(p, 0);
+ vm_page_free(p);
+ }
+ return (NULL);
+ }
+ *flags = UMA_SLAB_PRIV;
+ zkva = keg->uk_kva +
+ atomic_fetchadd_long(&keg->uk_offset, round_page(bytes));
+ retkva = zkva;
+ TAILQ_FOREACH(p, &alloctail, listq) {
pmap_qenter(zkva, &p, 1);
- if (retkva == 0)
- retkva = zkva;
zkva += PAGE_SIZE;
- pages += 1;
}
-done:
- VM_OBJECT_UNLOCK(object);
- *flags = UMA_SLAB_PRIV;
return ((void *)retkva);
}
@@ -3012,7 +3016,7 @@ uma_zone_set_allocf(uma_zone_t zone, uma
/* See uma.h */
int
-uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
+uma_zone_reserve_kva(uma_zone_t zone, int count)
{
uma_keg_t keg;
vm_offset_t kva;
@@ -3024,21 +3028,25 @@ uma_zone_set_obj(uma_zone_t zone, struct
if (pages * keg->uk_ipers < count)
pages++;
- kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
-
- if (kva == 0)
- return (0);
- if (obj == NULL)
- obj = vm_object_allocate(OBJT_PHYS, pages);
- else {
- VM_OBJECT_LOCK_INIT(obj, "uma object");
- _vm_object_allocate(OBJT_PHYS, pages, obj);
- }
+#ifdef UMA_MD_SMALL_ALLOC
+ if (keg->uk_ppera > 1) {
+#else
+ if (1) {
+#endif
+ kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+ if (kva == 0)
+ return (0);
+ } else
+ kva = 0;
ZONE_LOCK(zone);
keg->uk_kva = kva;
- keg->uk_obj = obj;
+ keg->uk_offset = 0;
keg->uk_maxpages = pages;
- keg->uk_allocf = obj_alloc;
+#ifdef UMA_MD_SMALL_ALLOC
+ keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
+#else
+ keg->uk_allocf = noobj_alloc;
+#endif
keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
ZONE_UNLOCK(zone);
return (1);
Modified: head/sys/vm/uma_int.h
==============================================================================
--- head/sys/vm/uma_int.h Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/uma_int.h Tue Feb 26 23:35:27 2013 (r247360)
@@ -221,8 +221,8 @@ struct uma_keg {
uma_alloc uk_allocf; /* Allocation function */
uma_free uk_freef; /* Free routine */
- struct vm_object *uk_obj; /* Zone specific object */
- vm_offset_t uk_kva; /* Base kva for zones with objs */
+ u_long uk_offset; /* Next free offset from base KVA */
+ vm_offset_t uk_kva; /* Zone base KVA */
uma_zone_t uk_slabzone; /* Slab zone backing us, if OFFPAGE */
u_int16_t uk_pgoff; /* Offset to uma_slab struct */
Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/vm_map.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -125,7 +125,6 @@ static uma_zone_t mapentzone;
static uma_zone_t kmapentzone;
static uma_zone_t mapzone;
static uma_zone_t vmspace_zone;
-static struct vm_object kmapentobj;
static int vmspace_zinit(void *mem, int size, int flags);
static void vmspace_zfini(void *mem, int size);
static int vm_map_zinit(void *mem, int ize, int flags);
@@ -303,7 +302,7 @@ vmspace_alloc(min, max)
void
vm_init2(void)
{
- uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+ uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
maxproc * 2 + maxfiles);
vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
Modified: head/sys/vm/vm_object.c
==============================================================================
--- head/sys/vm/vm_object.c Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/vm_object.c Tue Feb 26 23:35:27 2013 (r247360)
@@ -194,7 +194,7 @@ vm_object_zinit(void *mem, int size, int
object = (vm_object_t)mem;
bzero(&object->mtx, sizeof(object->mtx));
- VM_OBJECT_LOCK_INIT(object, "standard object");
+ mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
/* These are true for any object that has been freed */
object->paging_in_progress = 0;
@@ -203,7 +203,7 @@ vm_object_zinit(void *mem, int size, int
return (0);
}
-void
+static void
_vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
{
@@ -266,7 +266,7 @@ vm_object_init(void)
TAILQ_INIT(&vm_object_list);
mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
- VM_OBJECT_LOCK_INIT(kernel_object, "kernel object");
+ mtx_init(&kernel_object->mtx, "vm object", "kernel object", MTX_DEF);
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kernel_object);
#if VM_NRESERVLEVEL > 0
@@ -274,7 +274,7 @@ vm_object_init(void)
kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
#endif
- VM_OBJECT_LOCK_INIT(kmem_object, "kmem object");
+ mtx_init(&kmem_object->mtx, "vm object", "kmem object", MTX_DEF);
_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS),
kmem_object);
#if VM_NRESERVLEVEL > 0
Modified: head/sys/vm/vm_object.h
==============================================================================
--- head/sys/vm/vm_object.h Tue Feb 26 23:18:35 2013 (r247359)
+++ head/sys/vm/vm_object.h Tue Feb 26 23:35:27 2013 (r247360)
@@ -206,9 +206,6 @@ extern struct vm_object kmem_object_stor
#define VM_OBJECT_LOCK(object) mtx_lock(&(object)->mtx)
#define VM_OBJECT_LOCK_ASSERT(object, type) \
mtx_assert(&(object)->mtx, (type))
-#define VM_OBJECT_LOCK_INIT(object, type) \
- mtx_init(&(object)->mtx, "vm object", \
- (type), MTX_DEF | MTX_DUPOK)
#define VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx)
#define VM_OBJECT_SLEEP(object, wchan, pri, wmesg, timo) \
msleep((wchan), &(object)->mtx, (pri), \
@@ -234,7 +231,6 @@ void vm_object_pip_wakeupn(vm_object_t o
void vm_object_pip_wait(vm_object_t object, char *waitid);
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
-void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
boolean_t);
void vm_object_collapse (vm_object_t);