svn commit: r226873 - user/attilio/vmcontention/sys/vm

Attilio Rao attilio at FreeBSD.org
Fri Oct 28 01:56:37 UTC 2011


Author: attilio
Date: Fri Oct 28 01:56:36 2011
New Revision: 226873
URL: http://svn.freebsd.org/changeset/base/226873

Log:
  Use a UMA zone for the radix nodes.  This removes the need to check
  for kernel_map/kmem_map recursion, because the zone relies on the
  direct mapping provided by amd64 and thus skips the object and map
  lookups (and the recursion they entail) entirely.
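  
  To see why this works, here is a condensed sketch (not the verbatim
  code) of what an UMA_MD_SMALL_ALLOC backend does on amd64, in the
  spirit of sys/amd64/amd64/uma_machdep.c, with the flag handling and
  the zeroing path elided:
  
  	void *
  	uma_small_alloc(uma_zone_t zone, int bytes, uint8_t *flags,
  	    int wait)
  	{
  		vm_page_t m;
  
  		/* Grab a wired page that belongs to no VM object. */
  		m = vm_page_alloc(NULL, 0,
  		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_SYSTEM);
  		if (m == NULL)
  			return (NULL);
  		*flags = UMA_SLAB_PRIV;
  		/*
  		 * Hand the page back through the 1:1 direct map: no KVA
  		 * allocation and no map or object lookup take place, so
  		 * there is nothing to recurse on.
  		 */
  		return ((void *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
  	}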
  
  Probably all the other architectures using UMA_MD_SMALL_ALLOC are
  fixed by this as well, but others remain broken, the most notable
  case being i386.  For it a solution still has to be determined.  One
  way to do it would be to have a map reserved just for radix nodes and
  to mark all accesses to its lock as witness-safe, but that would
  still be suboptimal due to the large amount of virtual address space
  needed to cater for the whole tree.
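  
  For illustration only, that reserved-map idea could look like the
  following hypothetical sketch (rnode_map and vm_radix_reserve_kva()
  are made-up names, not part of this commit; the node count mirrors
  the reservation loop removed from vm_page_startup() below):
  
  	static vm_map_t rnode_map;
  
  	static void
  	vm_radix_reserve_kva(void)
  	{
  		vm_offset_t minaddr, maxaddr;
  		vm_pindex_t size;
  		vm_size_t nnodes;
  
  		/*
  		 * Worst-case number of nodes needed to span the whole
  		 * kernel address space, accumulated level by level.
  		 */
  		nnodes = 0;
  		size = OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
  		    VM_MIN_KERNEL_ADDRESS) * 2;
  		while (size != 0) {
  			nnodes += size / VM_RADIX_COUNT;
  			size /= VM_RADIX_COUNT;
  		}
  
  		/*
  		 * Carve a dedicated submap out of kernel_map.  Its lock
  		 * would additionally have to be flagged witness-safe, so
  		 * that taking it with kernel_map already locked does not
  		 * trigger WITNESS; the KVA eaten by this reservation is
  		 * what makes the approach suboptimal.
  		 */
  		rnode_map = kmem_suballoc(kernel_map, &minaddr, &maxaddr,
  		    round_page(nnodes * sizeof(struct vm_radix_node)),
  		    FALSE);
  	}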

Modified:
  user/attilio/vmcontention/sys/vm/vm_init.c
  user/attilio/vmcontention/sys/vm/vm_page.c
  user/attilio/vmcontention/sys/vm/vm_radix.c
  user/attilio/vmcontention/sys/vm/vm_radix.h

Modified: user/attilio/vmcontention/sys/vm/vm_init.c
==============================================================================
--- user/attilio/vmcontention/sys/vm/vm_init.c	Fri Oct 28 01:10:59 2011	(r226872)
+++ user/attilio/vmcontention/sys/vm/vm_init.c	Fri Oct 28 01:56:36 2011	(r226873)
@@ -82,6 +82,7 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_kern.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
+#include <vm/vm_radix.h>
 #include <vm/vm_map.h>
 #include <vm/vm_pager.h>
 #include <vm/vm_extern.h>
@@ -123,6 +124,7 @@ vm_mem_init(dummy)
 	vm_object_init();
 	vm_map_startup();
 	kmem_init(virtual_avail, virtual_end);
+	vm_radix_init();
 	pmap_init();
 	vm_pager_init();
 }

Modified: user/attilio/vmcontention/sys/vm/vm_page.c
==============================================================================
--- user/attilio/vmcontention/sys/vm/vm_page.c	Fri Oct 28 01:10:59 2011	(r226872)
+++ user/attilio/vmcontention/sys/vm/vm_page.c	Fri Oct 28 01:56:36 2011	(r226873)
@@ -122,13 +122,6 @@ struct vpglocks vm_page_queue_free_lock;
 
 struct vpglocks	pa_lock[PA_LOCK_COUNT];
 
-#ifdef VM_RADIX
-extern SLIST_HEAD(, vm_radix_node) res_rnodes_head;
-extern struct mtx rnode_lock;
-extern vm_offset_t rnode_start;
-extern vm_offset_t rnode_end;
-#endif
-
 vm_page_t vm_page_array = 0;
 int vm_page_array_size = 0;
 long first_page = 0;
@@ -260,10 +253,6 @@ vm_page_startup(vm_offset_t vaddr)
 	vm_paddr_t pa;
 	vm_paddr_t last_pa;
 	char *list;
-#ifdef VM_RADIX
-	unsigned int rtree_res_count;
-	vm_pindex_t size;
-#endif
 
 	/* the biggest memory array is the second group of pages */
 	vm_paddr_t end;
@@ -323,34 +312,6 @@ vm_page_startup(vm_offset_t vaddr)
 	vm_page_queues[PQ_INACTIVE].cnt = &cnt.v_inactive_count;
 	vm_page_queues[PQ_ACTIVE].cnt = &cnt.v_active_count;
 	vm_page_queues[PQ_HOLD].cnt = &cnt.v_active_count;
-#ifdef VM_RADIX
-	mtx_init(&rnode_lock, "radix node", NULL, MTX_SPIN);
-	/*
-	 * Reserve memory for radix nodes.  Allocate enough nodes so that
-	 * insert on kernel_object will not result in recurrsion.
-	 */
-	size = OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) * 2;
-	rtree_res_count = 0;
-	while (size != 0) {
-		rtree_res_count += size / VM_RADIX_COUNT;
-		size /= VM_RADIX_COUNT;
-	}
-	printf("Allocated %d tree pages for %lu bytes of memory.\n",
-	    rtree_res_count, VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS);
-	new_end = end - (rtree_res_count * sizeof(struct vm_radix_node));
-	new_end = trunc_page(new_end);
-	mapped = pmap_map(&vaddr, new_end, end,
-	    VM_PROT_READ | VM_PROT_WRITE);
-	bzero((void *)mapped, end - new_end);
-	end = new_end;
-	rnode_start = mapped;
-	for (i = 0; i < rtree_res_count; i++) {
-		SLIST_INSERT_HEAD(&res_rnodes_head,
-		    (struct vm_radix_node *)mapped, next);
-		mapped += sizeof(struct vm_radix_node);
-	}
-	rnode_end = mapped;
-#endif
 	/*
 	 * Allocate memory for use when boot strapping the kernel memory
 	 * allocator.
@@ -902,7 +863,8 @@ vm_page_insert(vm_page_t m, vm_object_t 
 		} else 
 			TAILQ_INSERT_TAIL(&object->memq, m, listq);
 	}
-	vm_radix_insert(&object->rtree, pindex, m);
+	if (vm_radix_insert(&object->rtree, pindex, m) != 0)
+		panic("vm_page_insert: unable to insert the new page");
 #else
 	/*
 	 * Now link into the object's ordered list of backed pages.

Modified: user/attilio/vmcontention/sys/vm/vm_radix.c
==============================================================================
--- user/attilio/vmcontention/sys/vm/vm_radix.c	Fri Oct 28 01:10:59 2011	(r226872)
+++ user/attilio/vmcontention/sys/vm/vm_radix.c	Fri Oct 28 01:56:36 2011	(r226873)
@@ -43,19 +43,99 @@
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/ktr.h>
+#include <vm/uma.h>
 #include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
 #include <vm/vm_radix.h>
 #include <vm/vm_object.h>
 
 #include <sys/kdb.h>
 
+CTASSERT(sizeof(struct vm_radix_node) < PAGE_SIZE);
 
-SLIST_HEAD(, vm_radix_node) res_rnodes_head =
-    SLIST_HEAD_INITIALIZER(res_rnodes_head);
+static uma_zone_t vm_radix_node_zone;
 
-struct mtx rnode_lock;
-vm_offset_t rnode_start;
-vm_offset_t rnode_end;
+#if 0
+static void *
+vm_radix_node_zone_allocf(uma_zone_t zone, int size, uint8_t *flags, int wait)
+{
+	vm_offset_t addr;
+	vm_page_t m;
+	int pflags;
+
+	/* Inform UMA that this allocator uses kernel_map. */
+	*flags = UMA_SLAB_KERNEL;
+
+	pflags = VM_ALLOC_WIRED | VM_ALLOC_NOOBJ;
+
+	/*
+	 * kmem_alloc_nofault() can fail, however, so just assume that
+	 * M_NOWAIT is set and act accordingly.
+	 */
+	pflags |= ((wait & M_USE_RESERVE) != 0) ? VM_ALLOC_INTERRUPT :
+	    VM_ALLOC_SYSTEM;
+	if ((wait & M_ZERO) != 0)
+		pflags |= VM_ALLOC_ZERO; 
+	addr = kmem_alloc_nofault(kernel_map, size);
+	if (addr == 0)
+		return (NULL);
+
+	/* Just one page allocation is assumed here. */
+	m = vm_page_alloc(NULL, OFF_TO_IDX(addr - VM_MIN_KERNEL_ADDRESS),
+	    pflags);
+	if (m == NULL) {
+		kmem_free(kernel_map, addr, size);
+		return (NULL);
+	}
+	if ((wait & M_ZERO) != 0 && (m->flags & PG_ZERO) == 0)
+		pmap_zero_page(m);
+	pmap_qenter(addr, &m, 1);
+	return ((void *)addr);
+}
+
+static void
+vm_radix_node_zone_freef(void *item, int size, uint8_t flags)
+{
+	vm_page_t m;
+	vm_offset_t voitem;
+
+	MPASS((flags & UMA_SLAB_KERNEL) != 0);
+
+	/* Just one page allocation is assumed here. */
+	voitem = (vm_offset_t)item;
+	m = PHYS_TO_VM_PAGE(pmap_kextract(voitem));
+	pmap_qremove(voitem, 1);
+	vm_page_free(m);
+	kmem_free(kernel_map, voitem, size);
+}
+
+static void
+init_vm_radix_alloc(void *dummy __unused)
+{
+
+	uma_zone_set_allocf(vm_radix_node_zone, vm_radix_node_zone_allocf);
+	uma_zone_set_freef(vm_radix_node_zone, vm_radix_node_zone_freef);
+}
+SYSINIT(vm_radix, SI_SUB_KMEM, SI_ORDER_SECOND, init_vm_radix_alloc, NULL);
+#endif
+
+/*
+ * Radix node zone destructor.
+ */
+#ifdef INVARIANTS
+static void
+vm_radix_node_zone_dtor(void *mem, int size, void *arg)
+{
+	struct vm_radix_node *rnode;
+
+	rnode = mem;
+	KASSERT(rnode->rn_count == 0,
+	    ("vm_radix_node_put: Freeing a node with %d children\n",
+	    rnode->rn_count));
+}
+#endif
 
 /*
  * Allocate a radix node.  Initializes all elements to 0.
@@ -63,28 +143,8 @@ vm_offset_t rnode_end;
 static struct vm_radix_node *
 vm_radix_node_get(void)
 {
-	struct vm_radix_node *rnode;
 
-	if (VM_OBJECT_LOCKED(kernel_object) || VM_OBJECT_LOCKED(kmem_object)){
-		mtx_lock_spin(&rnode_lock);
-		if (!SLIST_EMPTY(&res_rnodes_head)) {
-			rnode = SLIST_FIRST(&res_rnodes_head);
-			SLIST_REMOVE_HEAD(&res_rnodes_head, next);
-			mtx_unlock_spin(&rnode_lock);
-			bzero((void *)rnode, sizeof(*rnode));
-			goto out;
-		} 
-		mtx_unlock_spin(&rnode_lock);
-		panic("No memory for kernel_object. . .");
-	}
-	rnode = malloc(sizeof(struct vm_radix_node), M_TEMP, M_NOWAIT | M_ZERO);
-	if (rnode == NULL) {
-		panic("vm_radix_node_get: Can not allocate memory\n");
-		return NULL;
-	}
-out:
-
-	return rnode;
+	return (uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO));
 }
 
 /*
@@ -94,16 +154,7 @@ static void
 vm_radix_node_put(struct vm_radix_node *rnode)
 {
 
-	KASSERT(rnode->rn_count == 0,
-	    ("vm_radix_node_put: Freeing a node with %d children\n",
-	    rnode->rn_count));
-	if ((vm_offset_t)rnode >= rnode_start &&
-	    (vm_offset_t)rnode < rnode_end) {
-		mtx_lock_spin(&rnode_lock);
-		SLIST_INSERT_HEAD(&res_rnodes_head, rnode, next);
-		mtx_unlock_spin(&rnode_lock);
-	} else
-		free(rnode,M_TEMP);
+	uma_zfree(vm_radix_node_zone, rnode);
 }
 
 /*
@@ -116,6 +167,20 @@ vm_radix_slot(vm_pindex_t index, int lev
 	return ((index >> (level * VM_RADIX_WIDTH)) & VM_RADIX_MASK);
 }
 
+void
+vm_radix_init(void)
+{
+
+	vm_radix_node_zone = uma_zcreate("RADIX NODE",
+	    sizeof(struct vm_radix_node), NULL,
+#ifdef INVARIANTS
+	    vm_radix_node_zone_dtor,
+#else
+	    NULL,
+#endif
+	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM);
+}
+
 /*
  * Inserts the key-value pair in to the radix tree.  Returns errno.
  * Panics if the key already exists.

Modified: user/attilio/vmcontention/sys/vm/vm_radix.h
==============================================================================
--- user/attilio/vmcontention/sys/vm/vm_radix.h	Fri Oct 28 01:10:59 2011	(r226872)
+++ user/attilio/vmcontention/sys/vm/vm_radix.h	Fri Oct 28 01:56:36 2011	(r226873)
@@ -52,6 +52,7 @@ struct vm_radix {
 	int 	rt_height; 			/* Number of levels + 1. */
 };
 
+void	vm_radix_init(void);
 int 	vm_radix_insert(struct vm_radix *, vm_pindex_t, void *);
 void	*vm_radix_remove(struct vm_radix *, vm_pindex_t);
 void	*vm_radix_lookup(struct vm_radix *, vm_pindex_t);

