svn commit: r348525 - head/sys/vm

Alan Cox alc at FreeBSD.org
Mon Jun 3 05:15:38 UTC 2019


Author: alc
Date: Mon Jun  3 05:15:36 2019
New Revision: 348525
URL: https://svnweb.freebsd.org/changeset/base/348525

Log:
  Retire vm_reserv_extend_{contig,page}().  These functions were introduced
  as part of a false start toward fine-grained reservation locking.  In the
  end, they were not needed, so eliminate them.
  
  Order the parameters to vm_reserv_alloc_{contig,page}() consistently with
  the vm_page functions that call them.
  
  Update the comments about the locking requirements for
  vm_reserv_alloc_{contig,page}().  They no longer require a free page
  queues lock.
  
  Wrap several lines that became too long after the "req" and "domain"
  parameters were added to vm_reserv_alloc_{contig,page}().
  
  Reviewed by:	kib, markj
  MFC after:	1 week
  Differential Revision:	https://reviews.freebsd.org/D20492
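
For reference, a condensed sketch of the reworked interface. The prototypes mirror the vm_reserv.h hunk below, and the caller fragment is abridged from the first vm_page.c hunk; it is not a standalone, compilable excerpt. Both calls now assume that only the object write lock is held, not the free page queues lock.

	/* New parameter order: object, pindex, domain, req, mpred, ... */
	vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
			    int domain, int req, vm_page_t mpred);
	vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
			    int domain, int req, vm_page_t mpred, u_long npages,
			    vm_paddr_t low, vm_paddr_t high, u_long alignment,
			    vm_paddr_t boundary);

	/*
	 * Abridged caller, as in the first vm_page.c hunk; the object is
	 * write locked and the reservation path is tried in a single call.
	 */
	if (vm_object_reserv(object) &&
	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
	    NULL) {
		domain = vm_phys_domain(m);
		vmd = VM_DOMAIN(domain);
		goto found;
	}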

Modified:
  head/sys/vm/vm_page.c
  head/sys/vm/vm_reserv.c
  head/sys/vm/vm_reserv.h

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c	Mon Jun  3 04:38:07 2019	(r348524)
+++ head/sys/vm/vm_page.c	Mon Jun  3 05:15:36 2019	(r348525)
@@ -1834,8 +1834,8 @@ again:
 	 * Can we allocate the page from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
-	    ((m = vm_reserv_extend(req, object, pindex, domain, mpred)) != NULL ||
-	    (m = vm_reserv_alloc_page(req, object, pindex, domain, mpred)) != NULL)) {
+	    (m = vm_reserv_alloc_page(object, pindex, domain, req, mpred)) !=
+	    NULL) {
 		domain = vm_phys_domain(m);
 		vmd = VM_DOMAIN(domain);
 		goto found;
@@ -2039,10 +2039,8 @@ again:
 	 * Can we allocate the pages from a reservation?
 	 */
 	if (vm_object_reserv(object) &&
-	    ((m_ret = vm_reserv_extend_contig(req, object, pindex, domain,
-	    npages, low, high, alignment, boundary, mpred)) != NULL ||
-	    (m_ret = vm_reserv_alloc_contig(req, object, pindex, domain,
-	    npages, low, high, alignment, boundary, mpred)) != NULL)) {
+	    (m_ret = vm_reserv_alloc_contig(object, pindex, domain, req,
+	    mpred, npages, low, high, alignment, boundary)) != NULL) {
 		domain = vm_phys_domain(m_ret);
 		vmd = VM_DOMAIN(domain);
 		goto found;

Modified: head/sys/vm/vm_reserv.c
==============================================================================
--- head/sys/vm/vm_reserv.c	Mon Jun  3 04:38:07 2019	(r348524)
+++ head/sys/vm/vm_reserv.c	Mon Jun  3 05:15:36 2019	(r348525)
@@ -576,95 +576,8 @@ vm_reserv_populate(vm_reserv_t rv, int index)
 }
 
 /*
- * Attempts to allocate a contiguous set of physical pages from existing
- * reservations.  See vm_reserv_alloc_contig() for a description of the
- * function's parameters.
- *
- * The page "mpred" must immediately precede the offset "pindex" within the
- * specified object.
- *
- * The object must be locked.
- */
-vm_page_t
-vm_reserv_extend_contig(int req, vm_object_t object, vm_pindex_t pindex,
-    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
-    u_long alignment, vm_paddr_t boundary, vm_page_t mpred)
-{
-	struct vm_domain *vmd;
-	vm_paddr_t pa, size;
-	vm_page_t m, msucc;
-	vm_reserv_t rv;
-	int i, index;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-	KASSERT(npages != 0, ("vm_reserv_alloc_contig: npages is 0"));
-
-	/*
-	 * Is a reservation fundamentally impossible?
-	 */
-	if (pindex < VM_RESERV_INDEX(object, pindex) ||
-	    pindex + npages > object->size || object->resident_page_count == 0)
-		return (NULL);
-
-	/*
-	 * All reservations of a particular size have the same alignment.
-	 * Assuming that the first page is allocated from a reservation, the
-	 * least significant bits of its physical address can be determined
-	 * from its offset from the beginning of the reservation and the size
-	 * of the reservation.
-	 *
-	 * Could the specified index within a reservation of the smallest
-	 * possible size satisfy the alignment and boundary requirements?
-	 */
-	pa = VM_RESERV_INDEX(object, pindex) << PAGE_SHIFT;
-	if ((pa & (alignment - 1)) != 0)
-		return (NULL);
-	size = npages << PAGE_SHIFT;
-	if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
-		return (NULL);
-
-	/*
-	 * Look for an existing reservation.
-	 */
-	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
-	if (rv == NULL)
-		return (NULL);
-	KASSERT(object != kernel_object || rv->domain == domain,
-	    ("vm_reserv_extend_contig: Domain mismatch from reservation."));
-	index = VM_RESERV_INDEX(object, pindex);
-	/* Does the allocation fit within the reservation? */
-	if (index + npages > VM_LEVEL_0_NPAGES)
-		return (NULL);
-	domain = rv->domain;
-	vmd = VM_DOMAIN(domain);
-	vm_reserv_lock(rv);
-	if (rv->object != object)
-		goto out;
-	m = &rv->pages[index];
-	pa = VM_PAGE_TO_PHYS(m);
-	if (pa < low || pa + size > high || (pa & (alignment - 1)) != 0 ||
-	    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
-		goto out;
-	/* Handle vm_page_rename(m, new_object, ...). */
-	for (i = 0; i < npages; i++) {
-		if (popmap_is_set(rv->popmap, index + i))
-			goto out;
-	}
-	if (!vm_domain_allocate(vmd, req, npages))
-		goto out;
-	for (i = 0; i < npages; i++)
-		vm_reserv_populate(rv, index + i);
-	vm_reserv_unlock(rv);
-	return (m);
-
-out:
-	vm_reserv_unlock(rv);
-	return (NULL);
-}
-
-/*
  * Allocates a contiguous set of physical pages of the given size "npages"
- * from newly created reservations.  All of the physical pages
+ * from existing or newly created reservations.  All of the physical pages
  * must be at or above the given physical address "low" and below the given
  * physical address "high".  The given value "alignment" determines the
  * alignment of the first physical page in the set.  If the given value
@@ -672,18 +585,15 @@ out:
  * physical address boundary that is a multiple of that value.  Both
  * "alignment" and "boundary" must be a power of two.
  *
- * Callers should first invoke vm_reserv_extend_contig() to attempt an
- * allocation from existing reservations.
- *
  * The page "mpred" must immediately precede the offset "pindex" within the
  * specified object.
  *
- * The object and free page queue must be locked.
+ * The object must be locked.
  */
 vm_page_t
-vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex, int domain,
-    u_long npages, vm_paddr_t low, vm_paddr_t high, u_long alignment,
-    vm_paddr_t boundary, vm_page_t mpred)
+vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req, vm_page_t mpred, u_long npages, vm_paddr_t low, vm_paddr_t high,
+    u_long alignment, vm_paddr_t boundary)
 {
 	struct vm_domain *vmd;
 	vm_paddr_t pa, size;
@@ -721,13 +631,42 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm
 		return (NULL);
 
 	/*
-	 * Callers should've extended an existing reservation prior to
-	 * calling this function.  If a reservation exists it is
-	 * incompatible with the allocation.
+	 * Look for an existing reservation.
 	 */
 	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
-	if (rv != NULL)
+	if (rv != NULL) {
+		KASSERT(object != kernel_object || rv->domain == domain,
+		    ("vm_reserv_alloc_contig: domain mismatch"));
+		index = VM_RESERV_INDEX(object, pindex);
+		/* Does the allocation fit within the reservation? */
+		if (index + npages > VM_LEVEL_0_NPAGES)
+			return (NULL);
+		domain = rv->domain;
+		vmd = VM_DOMAIN(domain);
+		vm_reserv_lock(rv);
+		/* Handle reclaim race. */
+		if (rv->object != object)
+			goto out;
+		m = &rv->pages[index];
+		pa = VM_PAGE_TO_PHYS(m);
+		if (pa < low || pa + size > high ||
+		    (pa & (alignment - 1)) != 0 ||
+		    ((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
+			goto out;
+		/* Handle vm_page_rename(m, new_object, ...). */
+		for (i = 0; i < npages; i++)
+			if (popmap_is_set(rv->popmap, index + i))
+				goto out;
+		if (!vm_domain_allocate(vmd, req, npages))
+			goto out;
+		for (i = 0; i < npages; i++)
+			vm_reserv_populate(rv, index + i);
+		vm_reserv_unlock(rv);
+		return (m);
+out:
+		vm_reserv_unlock(rv);
 		return (NULL);
+	}
 
 	/*
 	 * Could at least one reservation fit between the first index to the
@@ -849,8 +788,7 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm
 }
 
 /*
- * Attempts to extend an existing reservation and allocate the page to the
- * object.
+ * Allocate a physical page from an existing or newly created reservation.
  *
  * The page "mpred" must immediately precede the offset "pindex" within the
  * specified object.
@@ -858,70 +796,11 @@ vm_reserv_alloc_contig(int req, vm_object_t object, vm
  * The object must be locked.
  */
 vm_page_t
-vm_reserv_extend(int req, vm_object_t object, vm_pindex_t pindex, int domain,
-    vm_page_t mpred)
+vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex, int domain,
+    int req, vm_page_t mpred)
 {
 	struct vm_domain *vmd;
 	vm_page_t m, msucc;
-	vm_reserv_t rv;
-	int index;
-
-	VM_OBJECT_ASSERT_WLOCKED(object);
-
-	/*
-	 * Could a reservation currently exist?
-	 */
-	if (pindex < VM_RESERV_INDEX(object, pindex) ||
-	    pindex >= object->size || object->resident_page_count == 0)
-		return (NULL);
-
-	/*
-	 * Look for an existing reservation.
-	 */
-	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
-	if (rv == NULL)
-		return (NULL);
-
-	KASSERT(object != kernel_object || rv->domain == domain,
-	    ("vm_reserv_extend: Domain mismatch from reservation."));
-	domain = rv->domain;
-	vmd = VM_DOMAIN(domain);
-	index = VM_RESERV_INDEX(object, pindex);
-	m = &rv->pages[index];
-	vm_reserv_lock(rv);
-	/* Handle reclaim race. */
-	if (rv->object != object ||
-	    /* Handle vm_page_rename(m, new_object, ...). */
-	    popmap_is_set(rv->popmap, index)) {
-		m = NULL;
-		goto out;
-	}
-	if (vm_domain_allocate(vmd, req, 1) == 0)
-		m = NULL;
-	else
-		vm_reserv_populate(rv, index);
-out:
-	vm_reserv_unlock(rv);
-
-	return (m);
-}
-
-/*
- * Attempts to allocate a new reservation for the object, and allocates a
- * page from that reservation.  Callers should first invoke vm_reserv_extend()
- * to attempt an allocation from an existing reservation.
- *
- * The page "mpred" must immediately precede the offset "pindex" within the
- * specified object.
- *
- * The object and free page queue must be locked.
- */
-vm_page_t
-vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex, int domain,
-    vm_page_t mpred)
-{
-	struct vm_domain *vmd;
-	vm_page_t m, msucc;
 	vm_pindex_t first, leftcap, rightcap;
 	vm_reserv_t rv;
 	int index;
@@ -936,13 +815,32 @@ vm_reserv_alloc_page(int req, vm_object_t object, vm_p
 		return (NULL);
 
 	/*
-	 * Callers should've extended an existing reservation prior to
-	 * calling this function.  If a reservation exists it is
-	 * incompatible with the allocation.
+	 * Look for an existing reservation.
 	 */
 	rv = vm_reserv_from_object(object, pindex, mpred, &msucc);
-	if (rv != NULL)
-		return (NULL);
+	if (rv != NULL) {
+		KASSERT(object != kernel_object || rv->domain == domain,
+		    ("vm_reserv_alloc_page: domain mismatch"));
+		domain = rv->domain;
+		vmd = VM_DOMAIN(domain);
+		index = VM_RESERV_INDEX(object, pindex);
+		m = &rv->pages[index];
+		vm_reserv_lock(rv);
+		/* Handle reclaim race. */
+		if (rv->object != object ||
+		    /* Handle vm_page_rename(m, new_object, ...). */
+		    popmap_is_set(rv->popmap, index)) {
+			m = NULL;
+			goto out;
+		}
+		if (vm_domain_allocate(vmd, req, 1) == 0)
+			m = NULL;
+		else
+			vm_reserv_populate(rv, index);
+out:
+		vm_reserv_unlock(rv);
+		return (m);
+	}
 
 	/*
 	 * Could a reservation fit between the first index to the left that

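The alignment and boundary tests that vm_reserv_alloc_contig() applies, first as an early feasibility check on the index within a reservation and again on the candidate page's physical address, can be exercised in isolation. Below is a small, self-contained user-space sketch of those two bitwise checks; the PAGE_SHIFT value, the run_satisfies() helper, and the values in main() are illustrative assumptions, not taken from the kernel sources.

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12	/* illustrative; 4 KB pages */

	/*
	 * Return nonzero if a run of "npages" pages starting at physical
	 * address "pa" satisfies a power-of-two "alignment" and does not
	 * cross a power-of-two "boundary".  The two tests are the same
	 * bitwise checks used by vm_reserv_alloc_contig().
	 */
	static int
	run_satisfies(uint64_t pa, unsigned long npages, uint64_t alignment,
	    uint64_t boundary)
	{
		uint64_t size;

		size = (uint64_t)npages << PAGE_SHIFT;
		if ((pa & (alignment - 1)) != 0)
			return (0);	/* first page is misaligned */
		if (((pa ^ (pa + size - 1)) & ~(boundary - 1)) != 0)
			return (0);	/* run crosses a boundary */
		return (1);
	}

	int
	main(void)
	{
		/* 16 pages at 2 MB, 64 KB alignment, 4 MB boundary: fits. */
		printf("%d\n", run_satisfies(0x200000, 16, 0x10000, 0x400000));
		/* 32 pages at 0xf0000 cross the 1 MB boundary: rejected. */
		printf("%d\n", run_satisfies(0xf0000, 32, 0x10000, 0x100000));
		return (0);
	}
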
Modified: head/sys/vm/vm_reserv.h
==============================================================================
--- head/sys/vm/vm_reserv.h	Mon Jun  3 04:38:07 2019	(r348524)
+++ head/sys/vm/vm_reserv.h	Mon Jun  3 05:15:36 2019	(r348525)
@@ -47,17 +47,12 @@
 /*
  * The following functions are only to be used by the virtual memory system.
  */
-vm_page_t	vm_reserv_alloc_contig(int req, vm_object_t object, vm_pindex_t pindex,
-		    int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
-		    u_long alignment, vm_paddr_t boundary, vm_page_t mpred);
-vm_page_t	vm_reserv_extend_contig(int req, vm_object_t object,
-		    vm_pindex_t pindex, int domain, u_long npages,
+vm_page_t	vm_reserv_alloc_contig(vm_object_t object, vm_pindex_t pindex,
+		    int domain, int req, vm_page_t mpred, u_long npages,
 		    vm_paddr_t low, vm_paddr_t high, u_long alignment,
-		    vm_paddr_t boundary, vm_page_t mpred);
-vm_page_t	vm_reserv_alloc_page(int req, vm_object_t object, vm_pindex_t pindex,
-		    int domain, vm_page_t mpred);
-vm_page_t	vm_reserv_extend(int req, vm_object_t object,
-		    vm_pindex_t pindex, int domain, vm_page_t mpred);
+		    vm_paddr_t boundary);
+vm_page_t	vm_reserv_alloc_page(vm_object_t object, vm_pindex_t pindex,
+		    int domain, int req, vm_page_t mpred);
 void		vm_reserv_break_all(vm_object_t object);
 boolean_t	vm_reserv_free_page(vm_page_t m);
 void		vm_reserv_init(void);
