svn commit: r335865 - head/sys/vm
Alan Cox
alc at FreeBSD.org
Mon Jul 2 17:18:48 UTC 2018
Author: alc
Date: Mon Jul 2 17:18:46 2018
New Revision: 335865
URL: https://svnweb.freebsd.org/changeset/base/335865
Log:
Introduce vm_phys_enq_range(), and call it in vm_phys_alloc_npages()
and vm_phys_alloc_seg_contig() instead of vm_phys_free_contig(). In
short, vm_phys_enq_range() is simpler and faster than the more general
vm_phys_free_contig(), and in the case of vm_phys_alloc_seg_contig(),
vm_phys_free_contig() was placing the excess physical pages at the
wrong end of the queues.
In collaboration with: Doug Moore <dougm at rice.edu>
Modified:
head/sys/vm/vm_phys.c
Modified: head/sys/vm/vm_phys.c
==============================================================================
--- head/sys/vm/vm_phys.c Mon Jul 2 14:15:30 2018 (r335864)
+++ head/sys/vm/vm_phys.c Mon Jul 2 17:18:46 2018 (r335865)
@@ -605,6 +605,43 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_f
}
/*
+ * Add the physical pages [m, m + npages) at the end of a power-of-two aligned
+ * and sized set to the specified free list.
+ *
+ * When this function is called by a page allocation function, the caller
+ * should request insertion at the head unless the lower-order queues are
+ * known to be empty. The objective is to reduce the likelihood of
+ * long-term fragmentation by promoting contemporaneous allocation and
+ * (hopefully) deallocation.
+ *
+ * The physical page m's buddy must not be free.
+ */
+static void
+vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
+{
+ u_int n;
+ int order;
+
+ KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
+ /*
+ * The range must end on a boundary aligned to the largest
+ * power of two not exceeding npages; fls(npages) - 1 is the
+ * index of npages's highest set bit.
+ */
+ KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
+ ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
+ ("vm_phys_enq_range: page %p and npages %u are misaligned",
+ m, npages));
+ do {
+ /* Order VM_NFREEORDER marks a page that is not free. */
+ KASSERT(m->order == VM_NFREEORDER,
+ ("vm_phys_enq_range: page %p has unexpected order %d",
+ m, m->order));
+ /*
+ * Peel off chunks from the smallest power of two upward:
+ * ffs(npages) - 1 is the index of npages's lowest set
+ * bit. Because the range's end is aligned (asserted
+ * above) and npages is a multiple of each chunk size,
+ * every chunk start is aligned to its own size.
+ */
+ order = ffs(npages) - 1;
+ KASSERT(order < VM_NFREEORDER,
+ ("vm_phys_enq_range: order %d is out of range", order));
+ vm_freelist_add(fl, m, order, tail);
+ n = 1 << order;
+ m += n;
+ npages -= n;
+ } while (npages > 0);
+}
+
+/*
* Tries to allocate the specified number of pages from the specified pool
* within the specified domain. Returns the actual number of allocated pages
* and a pointer to each page through the array ma[].
@@ -644,7 +681,12 @@ vm_phys_alloc_npages(int domain, int pool, int npages,
for (end = i + need; i < end;)
ma[i++] = m++;
if (need < avail) {
- vm_phys_free_contig(m, avail - need);
+ /*
+ * Return excess pages to fl. Its
+ * order [0, oind) queues are empty.
+ */
+ vm_phys_enq_range(m, avail - need, fl,
+ 1);
return (npages);
} else if (i == npages)
return (npages);
@@ -662,8 +704,13 @@ vm_phys_alloc_npages(int domain, int pool, int npages,
for (end = i + need; i < end;)
ma[i++] = m++;
if (need < avail) {
- vm_phys_free_contig(m, avail -
- need);
+ /*
+ * Return excess pages to fl.
+ * Its order [0, oind) queues
+ * are empty.
+ */
+ vm_phys_enq_range(m, avail -
+ need, fl, 1);
return (npages);
} else if (i == npages)
return (npages);
@@ -1303,8 +1350,10 @@ done:
}
/* Return excess pages to the free lists. */
npages_end = roundup2(npages, 1 << oind);
- if (npages < npages_end)
- vm_phys_free_contig(&m_ret[npages], npages_end - npages);
+ if (npages < npages_end) {
+ fl = (*seg->free_queues)[VM_FREEPOOL_DEFAULT];
+ vm_phys_enq_range(&m_ret[npages], npages_end - npages, fl, 0);
+ }
return (m_ret);
}
More information about the svn-src-all
mailing list