git: b7370efade86 - main - Revert "vm_phys_enqueue_contig: handle npages==0"
Date: Wed, 02 Aug 2023 09:33:50 UTC
The branch main has been updated by dougm:
URL: https://cgit.FreeBSD.org/src/commit/?id=b7370efade8667b7025c27470163601255ac21f4
commit b7370efade8667b7025c27470163601255ac21f4
Author: Doug Moore <dougm@FreeBSD.org>
AuthorDate: 2023-08-02 09:32:08 +0000
Commit: Doug Moore <dougm@FreeBSD.org>
CommitDate: 2023-08-02 09:33:40 +0000
Revert "vm_phys_enqueue_contig: handle npages==0"
This reverts commit 1a7fcf6d51eb67ee3e05fdbb806f7e68f9f53c9c.
Peter Holm reported a problem, so I'm reverting now and looking for
the problem later.
---
sys/vm/vm_phys.c | 74 +++++++++++++++++++++++++++++---------------------------
1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index b2084bdef4e1..28f12231e01c 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -72,8 +72,6 @@ __FBSDID("$FreeBSD$");
_Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
"Too many physsegs.");
-_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
- "vm_paddr_t too big for ffsll, flsll.");
#ifdef NUMA
struct mem_affinity __read_mostly *mem_affinity;
@@ -692,16 +690,18 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
*
* The physical page m's buddy must not be free.
*/
-static vm_page_t
+static void
vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
{
+ u_int n;
int order;
+ KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
("vm_phys_enq_range: page %p and npages %u are misaligned",
m, npages));
- while (npages > 0) {
+ do {
KASSERT(m->order == VM_NFREEORDER,
("vm_phys_enq_range: page %p has unexpected order %d",
m, m->order));
@@ -709,10 +709,10 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
KASSERT(order < VM_NFREEORDER,
("vm_phys_enq_range: order %d is out of range", order));
vm_freelist_add(fl, m, order, tail);
- m += 1 << order;
- npages -= 1 << order;
- }
- return (m);
+ n = 1 << order;
+ m += n;
+ npages -= n;
+ } while (npages > 0);
}
/*
@@ -744,7 +744,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
{
struct vm_freelist *alt, *fl;
vm_page_t m;
- int avail, end, flind, freelist, i, oind, pind;
+ int avail, end, flind, freelist, i, need, oind, pind;
KASSERT(domain >= 0 && domain < vm_ndomains,
("vm_phys_alloc_npages: domain %d is out of range", domain));
@@ -762,18 +762,20 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
for (oind = 0; oind < VM_NFREEORDER; oind++) {
while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
vm_freelist_rem(fl, m, oind);
- avail = i + (1 << oind);
- end = imin(npages, avail);
- while (i < end)
+ avail = 1 << oind;
+ need = imin(npages - i, avail);
+ for (end = i + need; i < end;)
ma[i++] = m++;
- if (i == npages) {
+ if (need < avail) {
/*
- * Return excess pages to fl. Its order
- * [0, oind) queues are empty.
+ * Return excess pages to fl. Its
+ * order [0, oind) queues are empty.
*/
- vm_phys_enq_range(m, avail - i, fl, 1);
+ vm_phys_enq_range(m, avail - need, fl,
+ 1);
+ return (npages);
+ } else if (i == npages)
return (npages);
- }
}
}
for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
@@ -783,20 +785,21 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
NULL) {
vm_freelist_rem(alt, m, oind);
vm_phys_set_pool(pool, m, oind);
- avail = i + (1 << oind);
- end = imin(npages, avail);
- while (i < end)
+ avail = 1 << oind;
+ need = imin(npages - i, avail);
+ for (end = i + need; i < end;)
ma[i++] = m++;
- if (i == npages) {
+ if (need < avail) {
/*
* Return excess pages to fl.
* Its order [0, oind) queues
* are empty.
*/
- vm_phys_enq_range(m, avail - i,
- fl, 1);
+ vm_phys_enq_range(m, avail -
+ need, fl, 1);
+ return (npages);
+ } else if (i == npages)
return (npages);
- }
}
}
}
@@ -1143,7 +1146,7 @@ max_order(vm_page_t m)
* because the size of a physical address exceeds the size of
* a long.
*/
- return (min(ffsll(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
+ return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
VM_NFREEORDER - 1));
}
@@ -1159,7 +1162,6 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
struct vm_freelist *fl;
struct vm_phys_seg *seg;
vm_page_t m_end;
- vm_paddr_t diff, lo;
int order;
/*
@@ -1171,15 +1173,15 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
fl = (*seg->free_queues)[m->pool];
m_end = m + npages;
/* Free blocks of increasing size. */
- lo = VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT;
- if (m < m_end &&
- (diff = lo ^ (lo + npages - 1)) != 0) {
- order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
- m = vm_phys_enq_range(m, roundup2(-lo, 1 << order), fl, 1);
+ while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
+ m + (1 << order) <= m_end) {
+ KASSERT(seg == &vm_phys_segs[m->segind],
+ ("%s: page range [%p,%p) spans multiple segments",
+ __func__, m_end - npages, m));
+ vm_freelist_add(fl, m, order, 1);
+ m += 1 << order;
}
-
/* Free blocks of maximum size. */
- order = VM_NFREEORDER - 1;
while (m + (1 << order) <= m_end) {
KASSERT(seg == &vm_phys_segs[m->segind],
("%s: page range [%p,%p) spans multiple segments",
@@ -1558,8 +1560,10 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
}
/* Return excess pages to the free lists. */
- fl = (*queues)[VM_FREEPOOL_DEFAULT];
- vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+ if (&m_run[npages] < m) {
+ fl = (*queues)[VM_FREEPOOL_DEFAULT];
+ vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
+ }
return (m_run);
}
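
For readers skimming the hunks above: the reinstated do/while loop in vm_phys_enq_range() frees a page range by repeatedly carving off a block of 1 << (ffs(npages) - 1) pages from the low end, relying on the caller-guaranteed alignment checked by the KASSERTs. The following standalone program is only an illustrative sketch of that decomposition, not FreeBSD code; the names enq_range and NFREEORDER and the example values are invented for this note, and ffs() is the only real library call used.

/*
 * Sketch (not part of the commit above) of the power-of-two decomposition
 * performed by vm_phys_enq_range(): given a range whose end is suitably
 * buddy-aligned, repeatedly free a block of 2^(ffs(npages) - 1) pages
 * from the low end until the range is exhausted.
 */
#include <stdio.h>
#include <strings.h>		/* ffs() */

#define	NFREEORDER	13	/* stand-in for VM_NFREEORDER */

static void
enq_range(unsigned long pfn, unsigned int npages)
{
	unsigned int n;
	int order;

	/*
	 * The caller guarantees npages > 0 and that pfn + npages is
	 * aligned to 1 << (fls(npages) - 1), mirroring the KASSERTs in
	 * the real function.
	 */
	do {
		order = ffs((int)npages) - 1;
		if (order > NFREEORDER - 1)
			order = NFREEORDER - 1;
		n = 1u << order;
		printf("free block: pfn %lu, order %d (%u pages)\n",
		    pfn, order, n);
		pfn += n;
		npages -= n;
	} while (npages > 0);
}

int
main(void)
{
	/* 37 pages ending at pfn 128 (32-aligned): blocks of order 0, 2, 5. */
	enq_range(91, 37);
	return (0);
}

Running this prints one line per freed block (orders 0, 2 and 5 for the example range), which is exactly the shape of the work the restored loop hands to vm_freelist_add().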