Re: git: 1a7fcf6d51eb - main - vm_phys_enqueue_contig: handle npages==0
- In reply to: Dmitry Chagin: "Re: git: 1a7fcf6d51eb - main - vm_phys_enqueue_contig: handle npages==0"
Date: Wed, 02 Aug 2023 09:23:48 UTC
> On 2. Aug 2023, at 10:15, Dmitry Chagin <dchagin@freebsd.org> wrote:
>
>
> Hi,
>
> With this change, I got a panic:
> vm_phys_enq_range: page 0xfffffe0000000000 and npages 0 are misaligned
My arm64 system also panics:
panic: vm_phys_enq_range: page 0xffffa000796a7000 and npages 4294443008 are misaligned
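
Both panics point at the alignment KASSERT in vm_phys_enq_range. Below is a user-space sketch (not the kernel code) of how that assertion can fire once npages == 0 reaches the function; the commit drops the old npages > 0 guard, so callers may now pass 0. Here sketch_fls() stands in for the kernel's fls(9), and the all-ones mask is only what a masked amd64/arm64 variable shift would typically produce from the undefined negative shift count.

#include <stdio.h>

/* Stand-in for the kernel's fls(9): one-based index of the most
 * significant set bit, or 0 if no bits are set. */
static int
sketch_fls(unsigned int x)
{
        return (x == 0 ? 0 : 32 - __builtin_clz(x));
}

int
main(void)
{
        unsigned long page_size = 4096;         /* PAGE_SIZE */
        unsigned int npages = 0;                /* case the old guard rejected */
        unsigned long paddr = 0x796a7000;       /* arbitrary nonzero address */

        /* The KASSERT builds its mask from PAGE_SIZE << (fls(npages) - 1),
         * but fls(0) == 0, so the shift count is -1: undefined behavior. */
        int shift = sketch_fls(npages) - 1;

        /* With the count masked to the word size, as amd64 and arm64
         * variable shifts do, PAGE_SIZE << 63 overflows to 0 and the
         * mask (0 - 1) becomes all ones. */
        unsigned long mask = (page_size << (shift & 63)) - 1;

        /* Every nonzero physical address now looks misaligned, so the
         * assertion fires with "npages 0", as in the first panic above. */
        printf("shift %d, mask %#lx, misaligned %d\n",
            shift, mask, ((paddr + npages * page_size) & mask) != 0);
        return (0);
}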
Best regards
Michael
>
> Wed, 2 Aug 2023 at 06:22, Doug Moore <dougm@freebsd.org>:
> The branch main has been updated by dougm:
>
> URL: https://cgit.FreeBSD.org/src/commit/?id=1a7fcf6d51eb67ee3e05fdbb806f7e68f9f53c9c
>
> commit 1a7fcf6d51eb67ee3e05fdbb806f7e68f9f53c9c
> Author: Doug Moore <dougm@FreeBSD.org>
> AuthorDate: 2023-08-02 03:12:00 +0000
> Commit: Doug Moore <dougm@FreeBSD.org>
> CommitDate: 2023-08-02 03:12:00 +0000
>
> vm_phys_enqueue_contig: handle npages==0
>
> By letting vm_phys_enqueue_contig handle the case when npages == 0,
> the callers can stop checking it, and the compiler can stop
> zero-checking with every call to ffs(). Letting vm_phys_enqueue_contig
> call vm_phys_enq_range for part of its work also saves a few
> bytes.
>
> The amd64 object code shrinks by 80 bytes.
>
> Reviewed by: kib
> Differential Revision: https://reviews.freebsd.org/D41154
> ---
> sys/vm/vm_phys.c | 74 +++++++++++++++++++++++++++-----------------------------
> 1 file changed, 35 insertions(+), 39 deletions(-)
>
> diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
> index 28f12231e01c..b2084bdef4e1 100644
> --- a/sys/vm/vm_phys.c
> +++ b/sys/vm/vm_phys.c
> @@ -72,6 +72,8 @@ __FBSDID("$FreeBSD$");
>
> _Static_assert(sizeof(long) * NBBY >= VM_PHYSSEG_MAX,
> "Too many physsegs.");
> +_Static_assert(sizeof(long long) >= sizeof(vm_paddr_t),
> + "vm_paddr_t too big for ffsll, flsll.");
>
> #ifdef NUMA
> struct mem_affinity __read_mostly *mem_affinity;
> @@ -690,18 +692,16 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order,
> *
> * The physical page m's buddy must not be free.
> */
> -static void
> +static vm_page_t
> vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
> {
> - u_int n;
> int order;
>
> - KASSERT(npages > 0, ("vm_phys_enq_range: npages is 0"));
> KASSERT(((VM_PAGE_TO_PHYS(m) + npages * PAGE_SIZE) &
> ((PAGE_SIZE << (fls(npages) - 1)) - 1)) == 0,
> ("vm_phys_enq_range: page %p and npages %u are misaligned",
> m, npages));
> - do {
> + while (npages > 0) {
> KASSERT(m->order == VM_NFREEORDER,
> ("vm_phys_enq_range: page %p has unexpected order %d",
> m, m->order));
> @@ -709,10 +709,10 @@ vm_phys_enq_range(vm_page_t m, u_int npages, struct vm_freelist *fl, int tail)
> KASSERT(order < VM_NFREEORDER,
> ("vm_phys_enq_range: order %d is out of range", order));
> vm_freelist_add(fl, m, order, tail);
> - n = 1 << order;
> - m += n;
> - npages -= n;
> - } while (npages > 0);
> + m += 1 << order;
> + npages -= 1 << order;
> + }
> + return (m);
> }
>
> /*
> @@ -744,7 +744,7 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
> {
> struct vm_freelist *alt, *fl;
> vm_page_t m;
> - int avail, end, flind, freelist, i, need, oind, pind;
> + int avail, end, flind, freelist, i, oind, pind;
>
> KASSERT(domain >= 0 && domain < vm_ndomains,
> ("vm_phys_alloc_npages: domain %d is out of range", domain));
> @@ -762,20 +762,18 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
> for (oind = 0; oind < VM_NFREEORDER; oind++) {
> while ((m = TAILQ_FIRST(&fl[oind].pl)) != NULL) {
> vm_freelist_rem(fl, m, oind);
> - avail = 1 << oind;
> - need = imin(npages - i, avail);
> - for (end = i + need; i < end;)
> + avail = i + (1 << oind);
> + end = imin(npages, avail);
> + while (i < end)
> ma[i++] = m++;
> - if (need < avail) {
> + if (i == npages) {
> /*
> - * Return excess pages to fl. Its
> - * order [0, oind) queues are empty.
> + * Return excess pages to fl. Its order
> + * [0, oind) queues are empty.
> */
> - vm_phys_enq_range(m, avail - need, fl,
> - 1);
> - return (npages);
> - } else if (i == npages)
> + vm_phys_enq_range(m, avail - i, fl, 1);
> return (npages);
> + }
> }
> }
> for (oind = VM_NFREEORDER - 1; oind >= 0; oind--) {
> @@ -785,21 +783,20 @@ vm_phys_alloc_npages(int domain, int pool, int npages, vm_page_t ma[])
> NULL) {
> vm_freelist_rem(alt, m, oind);
> vm_phys_set_pool(pool, m, oind);
> - avail = 1 << oind;
> - need = imin(npages - i, avail);
> - for (end = i + need; i < end;)
> + avail = i + (1 << oind);
> + end = imin(npages, avail);
> + while (i < end)
> ma[i++] = m++;
> - if (need < avail) {
> + if (i == npages) {
> /*
> * Return excess pages to fl.
> * Its order [0, oind) queues
> * are empty.
> */
> - vm_phys_enq_range(m, avail -
> - need, fl, 1);
> - return (npages);
> - } else if (i == npages)
> + vm_phys_enq_range(m, avail - i,
> + fl, 1);
> return (npages);
> + }
> }
> }
> }
> @@ -1146,7 +1143,7 @@ max_order(vm_page_t m)
> * because the size of a physical address exceeds the size of
> * a long.
> */
> - return (min(ffsl(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
> + return (min(ffsll(VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT) - 1,
> VM_NFREEORDER - 1));
> }
>
> @@ -1162,6 +1159,7 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
> struct vm_freelist *fl;
> struct vm_phys_seg *seg;
> vm_page_t m_end;
> + vm_paddr_t diff, lo;
> int order;
>
> /*
> @@ -1173,15 +1171,15 @@ vm_phys_enqueue_contig(vm_page_t m, u_long npages)
> fl = (*seg->free_queues)[m->pool];
> m_end = m + npages;
> /* Free blocks of increasing size. */
> - while ((order = max_order(m)) < VM_NFREEORDER - 1 &&
> - m + (1 << order) <= m_end) {
> - KASSERT(seg == &vm_phys_segs[m->segind],
> - ("%s: page range [%p,%p) spans multiple segments",
> - __func__, m_end - npages, m));
> - vm_freelist_add(fl, m, order, 1);
> - m += 1 << order;
> + lo = VM_PAGE_TO_PHYS(m) >> PAGE_SHIFT;
> + if (m < m_end &&
> + (diff = lo ^ (lo + npages - 1)) != 0) {
> + order = min(flsll(diff) - 1, VM_NFREEORDER - 1);
> + m = vm_phys_enq_range(m, roundup2(-lo, 1 << order), fl, 1);
> }
> +
> /* Free blocks of maximum size. */
> + order = VM_NFREEORDER - 1;
> while (m + (1 << order) <= m_end) {
> KASSERT(seg == &vm_phys_segs[m->segind],
> ("%s: page range [%p,%p) spans multiple segments",
> @@ -1560,10 +1558,8 @@ vm_phys_alloc_contig(int domain, u_long npages, vm_paddr_t low, vm_paddr_t high,
> vm_phys_set_pool(VM_FREEPOOL_DEFAULT, m, oind);
> }
> /* Return excess pages to the free lists. */
> - if (&m_run[npages] < m) {
> - fl = (*queues)[VM_FREEPOOL_DEFAULT];
> - vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
> - }
> + fl = (*queues)[VM_FREEPOOL_DEFAULT];
> + vm_phys_enq_range(&m_run[npages], m - &m_run[npages], fl, 0);
> return (m_run);
> }
>
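
The head-alignment arithmetic the patch adds to vm_phys_enqueue_contig can be traced in a standalone sketch. The inputs below (lo, npages, and the stand-in for VM_NFREEORDER - 1) are assumed values chosen for illustration, and the closing comment is an inference from the code rather than something confirmed in this thread.

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's flsll(9). */
static int
sketch_flsll(unsigned long long x)
{
        return (x == 0 ? 0 : 64 - __builtin_clzll(x));
}

int
main(void)
{
        uint64_t lo = 0x80000;          /* assumed first PFN of the run */
        uint64_t npages = 0x300;        /* assumed run length in pages */
        int order_cap = 12;             /* assumed VM_NFREEORDER - 1 */

        /* diff has a set bit wherever the first and last PFN differ;
         * flsll(diff) - 1 is the highest such bit. */
        uint64_t diff = lo ^ (lo + npages - 1);
        int order = sketch_flsll(diff) - 1;
        if (order > order_cap)
                order = order_cap;

        /* roundup2(-lo, 1 << order) is meant to count the pages from
         * lo up to the next (1 << order)-aligned boundary.  When lo is
         * already a multiple of 1 << order, -lo is too, so the result
         * is -lo itself rather than 0. */
        uint64_t align = (uint64_t)1 << order;
        uint64_t head = (-lo + (align - 1)) & ~(align - 1);

        printf("order %d, head %#jx, as u_int %u\n",
            order, (uintmax_t)head, (unsigned int)head);
        /* These inputs give order 9 and head 0xfffffffffff80000; the
         * truncation to vm_phys_enq_range's u_int npages yields
         * 4294443008, the value in the arm64 panic above. */
        return (0);
}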