svn commit: r357017 - in head/sys: dev/spibus kern vm

Jeff Roberson jeff at FreeBSD.org
Thu Jan 23 04:54:50 UTC 2020


Author: jeff
Date: Thu Jan 23 04:54:49 2020
New Revision: 357017
URL: https://svnweb.freebsd.org/changeset/base/357017

Log:
  Consistently use the page busy lock and vm_page_valid() rather than
  touching the page's valid bits directly.  This improves compliance with
  the page API and lets its assertions catch misuse.
  
  Reviewed by:	kib, markj
  Differential Revision:	https://reviews.freebsd.org/D23283
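
  For reference, the idiom this change standardizes looks roughly as
  follows (a minimal sketch distilled from the hunks below; obj, pindex,
  and the exact allocation flags vary per caller):

	/* Old: grab the page unbusied and poke the valid bits directly. */
	m = vm_page_grab(obj, pindex,
	    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
	m->valid = VM_PAGE_BITS_ALL;

	/*
	 * New: without VM_ALLOC_NOBUSY the page is returned
	 * exclusive-busied, so mark it valid through the accessor and
	 * then drop the busy lock.
	 */
	m = vm_page_grab(obj, pindex, VM_ALLOC_ZERO | VM_ALLOC_WIRED);
	vm_page_valid(m);
	vm_page_xunbusy(m);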

Modified:
  head/sys/dev/spibus/spigen.c
  head/sys/kern/kern_kcov.c
  head/sys/kern/kern_sendfile.c
  head/sys/vm/vm_glue.c
  head/sys/vm/vm_kern.c

Modified: head/sys/dev/spibus/spigen.c
==============================================================================
--- head/sys/dev/spibus/spigen.c	Thu Jan 23 03:38:41 2020	(r357016)
+++ head/sys/dev/spibus/spigen.c	Thu Jan 23 04:54:49 2020	(r357017)
@@ -325,8 +325,9 @@ spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *of
 	vm_object_reference_locked(mmap->bufobj); // kernel and userland both
 	for (n = 0; n < pages; n++) {
 		m[n] = vm_page_grab(mmap->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m[n]->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m[n]);
+		vm_page_xunbusy(m[n]);
 	}
 	VM_OBJECT_WUNLOCK(mmap->bufobj);
 	pmap_qenter(mmap->kvaddr, m, pages);

Modified: head/sys/kern/kern_kcov.c
==============================================================================
--- head/sys/kern/kern_kcov.c	Thu Jan 23 03:38:41 2020	(r357016)
+++ head/sys/kern/kern_kcov.c	Thu Jan 23 04:54:49 2020	(r357017)
@@ -383,8 +383,9 @@ kcov_alloc(struct kcov_info *info, size_t entries)
 	VM_OBJECT_WLOCK(info->bufobj);
 	for (n = 0; n < pages; n++) {
 		m = vm_page_grab(info->bufobj, n,
-		    VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
-		m->valid = VM_PAGE_BITS_ALL;
+		    VM_ALLOC_ZERO | VM_ALLOC_WIRED);
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_qenter(info->kvaddr + n * PAGE_SIZE, &m, 1);
 	}
 	VM_OBJECT_WUNLOCK(info->bufobj);

Modified: head/sys/kern/kern_sendfile.c
==============================================================================
--- head/sys/kern/kern_sendfile.c	Thu Jan 23 03:38:41 2020	(r357016)
+++ head/sys/kern/kern_sendfile.c	Thu Jan 23 04:54:49 2020	(r357017)
@@ -388,7 +388,7 @@ sendfile_swapin(vm_object_t obj, struct sf_io *sfio, i
 		if (!vm_pager_has_page(obj, OFF_TO_IDX(vmoff(i, off)), NULL,
 		    &a)) {
 			pmap_zero_page(pa[i]);
-			pa[i]->valid = VM_PAGE_BITS_ALL;
+			vm_page_valid(pa[i]);
 			MPASS(pa[i]->dirty == 0);
 			vm_page_xunbusy(pa[i]);
 			i++;

Modified: head/sys/vm/vm_glue.c
==============================================================================
--- head/sys/vm/vm_glue.c	Thu Jan 23 03:38:41 2020	(r357016)
+++ head/sys/vm/vm_glue.c	Thu Jan 23 04:54:49 2020	(r357017)
@@ -340,10 +340,12 @@ vm_thread_stack_create(struct domainset *ds, vm_object
 	 * page of stack.
 	 */
 	VM_OBJECT_WLOCK(ksobj);
-	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_NOBUSY |
-	    VM_ALLOC_WIRED, ma, pages);
-	for (i = 0; i < pages; i++)
-		ma[i]->valid = VM_PAGE_BITS_ALL;
+	(void)vm_page_grab_pages(ksobj, 0, VM_ALLOC_NORMAL | VM_ALLOC_WIRED,
+	    ma, pages);
+	for (i = 0; i < pages; i++) {
+		vm_page_valid(ma[i]);
+		vm_page_xunbusy(ma[i]);
+	}
 	VM_OBJECT_WUNLOCK(ksobj);
 	pmap_qenter(ks, ma, pages);
 	*ksobjp = ksobj;

Modified: head/sys/vm/vm_kern.c
==============================================================================
--- head/sys/vm/vm_kern.c	Thu Jan 23 03:38:41 2020	(r357016)
+++ head/sys/vm/vm_kern.c	Thu Jan 23 04:54:49 2020	(r357017)
@@ -193,7 +193,7 @@ kmem_alloc_attr_domain(int domain, vm_size_t size, int
 	if (vmem_alloc(vmem, size, M_BESTFIT | flags, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	prot = (flags & M_EXEC) != 0 ? VM_PROT_ALL : VM_PROT_RW;
@@ -223,7 +223,8 @@ retry:
 		    vm_phys_domain(m), domain));
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 	}
@@ -284,7 +285,7 @@ kmem_alloc_contig_domain(int domain, vm_size_t size, i
 	if (vmem_alloc(vmem, size, flags | M_BESTFIT, &addr))
 		return (0);
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	pflags |= VM_ALLOC_NOWAIT;
 	npages = atop(size);
@@ -315,7 +316,8 @@ retry:
 	for (; m < end_m; m++) {
 		if ((flags & M_ZERO) && (m->flags & PG_ZERO) == 0)
 			pmap_zero_page(m);
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, tmp, m, VM_PROT_RW,
 		    VM_PROT_RW | PMAP_ENTER_WIRED, 0);
 		tmp += PAGE_SIZE;
@@ -465,7 +467,7 @@ kmem_back_domain(int domain, vm_object_t object, vm_of
 	    ("kmem_back_domain: only supports kernel object."));
 
 	offset = addr - VM_MIN_KERNEL_ADDRESS;
-	pflags = malloc2vm_flags(flags) | VM_ALLOC_NOBUSY | VM_ALLOC_WIRED;
+	pflags = malloc2vm_flags(flags) | VM_ALLOC_WIRED;
 	pflags &= ~(VM_ALLOC_NOWAIT | VM_ALLOC_WAITOK | VM_ALLOC_WAITFAIL);
 	if (flags & M_WAITOK)
 		pflags |= VM_ALLOC_WAITFAIL;
@@ -498,7 +500,8 @@ retry:
 			pmap_zero_page(m);
 		KASSERT((m->oflags & VPO_UNMANAGED) != 0,
 		    ("kmem_malloc: page %p is managed", m));
-		m->valid = VM_PAGE_BITS_ALL;
+		vm_page_valid(m);
+		vm_page_xunbusy(m);
 		pmap_enter(kernel_pmap, addr + i, m, prot,
 		    prot | PMAP_ENTER_WIRED, 0);
 #if VM_NRESERVLEVEL > 0

