svn commit: r336294 - head/sys/riscv/riscv

Alan Cox alc at FreeBSD.org
Sat Jul 14 20:14:01 UTC 2018


Author: alc
Date: Sat Jul 14 20:14:00 2018
New Revision: 336294
URL: https://svnweb.freebsd.org/changeset/base/336294

Log:
  Invalidate the mapping before updating its physical address.
  
  Doing so ensures that all threads sharing the pmap have a consistent
  view of the mapping.  This fixes the problem described in the commit
  log message for r329254 without the overhead of an extra page fault
  in the common case.  (Now that all pmap_enter() implementations are
  similarly modified, the workaround added in r329254 can be removed,
  reducing the overhead of COW faults.)
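  
  As an illustration of the ordering this change adopts, here is a
  minimal, standalone C11 sketch of the break-before-make pattern.
  The helper names, the PTE bit values, and the PPN shift below are
  illustrative stand-ins, not the FreeBSD definitions; the committed
  code uses pmap_load_clear(), pmap_invalidate_page(), and
  pmap_load_store(), as the diff further down shows.
  
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>
  
  /* Illustrative PTE bits; the real definitions live in the riscv
   * pte headers and differ from these. */
  #define PTE_V           0x01UL   /* valid */
  #define PTE_SW_MANAGED  0x100UL  /* software-managed (made up) */
  #define PPN_SHIFT       10       /* stand-in for PTE_PPN0_S */
  
  typedef _Atomic uint64_t pt_entry_t;
  
  /* Stand-ins for pmap_load_clear()/pmap_load_store(): swap the PTE
   * atomically so no thread ever sees a half-updated entry. */
  static uint64_t
  pte_load_clear(pt_entry_t *l3)
  {
          return (atomic_exchange(l3, 0));
  }
  
  static void
  pte_load_store(pt_entry_t *l3, uint64_t v)
  {
          (void)atomic_exchange(l3, v);
  }
  
  static void
  tlb_invalidate(void)
  {
          /* pmap_invalidate_page() stand-in; a no-op here. */
  }
  
  /*
   * Break-before-make: when the physical address behind a virtual
   * address changes, clear the PTE and flush the TLB before
   * installing the new PTE.  Between the clear and the store, other
   * threads take a fault instead of seeing a stale translation, and
   * the old mapping's bookkeeping can be done safely.
   */
  static void
  update_mapping(pt_entry_t *l3, uint64_t new_l3)
  {
          uint64_t orig_l3;
  
          orig_l3 = pte_load_clear(l3); /* 1. invalidate old mapping */
          tlb_invalidate();             /* 2. make the clear visible */
          /* 3. dirty/referenced accounting and PV-entry reuse would
           *    happen here, while the old translation is unusable. */
          (void)orig_l3;
          pte_load_store(l3, new_l3);   /* 4. publish the new mapping */
  }
  
  int
  main(void)
  {
          pt_entry_t l3 = (0x1000UL << PPN_SHIFT) | PTE_SW_MANAGED | PTE_V;
  
          update_mapping(&l3, (0x2000UL << PPN_SHIFT) | PTE_SW_MANAGED | PTE_V);
          printf("l3 = %#lx\n", (unsigned long)atomic_load(&l3));
          return (0);
  }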
  
  With this change we can reuse the PV entry from the old mapping,
  potentially avoiding a call to reclaim_pv_chunk().  Otherwise, there is
  nothing preventing the old PV entry from being reclaimed.  In rare
  cases this could result in the PTE's page table page being freed,
  leading to a use-after-free of the page when the updated PTE is written
  following the allocation of the PV entry for the new mapping.
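  
  The PV-entry reuse itself amounts to moving one list element between
  two lists rather than freeing and reallocating it.  A minimal sketch
  using the sys/queue.h TAILQ macros follows; the struct layout and
  names are simplified stand-ins for the pmap's pv_entry machinery,
  not the real definitions.
  
  #include <sys/queue.h>
  #include <stdio.h>
  #include <stdlib.h>
  
  /* Simplified PV entry: one mapping of a physical page, keyed by
   * the mapping's virtual address. */
  struct pv_entry {
          unsigned long pv_va;
          TAILQ_ENTRY(pv_entry) pv_next;
  };
  TAILQ_HEAD(pv_list, pv_entry);
  
  int
  main(void)
  {
          struct pv_list old_pvs = TAILQ_HEAD_INITIALIZER(old_pvs);
          struct pv_list new_pvs = TAILQ_HEAD_INITIALIZER(new_pvs);
          struct pv_entry *pv;
  
          if ((pv = malloc(sizeof(*pv))) == NULL)
                  return (1);
          pv->pv_va = 0x1000;
          TAILQ_INSERT_TAIL(&old_pvs, pv, pv_next);
  
          /*
           * Reuse instead of free-then-allocate: unlink the entry
           * from the old page's list and insert the same entry on the
           * new page's list.  Because the entry is never returned to
           * the allocator, nothing can reclaim it in between; in the
           * pmap, such a reclamation is what could free the PTE's
           * page table page out from under the caller.
           */
          TAILQ_REMOVE(&old_pvs, pv, pv_next);
          TAILQ_INSERT_TAIL(&new_pvs, pv, pv_next);
  
          printf("pv for va %#lx reused on the new list\n", pv->pv_va);
          free(pv);
          return (0);
  }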
  
  Reviewed by:	br, markj
  MFC after:	3 weeks
  Differential Revision:	https://reviews.freebsd.org/D16261

Modified:
  head/sys/riscv/riscv/pmap.c

Modified: head/sys/riscv/riscv/pmap.c
==============================================================================
--- head/sys/riscv/riscv/pmap.c	Sat Jul 14 19:53:41 2018	(r336293)
+++ head/sys/riscv/riscv/pmap.c	Sat Jul 14 20:14:00 2018	(r336294)
@@ -2040,6 +2040,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 	new_l3 |= (pn << PTE_PPN0_S);
 	if ((flags & PMAP_ENTER_WIRED) != 0)
 		new_l3 |= PTE_SW_WIRED;
+	if ((m->oflags & VPO_UNMANAGED) == 0)
+		new_l3 |= PTE_SW_MANAGED;
 
 	CTR2(KTR_PMAP, "pmap_enter: %.16lx -> %.16lx", va, pa);
 
@@ -2109,9 +2111,9 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 		pmap_invalidate_page(pmap, va);
 	}
 
-	om = NULL;
 	orig_l3 = pmap_load(l3);
 	opa = PTE_TO_PHYS(orig_l3);
+	pv = NULL;
 
 	/*
 	 * Is the specified virtual address already mapped?
@@ -2148,7 +2150,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 			 * No, might be a protection or wiring change.
 			 */
 			if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-				new_l3 |= PTE_SW_MANAGED;
 				if (pmap_is_write(new_l3))
 					vm_page_aflag_set(m, PGA_WRITEABLE);
 			}
@@ -2158,6 +2159,42 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 		/* Flush the cache, there might be uncommitted data in it */
 		if (pmap_is_current(pmap) && pmap_l3_valid_cacheable(orig_l3))
 			cpu_dcache_wb_range(va, L3_SIZE);
+
+		/*
+		 * The physical page has changed.  Temporarily invalidate
+		 * the mapping.  This ensures that all threads sharing the
+		 * pmap keep a consistent view of the mapping, which is
+		 * necessary for the correct handling of COW faults.  It
+		 * also permits reuse of the old mapping's PV entry,
+		 * avoiding an allocation.
+		 *
+		 * For consistency, handle unmanaged mappings the same way.
+		 */
+		orig_l3 = pmap_load_clear(l3);
+		KASSERT(PTE_TO_PHYS(orig_l3) == opa,
+		    ("pmap_enter: unexpected pa update for %#lx", va));
+		if ((orig_l3 & PTE_SW_MANAGED) != 0) {
+			om = PHYS_TO_VM_PAGE(opa);
+
+			/*
+			 * The pmap lock is sufficient to synchronize with
+			 * concurrent calls to pmap_page_test_mappings() and
+			 * pmap_ts_referenced().
+			 */
+			if (pmap_page_dirty(orig_l3))
+				vm_page_dirty(om);
+			if ((orig_l3 & PTE_A) != 0)
+				vm_page_aflag_set(om, PGA_REFERENCED);
+			CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
+			pv = pmap_pvh_remove(&om->md, pmap, va);
+			if ((new_l3 & PTE_SW_MANAGED) == 0)
+				free_pv_entry(pmap, pv);
+			if ((om->aflags & PGA_WRITEABLE) != 0 &&
+			    TAILQ_EMPTY(&om->md.pv_list))
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
+		}
+		pmap_invalidate_page(pmap, va);
+		orig_l3 = 0;
 	} else {
 		/*
 		 * Increment the counters.
@@ -2169,10 +2206,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		new_l3 |= PTE_SW_MANAGED;
-		pv = get_pv_entry(pmap, &lock);
-		pv->pv_va = va;
+	if ((new_l3 & PTE_SW_MANAGED) != 0) {
+		if (pv == NULL) {
+			pv = get_pv_entry(pmap, &lock);
+			pv->pv_va = va;
+		}
 		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
 		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 		m->md.pv_gen++;
@@ -2187,22 +2225,11 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
 validate:
 		orig_l3 = pmap_load_store(l3, new_l3);
 		PTE_SYNC(l3);
-		opa = PTE_TO_PHYS(orig_l3);
-
-		if (opa != pa) {
-			if ((orig_l3 & PTE_SW_MANAGED) != 0) {
-				om = PHYS_TO_VM_PAGE(opa);
-				if (pmap_page_dirty(orig_l3))
-					vm_page_dirty(om);
-				if ((orig_l3 & PTE_A) != 0)
-					vm_page_aflag_set(om, PGA_REFERENCED);
-				CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, opa);
-				pmap_pvh_free(&om->md, pmap, va);
-			}
-		} else if (pmap_page_dirty(orig_l3)) {
-			if ((orig_l3 & PTE_SW_MANAGED) != 0)
-				vm_page_dirty(m);
-		}
+		KASSERT(PTE_TO_PHYS(orig_l3) == pa,
+		    ("pmap_enter: invalid update"));
+		if (pmap_page_dirty(orig_l3) &&
+		    (orig_l3 & PTE_SW_MANAGED) != 0)
+			vm_page_dirty(m);
 	} else {
 		pmap_load_store(l3, new_l3);
 		PTE_SYNC(l3);

