PERFORCE change 34277 for review

Peter Wemm peter at FreeBSD.org
Wed Jul 9 15:50:34 PDT 2003


http://perforce.freebsd.org/chv.cgi?CH=34277

Change 34277 by peter at peter_hammer on 2003/07/09 15:49:35

	Fix bugs that stopped >512G of user VM from working.  I neglected
	to mask off to the correct size for a few intra-directory page
	offsets in _pmap_allocpte, and in unwire_pte_hold, I did the
	exact opposite and masked off too much.

Affected files ...

.. //depot/projects/hammer/sys/amd64/amd64/pmap.c#26 edit

Differences ...

==== //depot/projects/hammer/sys/amd64/amd64/pmap.c#26 (text+ko) ====

@@ -890,6 +890,7 @@
 
 	if (m->hold_count == 0) {
 		vm_offset_t pteva;
+
 		/*
 		 * unmap the page table page
 		 */
@@ -914,9 +915,11 @@
 		}
 		--pmap->pm_stats.resident_count;
 		if (m->pindex < NUPDE) {
-			/* Unhold the PD page */
+			/* We just released a PT, unhold the matching PD */
 			vm_page_t pdpg;
-			pdpg = vm_page_lookup(pmap->pm_pteobj, NUPDE + pmap_pdpe_index(va));
+
+			pdpg = vm_page_lookup(pmap->pm_pteobj, NUPDE +
+			    ((va >> PDPSHIFT) & (NUPDPE - 1)));
 			while (vm_page_sleep_if_busy(pdpg, FALSE, "pulook"))
 				vm_page_lock_queues();
 			vm_page_unhold(pdpg);
@@ -924,9 +927,11 @@
 				_pmap_unwire_pte_hold(pmap, va, pdpg);
 		}
 		if (m->pindex >= NUPDE && m->pindex < (NUPDE + NUPDPE)) {
-			/* Unhold the PDP page */
+			/* We just released a PD, unhold the matching PDP */
 			vm_page_t pdppg;
-			pdppg = vm_page_lookup(pmap->pm_pteobj, NUPDE + NUPDPE + pmap_pml4e_index(va));
+
+			pdppg = vm_page_lookup(pmap->pm_pteobj, NUPDE + NUPDPE +
+			    ((va >> PML4SHIFT) & (NUPML4E - 1)));
 			while (vm_page_sleep_if_busy(pdppg, FALSE, "pulooK"))
 				vm_page_lock_queues();
 			vm_page_unhold(pdppg);
@@ -1124,7 +1129,8 @@
 			_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index);
 		} else {
 			/* Add reference to pdp page */
-			pdppg = pmap_page_lookup(pmap->pm_pteobj, NUPDE + NUPDPE + pml4index);
+			pdppg = pmap_page_lookup(pmap->pm_pteobj,
+			    NUPDE + NUPDPE + pml4index);
 			pdppg->hold_count++;
 		}
 		pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
@@ -1150,16 +1156,17 @@
 			/* Have to allocate a new pd, recurse */
 			_pmap_allocpte(pmap, NUPDE + pdpindex);
 			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
-			pdp = &pdp[pdpindex];
+			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 		} else {
 			pdp = (pdp_entry_t *)PHYS_TO_DMAP(*pml4 & PG_FRAME);
-			pdp = &pdp[pdpindex];
+			pdp = &pdp[pdpindex & ((1ul << NPDPEPGSHIFT) - 1)];
 			if ((*pdp & PG_V) == 0) {
 				/* Have to allocate a new pd, recurse */
 				_pmap_allocpte(pmap, NUPDE + pdpindex);
 			} else {
 				/* Add reference to the pd page */
-				pdpg = pmap_page_lookup(pmap->pm_pteobj, NUPDE + pdpindex);
+				pdpg = pmap_page_lookup(pmap->pm_pteobj,
+				    NUPDE + pdpindex);
 				pdpg->hold_count++;
 			}
 		}
@@ -1239,7 +1246,7 @@
 
 
 /***************************************************
-* Pmap allocation/deallocation routines.
+ * Pmap allocation/deallocation routines.
  ***************************************************/
 
 /*


More information about the p4-projects mailing list