PERFORCE change 133553 for review

Scott Long scottl at FreeBSD.org
Fri Jan 18 07:08:44 PST 2008


http://perforce.freebsd.org/chv.cgi?CH=133553

Change 133553 by scottl at scottl-ix on 2008/01/18 15:08:34

	Sync with fixed pmap and console

Affected files ...

.. //depot/projects/xen31-xenbus/sys/dev/xen/console/console.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/i386/intr_machdep.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/i386/vm_machdep.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/include/pmap.h#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/include/xen/xenpmap.h#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/xen/pmap.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/i386/xen/xen_machdep.c#2 integrate
.. //depot/projects/xen31-xenbus/sys/xen/xenbus/xenbus_probe.c#2 edit

Differences ...

==== //depot/projects/xen31-xenbus/sys/dev/xen/console/console.c#2 (text+ko) ====

@@ -76,9 +76,10 @@
 #define	XCUNIT(x)	(minor(x))
 #define ISTTYOPEN(tp)	((tp) && ((tp)->t_state & TS_ISOPEN))
 #define CN_LOCK_INIT(x, _name) \
-        mtx_init(&x, _name, NULL, MTX_DEF|MTX_RECURSE)
-#define CN_LOCK(l)        mtx_lock(&(l))
-#define CN_UNLOCK(l)      mtx_unlock(&(l))
+        mtx_init(&x, _name, NULL, MTX_SPIN|MTX_RECURSE)
+
+#define CN_LOCK(l)        mtx_lock_spin(&(l))
+#define CN_UNLOCK(l)      mtx_unlock_spin(&(l))
 #define CN_LOCK_ASSERT(x)    mtx_assert(&x, MA_OWNED)
 #define CN_LOCK_DESTROY(x)   mtx_destroy(&x)
 
@@ -162,9 +163,7 @@
 static void
 xccnputc(struct consdev *dev, int c)
 {
-	CN_LOCK(cn_mtx);
 	xcons_putc(c);
-	CN_UNLOCK(cn_mtx);
 }
 
 static void
@@ -288,20 +287,20 @@
 	int           i;
 	struct tty *tp = xccons;
 	
-	CN_LOCK(cn_mtx);
 	for (i = 0; i < len; i++) {
-		if (xen_console_up)
+		if (xen_console_up) 
 			(*linesw[tp->t_line]->l_rint)(buf[i], tp);
 		else
 			rbuf[RBUF_MASK(rp++)] = buf[i];
 	}
-	CN_UNLOCK(cn_mtx);
 }
 
 static void 
 __xencons_tx_flush(void)
 {
 	int        sz, work_done = 0;
+
+	CN_LOCK(cn_mtx);
 	while (wc != wp) {
 		int sent;
 		sz = wp - wc;
@@ -318,7 +317,8 @@
 		}
 		work_done = 1;
 	}
-
+	CN_UNLOCK(cn_mtx);
+	
 	if (work_done && xen_console_up)
 		ttwakeup(xccons);
 }
@@ -326,9 +326,7 @@
 void
 xencons_tx(void)
 {
-	CN_LOCK(cn_mtx);
 	__xencons_tx_flush();
-	CN_UNLOCK(cn_mtx);
 }
 
 static void
@@ -432,31 +430,33 @@
 static void
 xcstart(struct tty *tp)
 {
-	int s;
 	boolean_t cons_full = FALSE;
 
-	s = spltty();
 	CN_LOCK(cn_mtx);
 	if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) {
+			CN_UNLOCK(cn_mtx);
+
 		ttwwakeup(tp);
-		CN_UNLOCK(cn_mtx);
 		return;
 	}
 
 	tp->t_state |= TS_BUSY;
+	CN_UNLOCK(cn_mtx);
+
 	while (tp->t_outq.c_cc != 0 && !cons_full)
 		cons_full = xcons_putc(getc(&tp->t_outq));
 
 	/* if the console is close to full leave our state as busy */
 	if (!cons_full) {
-		tp->t_state &= ~TS_BUSY;
-		ttwwakeup(tp);
+			CN_LOCK(cn_mtx);
+			tp->t_state &= ~TS_BUSY;
+			CN_UNLOCK(cn_mtx);
+			ttwwakeup(tp);
 	} else {
 	    	/* let the timeout kick us in a bit */
 	    	xc_start_needed = TRUE;
 	}
-	CN_UNLOCK(cn_mtx);
-	splx(s);
+
 }
 
 static void
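
The locking changes above convert the console mutex from a default (sleep) mutex to a spin mutex: xccnputc() and xencons_rx() can run from low-level console and interrupt paths where blocking on a sleep mutex is not allowed, and a spin mutex instead busy-waits with interrupts disabled on the local CPU. A minimal sketch of the converted discipline, using the same names as the hunks above:

	#include <sys/param.h>
	#include <sys/lock.h>
	#include <sys/mutex.h>

	static struct mtx cn_mtx;

	static void
	cn_lock_sketch(void)
	{
		/* MTX_SPIN: busy-wait with interrupts disabled;
		 * MTX_RECURSE: the console path may re-enter the lock. */
		mtx_init(&cn_mtx, "cn_mtx", NULL, MTX_SPIN | MTX_RECURSE);

		mtx_lock_spin(&cn_mtx);
		/* ... touch the shared transmit ring indices (wc, wp) ... */
		mtx_unlock_spin(&cn_mtx);
	}

Note also where the lock moved: the putc and rx paths now run unlocked, while __xencons_tx_flush() and xcstart() take cn_mtx only around the shared state they touch.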

==== //depot/projects/xen31-xenbus/sys/i386/i386/intr_machdep.c#2 (text+ko) ====

@@ -395,7 +395,8 @@
 	/* Schedule the ithread if needed. */
 	if (thread) {
 		error = intr_event_schedule_thread(ie);
-		KASSERT(error == 0, ("bad stray interrupt"));
+		if (error != 0)
+			log(LOG_CRIT, "bad stray interrupt %d", vector);
 	}
 	critical_exit();
 	td->td_intr_nesting_level--;
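
Replacing the KASSERT() with a log() call changes the failure behavior on both kernel flavors: KASSERT() panics only when the kernel is built with INVARIANTS and compiles away entirely otherwise, so a stray interrupt would either crash a debug kernel or go completely unreported on a production one. Roughly (a sketch of the stock definition, not quoted verbatim from sys/systm.h):

	#ifdef INVARIANTS
	#define	KASSERT(exp, msg) do {			\
		if (__predict_false(!(exp)))		\
			panic msg;			\
	} while (0)
	#else
	#define	KASSERT(exp, msg) do { } while (0)
	#endif

Logging at LOG_CRIT reports the stray vector on every kernel without taking the machine down.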

==== //depot/projects/xen31-xenbus/sys/i386/i386/vm_machdep.c#2 (text+ko) ====

@@ -779,7 +779,7 @@
 	ptep = vtopte(sf->kva);
 	opte = *ptep;
 #ifdef XEN
-	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag | PG_RW | PG_V);
+	*ptep = xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag | PG_RW | PG_V;
 #else	
 	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V;
 #endif
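
The direct PTE store depends on xpmap_ptom() to turn the pseudo-physical frame into the machine frame that Xen expects in a page-table entry. A hedged sketch of that translation, assuming the conventional Xen P2M array (phys_to_machine_mapping is the name Xen's public headers use; the real macro in xenpmap.h may differ in detail):

	/* Pseudo-physical -> machine: index the P2M table by frame number. */
	static __inline vm_paddr_t
	xpmap_ptom_sketch(vm_paddr_t pa)
	{
		return ((phys_to_machine_mapping[pa >> PAGE_SHIFT] << PAGE_SHIFT) |
		    (pa & PAGE_MASK));
	}

The store itself presumably relies on Xen's writable-pagetable mode to trap and validate the write, which is why the PT_SET_MA() wrapper is no longer needed here.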

==== //depot/projects/xen31-xenbus/sys/i386/include/pmap.h#2 (text+ko) ====

@@ -232,32 +232,58 @@
 void    pmap_map_readonly(struct pmap *pmap, vm_offset_t va, int len);
 void    pmap_map_readwrite(struct pmap *pmap, vm_offset_t va, int len);
 
+
+static __inline pt_entry_t
+pte_load_store(pt_entry_t *ptep, pt_entry_t v)
+{
+	pt_entry_t r;
+
+	v = xpmap_ptom(v);
+	r = *ptep;
+	__asm __volatile(
+	    "1:\n"
+	    "\tlock; cmpxchg8b %1\n"
+	    "\tjnz 1b"
+	    : "+A" (r)
+	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
+	return (r);
+}
+
 static __inline pt_entry_t
-pte_load_clear(pt_entry_t *ptep)
+pte_load_store_ma(pt_entry_t *ptep, pt_entry_t v)
 {
 	pt_entry_t r;
 
-	r = PT_GET(ptep);
-	PT_CLEAR_VA(ptep, TRUE);
+	r = *ptep;
+	__asm __volatile(
+	    "1:\n"
+	    "\tlock; cmpxchg8b %1\n"
+	    "\tjnz 1b"
+	    : "+A" (r)
+	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
 	return (r);
 }
+
+
+#else 
+
+
 static __inline pt_entry_t
 pte_load_store(pt_entry_t *ptep, pt_entry_t v)
 {
 	pt_entry_t r;
-	r = PT_GET(ptep);
-	PT_SET_VA(ptep, v, TRUE);
+
+	r = *ptep;
+	__asm __volatile(
+	    "1:\n"
+	    "\tlock; cmpxchg8b %1\n"
+	    "\tjnz 1b"
+	    : "+A" (r)
+	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
 	return (r);
 }
 
-#define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
-#define	pte_clear(ptep)	pte_load_store((ptep), (pt_entry_t)0ULL)
-
-#ifdef PAE
-extern pt_entry_t pg_nx;
-#endif
 
-#else
 /*
  *	Routine:	pmap_kextract
  *	Function:
@@ -278,6 +304,8 @@
 	return pa;
 }
 
+#endif
+
 #ifdef PAE
 
 static __inline pt_entry_t
@@ -292,21 +320,6 @@
 	return (r);
 }
 
-static __inline pt_entry_t
-pte_load_store(pt_entry_t *ptep, pt_entry_t v)
-{
-	pt_entry_t r;
-
-	r = *ptep;
-	__asm __volatile(
-	    "1:\n"
-	    "\tlock; cmpxchg8b %1\n"
-	    "\tjnz 1b"
-	    : "+A" (r)
-	    : "m" (*ptep), "b" ((uint32_t)v), "c" ((uint32_t)(v >> 32)));
-	return (r);
-}
-
 /* XXXRU move to atomic.h? */
 static __inline int
 atomic_cmpset_64(volatile uint64_t *dst, uint64_t exp, uint64_t src)
@@ -332,6 +345,8 @@
 
 #define	pte_store(ptep, pte)	pte_load_store((ptep), (pt_entry_t)pte)
 
+#define	pte_store_ma(ptep, pte)	pte_load_store_ma((ptep), (pt_entry_t)pte)
+
 extern pt_entry_t pg_nx;
 
 #else /* PAE */
@@ -373,8 +388,6 @@
 #define	pte_clear(ptep)		pte_store((ptep), (pt_entry_t)0ULL)
 
 #define	pde_store(pdep, pde)	pte_store((pdep), (pde))
-#endif /* !XEN */
-
 #endif /* _KERNEL */
 
 /*
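
All of the pte_load_store() variants above use the same idiom: with PAE a 64-bit PTE cannot be stored by a single 32-bit mov, so the lock cmpxchg8b loop atomically swaps in the new value held in ECX:EBX and returns the old one; on a miss, cmpxchg8b reloads EDX:EAX with the current memory contents, so the loop simply retries until it wins. An illustrative C equivalent, written against the atomic_cmpset_64() defined later in this header:

	static __inline pt_entry_t
	pte_swap_sketch(pt_entry_t *ptep, pt_entry_t v)
	{
		pt_entry_t old;

		do {
			old = *ptep;	/* snapshot the current PTE */
		} while (!atomic_cmpset_64(ptep, old, v));
		return (old);		/* previous contents, as cmpxchg8b yields */
	}

The only difference between the Xen variants is the address space: pte_load_store() first converts v with xpmap_ptom(), while pte_load_store_ma() stores the machine address as given.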

==== //depot/projects/xen31-xenbus/sys/i386/include/xen/xenpmap.h#2 (text+ko) ====

@@ -75,7 +75,7 @@
 
 #define INVALID_P2M_ENTRY	(~0UL)
 
-#define pmap_valid_entry(E)           ((E) > PAGE_SIZE) /* is PDE or PTE valid? */
+#define pmap_valid_entry(E)           ((E) & PG_V) /* is PDE or PTE valid? */
 
 #define SH_PD_SET_VA        1
 #define SH_PD_SET_VA_MA     2
@@ -102,7 +102,7 @@
 }
 #endif
 #define	PT_GET(_ptp)						\
-	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : *(_ptp))
+	(pmap_valid_entry(*(_ptp)) ? xpmap_mtop(*(_ptp)) : (0))
 
 #ifdef WRITABLE_PAGETABLES
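
The pmap_valid_entry() fix replaces a magnitude heuristic ((E) > PAGE_SIZE) with the architectural valid bit: under Xen a PTE holds a machine address, which can take values for which the old comparison gives the wrong answer. Combined with the PT_GET() change, a lookup now behaves like this sketch:

	pt_entry_t pte = *ptep;
	vm_paddr_t pa;

	if (pte & PG_V)			/* architecturally valid mapping */
		pa = xpmap_mtop(pte);	/* machine -> pseudo-physical */
	else
		pa = 0;			/* invalid entries read back as 0 */

Returning 0 instead of the raw entry keeps stale machine addresses out of physical-address arithmetic in callers.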
 

==== //depot/projects/xen31-xenbus/sys/i386/xen/pmap.c#2 (text+ko) ====

@@ -193,12 +193,8 @@
 #define pmap_pte_u(pte)		((*(int *)pte & PG_A) != 0)
 #define pmap_pte_v(pte)		((*(int *)pte & PG_V) != 0)
 
-#define pmap_pte_set_w(pte, v)  {				\
-    if (v)							\
-	PT_SET_VA_MA(pte, *pte | PG_W, TRUE); 			\
-    else 							\
-	PT_SET_VA_MA(pte, *pte & ~PG_W, TRUE); 			\
-}
+#define pmap_pte_set_w(pte, v)	((v) ? atomic_set_int((u_int *)(pte), PG_W) : \
+    atomic_clear_int((u_int *)(pte), PG_W))
 #define pmap_pte_set_prot(pte, v) ((*(int *)pte &= ~PG_PROT), (*(int *)pte |= (v)))
 
 struct pmap kernel_pmap_store;
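
PG_W is a software-defined bit that the MMU ignores, so wiring state can be flipped with a plain locked read-modify-write on the low 32 bits of the PTE, with no TLB shootdown and no batched page-table update. The new macro's effect, spelled out:

	if (v)
		atomic_set_int((u_int *)pte, PG_W);	/* *pte |= PG_W, atomically */
	else
		atomic_clear_int((u_int *)pte, PG_W);	/* *pte &= ~PG_W, atomically */
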
@@ -252,12 +248,8 @@
  */
 static caddr_t crashdumpmap;
 
-#ifdef SMP
-extern pt_entry_t *SMPpt;
-#endif
 static pt_entry_t *PMAP1 = 0, *PMAP2;
 static pt_entry_t *PADDR1 = 0, *PADDR2;
-static int PMAP1_inuse = 0, PMAP2_inuse = 0;
 #ifdef SMP
 static int PMAP1cpu;
 static int PMAP1changedcpu;
@@ -305,6 +297,32 @@
 CTASSERT(1 << PDESHIFT == sizeof(pd_entry_t));
 CTASSERT(1 << PTESHIFT == sizeof(pt_entry_t));
 
+/*
+ * If you get an error here, then you set KVA_PAGES wrong! See the
+ * description of KVA_PAGES in sys/i386/include/pmap.h. It must be
+ * a multiple of 4 for a normal kernel, or a multiple of 8 for PAE.
+ */
+CTASSERT(KERNBASE % (1 << 24) == 0);
+
+
+
+
+static __inline void
+pagezero(void *page)
+{
+#if defined(I686_CPU)
+	if (cpu_class == CPUCLASS_686) {
+#if defined(CPU_ENABLE_SSE)
+		if (cpu_feature & CPUID_SSE2)
+			sse2_pagezero(page);
+		else
+#endif
+			i686_pagezero(page);
+	} else
+#endif
+		bzero(page, PAGE_SIZE);
+}
+
 void 
 pd_set(struct pmap *pmap, int ptepindex, vm_paddr_t val, int type)
 {
@@ -419,7 +437,7 @@
 	}
 	SYSMAP(caddr_t, CMAP1, CADDR1, 1)
 	SYSMAP(caddr_t, CMAP3, CADDR3, 1)
-	PT_CLEAR_VA(CMAP3, TRUE);
+	*CMAP3 = 0;
 
 	/*
 	 * Crashdump maps.
@@ -445,8 +463,9 @@
 	mtx_init(&PMAP2mutex, "PMAP2", NULL, MTX_DEF);
 
 	virtual_avail = va;
-	PT_CLEAR_VA(CMAP1, TRUE);
-	
+
+	*CMAP1 = 0;
+
 	/*
 	 * Leave in place an identity mapping (virt == phys) for the low 1 MB
 	 * physical memory region that is used by the ACPI wakeup code.  This
@@ -546,7 +565,7 @@
 		va = (vm_offset_t)btext;
 		while (va < endva) {
 			pte = vtopte(va);
-			if (*pte)
+			if (*pte & PG_V)
 				*pte |= pgeflag;
 			invltlb();	/* Play it safe, invltlb() every time */
 			va += PAGE_SIZE;
@@ -602,28 +621,19 @@
 	*head = *pte;
 	if (*head & PG_V)
 		panic("pmap_ptelist_alloc: va with PG_V set!");
-#ifdef XEN
-	PT_SET_MA(va, (vm_paddr_t)0);
-#else	
 	*pte = 0;
-#endif	
 	return (va);
 }
 
 static void
 pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
 {
-#ifndef XEN
 	pt_entry_t *pte;
 
 	if (va & PG_V)
 		panic("pmap_ptelist_free: freeing va with PG_V set!");
-
 	pte = vtopte(va);
 	*pte = *head;		/* virtual! PG_V is 0 though */
-#else
-	PT_SET_MA(va, (vm_paddr_t)*head);
-#endif	
 	*head = va;
 }
 
@@ -938,7 +948,7 @@
 		mtx_lock(&PMAP2mutex);
 		newpf = *pde & PG_FRAME;
 		if ((*PMAP2 & PG_FRAME) != newpf) {
-			PT_SET_VA_MA(PMAP2, newpf | PG_V | PG_A | PG_M, TRUE);
+			*PMAP2 = newpf | PG_V | PG_A;
 			pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
 		}
 		return (PADDR2 + (i386_btop(va) & (NPTEPG - 1)));
@@ -954,8 +964,10 @@
 pmap_pte_release(pt_entry_t *pte)
 {
 
-	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2)
+	if ((pt_entry_t *)((vm_offset_t)pte & ~PAGE_MASK) == PADDR2) {
+		*PMAP2 = 0;
 		mtx_unlock(&PMAP2mutex);
+	}
 }
 
 static __inline void
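
The pmap_pte()/pmap_pte_release() hunks define the temporary-mapping protocol used throughout this file: PMAP2 is a reserved kernel PTE that maps an arbitrary page-table page at the fixed address PADDR2, valid only while PMAP2mutex is held. In outline:

	mtx_lock(&PMAP2mutex);
	if ((*PMAP2 & PG_FRAME) != newpf) {
		*PMAP2 = newpf | PG_V | PG_A;		/* map the PT page */
		pmap_invalidate_page(kernel_pmap, (vm_offset_t)PADDR2);
	}
	/* ... read or modify PTEs through PADDR2 ... */
	*PMAP2 = 0;					/* pmap_pte_release() */
	mtx_unlock(&PMAP2mutex);

Clearing the entry on release is new in this change and replaces the deleted PMAP2_inuse bookkeeping; presumably Xen objects to a lingering writable alias of a page-table page. PMAP1/PADDR1 work the same way for pmap_pte_quick(), except that the caller must instead be pinned to its CPU (sched_pin()).
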
@@ -992,8 +1004,8 @@
 		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 		KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 		newpf = *pde & PG_FRAME;
-		if ((PT_GET(PMAP1) & PG_FRAME) != newpf) {
-			PT_SET_VA_MA(PMAP1, newpf | PG_V | PG_A, TRUE);
+		if ((*PMAP1 & PG_FRAME) != newpf) {
+			*PMAP1 = newpf | PG_V | PG_A;
 #ifdef SMP
 			PMAP1cpu = PCPU_GET(cpuid);
 #endif
@@ -1038,10 +1050,6 @@
 		pte = pmap_pte(pmap, va);
 		rtval = (PT_GET(pte) & PG_FRAME) | (va & PAGE_MASK);
 		pmap_pte_release(pte);
-		if (PMAP2_inuse) {
-			PT_CLEAR_VA(PMAP2, TRUE);
-			PMAP2_inuse = 0;
-		}
 	}
 	PMAP_UNLOCK(pmap);
 	return (rtval);
@@ -1071,12 +1079,6 @@
 		pte = pmap_pte(pmap, va);
 		rtval = (*pte & PG_FRAME) | (va & PAGE_MASK);
 		pmap_pte_release(pte);
-#ifdef XEN
-		if (PMAP2_inuse) {
-			PT_CLEAR_VA(PMAP2, TRUE);
-			PMAP2_inuse = 0;
-		}
-#endif
 	}
 	PMAP_UNLOCK(pmap);
 	return (rtval);
@@ -1110,11 +1112,9 @@
 		} else {
 			sched_pin();
 			pte = PT_GET(pmap_pte_quick(pmap, va));
-			if (PMAP1_inuse) {
-				PT_CLEAR_VA(PMAP1, TRUE);
-				PMAP1_inuse = 0;
-			}
-			if (pte != 0 &&
+			if (*PMAP1)
+				*PMAP1 = 0;
+			if ((pte & PG_V) &&
 			    ((pte & PG_RW) || (prot & VM_PROT_WRITE) == 0)) {
 				m = PHYS_TO_VM_PAGE(pte & PG_FRAME);
 				vm_page_hold(m);
@@ -1150,7 +1150,7 @@
 	pt_entry_t *pte;
 
 	pte = vtopte(va);
-	PT_SET_VA_MA(pte, ma | PG_RW | PG_V | pgeflag, TRUE);
+	pte_store_ma(pte, ma | PG_RW | PG_V | pgeflag);
 }
 
 
@@ -1459,7 +1459,7 @@
 	for (i = KPTDI; i < KPTDI + nkpt; i++)
 		pmap->pm_pdir_shadow[i] = PTD[i] & ~(PG_RW|PG_M|PG_A);
 	for (i = 0; i < NPGPTD; i++) {
-		vm_paddr_t *pd;
+		pt_entry_t *pd;
 
 		pd = pmap->pm_pdir + (i * NPDEPG);
 		PT_SET_MA(pd, *vtopte((vm_offset_t)pd) & ~(PG_M|PG_A|PG_U|PG_RW));
@@ -1569,7 +1569,7 @@
 	 * If the page table page is mapped, we just increment the
 	 * hold count, and activate it.
 	 */
-	if (ptema) {
+	if (ptema & PG_V) {
 		m = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
 		m->wire_count++;
 	} else {
@@ -1692,7 +1692,12 @@
 {
 	vm_page_t m, ptdpg[NPGPTD+1];
 	vm_paddr_t ma;
-	int i, npgptd = NPGPTD + 1;
+	int i;
+#ifdef PAE	
+	int npgptd = 2*NPGPTD + 1;
+#else
+	int npgptd = 2*NPGPTD;
+#endif
 
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
@@ -1704,17 +1709,17 @@
 	mtx_unlock_spin(&allpmaps_lock);
 
 	for (i = 0; i < NPGPTD; i++)
-		ptdpg[i] = PHYS_TO_VM_PAGE(PT_GET(&pmap->pm_pdir[PTDPTDI + i]) & PG_FRAME);
+		ptdpg[i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir_shadow + (i*NPDEPG)) & PG_FRAME);
+	for (i = 0; i < NPGPTD; i++)
+		ptdpg[NPGPTD + i] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir + (i*NPDEPG)) & PG_FRAME);
 
-	ptdpg[NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdir));
-	for (i = 0; i < nkpt + NPGPTD; i++) 
-		PD_CLEAR_VA(pmap, PTDPTDI + i, FALSE);
+#ifdef PAE
+	ptdpg[2*NPGPTD] = PHYS_TO_VM_PAGE(vtophys(pmap->pm_pdpt));
+#endif	
 
-	
-	bzero(pmap->pm_pdir + PTDPTDI, (nkpt + NPGPTD) *
-	    sizeof(*pmap->pm_pdir));
-
+	PT_UPDATES_FLUSH();
 	pmap_qremove((vm_offset_t)pmap->pm_pdir, NPGPTD);
+	pmap_qremove((vm_offset_t)pmap->pm_pdir_shadow, NPGPTD);	
 
 	for (i = 0; i < npgptd; i++) {
 		m = ptdpg[i];
@@ -1727,7 +1732,7 @@
 #endif
 		m->wire_count--;
 		atomic_subtract_int(&cnt.v_wire_count, 1);
-		vm_page_free_zero(m);
+		vm_page_free(m);
 	}
 	PMAP_LOCK_DESTROY(pmap);
 }
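
A pmap now carries two directory sets, the Xen-visible page directory (pm_pdir) plus a writable shadow (pm_pdir_shadow), so pmap_release() has twice as many page-table pages to collect. The resulting ptdpg[] layout, as a sketch:

	/*
	 * ptdpg[0 .. NPGPTD-1]          shadow page-directory pages
	 * ptdpg[NPGPTD .. 2*NPGPTD-1]   real (Xen-visible) page directory
	 * ptdpg[2*NPGPTD]               PDPT page (PAE only)
	 */

vm_page_free() replaces vm_page_free_zero() likely because the directories are no longer zeroed on the way out, so they cannot be handed back as known-zero pages.
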
@@ -2065,7 +2070,7 @@
 	pc = (struct pv_chunk *)pmap_ptelist_alloc(&pv_vafree);
 	pmap_qenter((vm_offset_t)pc, &m, 1);
 	if ((m->flags & PG_ZERO) == 0)
-		bzero(pc, PG_ZERO);
+		pagezero(pc);
 	pc->pc_pmap = pmap;
 	pc->pc_map[0] = pc_freemask[0] & ~1ul;	/* preallocated bit 0 */
 	for (field = 1; field < _NPCM; field++)
@@ -2150,8 +2155,14 @@
 	if (oldpte & PG_G)
 		pmap_invalidate_page(kernel_pmap, va);
 	pmap->pm_stats.resident_count -= 1;
-	if (oldpte & PG_MANAGED) {
-		m = PHYS_TO_VM_PAGE(oldpte & PG_FRAME);
+	/*
+	 * XXX This is not strictly correct, but somewhere along the line
+	 * we are losing the managed bit on some pages. It is unclear to me
+	 * why, but I think the most likely explanation is that xen's writable
+	 * page table implementation doesn't respect the unused bits.
+	 */
+	if ((oldpte & PG_MANAGED) || ((oldpte & PG_V) && (va < VM_MAXUSER_ADDRESS))) {
+		m = PHYS_TO_VM_PAGE(xpmap_mtop(oldpte) & PG_FRAME);
 		if (oldpte & PG_M) {
 			KASSERT((oldpte & PG_RW),
 	("pmap_remove_pte: modified page not writable: va: %#x, pte: %#jx",
@@ -2162,6 +2173,10 @@
 			vm_page_flag_set(m, PG_REFERENCED);
 		pmap_remove_entry(pmap, m, va);
 	}
+#ifdef DEBUG
+	else if (va < VM_MAXUSER_ADDRESS) 
+		printf("va=0x%x is unmanaged :-( \n", va);
+#endif	
 	return (pmap_unuse_pt(pmap, va, free));
 }
 
@@ -2176,14 +2191,13 @@
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	KASSERT(curthread->td_pinned > 0, ("curthread not pinned"));
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((pte = pmap_pte_quick(pmap, va)) == NULL || *pte == 0)
+	if ((pte = pmap_pte_quick(pmap, va)) == NULL || (*pte & PG_V) == 0)
 		return;
 	pmap_remove_pte(pmap, pte, va, free);
 	pmap_invalidate_page(pmap, va);
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}
+	if (*PMAP1)
+		*PMAP1 = 0;
+
 }
 
 /*
@@ -2264,7 +2278,7 @@
 
 		for (pte = pmap_pte_quick(pmap, sva); sva != pdnxt; pte++,
 		    sva += PAGE_SIZE) {
-			if (*pte == 0)
+			if ((*pte & PG_V) == 0)
 				continue;
 
 			/*
@@ -2277,10 +2291,8 @@
 				break;
 		}
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}
+	if (*PMAP1)
+		*PMAP1 = 0;
 out:
 	sched_unpin();
 	if (anyvalid)
@@ -2351,10 +2363,8 @@
 		PMAP_UNLOCK(pmap);
 	}
 	vm_page_flag_clear(m, PG_WRITEABLE);
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}
+	if (*PMAP1)
+		*PMAP1 = 0;
 	sched_unpin();
 }
 
@@ -2426,27 +2436,25 @@
 		    sva += PAGE_SIZE) {
 			vm_page_t m;
 
-#ifndef XEN
 retry:
-#endif			
 			/*
 			 * Regardless of whether a pte is 32 or 64 bits in
 			 * size, PG_RW, PG_A, and PG_M are among the least
 			 * significant 32 bits.
 			 */
-			obits = pbits = PT_GET(pte);
+			obits = pbits = *pte;
 			if ((pbits & PG_V) == 0)
 				continue;
 			if (pbits & PG_MANAGED) {
 				m = NULL;
 				if (pbits & PG_A) {
-					m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+					m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME);
 					vm_page_flag_set(m, PG_REFERENCED);
 					pbits &= ~PG_A;
 				}
 				if ((pbits & PG_M) != 0) {
 					if (m == NULL)
-						m = PHYS_TO_VM_PAGE(pbits & PG_FRAME);
+						m = PHYS_TO_VM_PAGE(xpmap_mtop(pbits) & PG_FRAME);
 					vm_page_dirty(m);
 				}
 			}
@@ -2459,7 +2467,6 @@
 #endif
 
 			if (pbits != obits) {
-#ifndef XEN				
 #ifdef PAE
 				if (!atomic_cmpset_64(pte, obits, pbits))
 					goto retry;
@@ -2468,9 +2475,6 @@
 				    pbits))
 					goto retry;
 #endif
-#else
-				PT_SET_VA(pte, pbits, FALSE);
-#endif				
 				if (obits & PG_G)
 					pmap_invalidate_page(pmap, sva);
 				else
@@ -2478,10 +2482,8 @@
 			}
 		}
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}	
+	if (*PMAP1)
+		*PMAP1 = 0;
 	sched_unpin();
 	if (anychanged)
 		pmap_invalidate_all(pmap);
@@ -2544,10 +2546,12 @@
 		}
 	}
 #endif
+
 	pde = pmap_pde(pmap, va);
 	if ((*pde & PG_PS) != 0)
 		panic("pmap_enter: attempted pmap_enter on 4MB page");
 	pte = pmap_pte_quick(pmap, va);
+
 	/*
 	 * Page Directory table entry not valid, we need a new PT page
 	 */
@@ -2606,7 +2610,9 @@
 		if (origpte & PG_MANAGED) {
 			om = PHYS_TO_VM_PAGE(opa);
 			pmap_remove_entry(pmap, om, va);
-		}
+		} else if (va < VM_MAXUSER_ADDRESS) 
+			printf("va=0x%x is unmanaged :-( \n", va);
+			
 		if (mpte != NULL) {
 			mpte->wire_count--;
 			KASSERT(mpte->wire_count > 0,
@@ -2685,10 +2691,8 @@
 		} else
 			pte_store(pte, newpte | PG_A);
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}	
+	if (*PMAP1)
+		*PMAP1 = 0;
 	sched_unpin();
 	vm_page_unlock_queues();
 	PMAP_UNLOCK(pmap);
@@ -2782,7 +2786,7 @@
 			 * If the page table page is mapped, we just increment
 			 * the hold count, and activate it.
 			 */
-			if (ptema) {
+			if (ptema & PG_V) {
 				if (ptema & PG_PS)
 					panic("pmap_enter_quick: unexpected mapping into 4MB page");
 				mpte = PHYS_TO_VM_PAGE(xpmap_mtop(ptema) & PG_FRAME);
@@ -2964,10 +2968,6 @@
 	 */
 	pmap_pte_set_w(pte, wired);
 	pmap_pte_release(pte);
-	if (PMAP2_inuse) {
-		PT_CLEAR_VA(PMAP2, TRUE);
-		PMAP2_inuse = 0;
-	}
 	PMAP_UNLOCK(pmap);
 }
 
@@ -3040,7 +3040,7 @@
 		src_pte = vtopte(addr);
 		while (addr < pdnxt) {
 			pt_entry_t ptetemp;
-			ptetemp = PT_GET(src_pte);
+			ptetemp = *src_pte;
 			/*
 			 * we only virtual copy managed pages
 			 */
@@ -3052,14 +3052,14 @@
 				dst_pte = pmap_pte_quick(dst_pmap, addr);
 				if (*dst_pte == 0 &&
 				    pmap_try_insert_pv_entry(dst_pmap, addr,
-				    PHYS_TO_VM_PAGE(ptetemp & PG_FRAME))) {
+				    PHYS_TO_VM_PAGE(xpmap_mtop(ptetemp) & PG_FRAME))) {
 					/*
 					 * Clear the wired, modified, and
 					 * accessed (referenced) bits
 					 * during the copy.
 					 */
-					PT_SET_VA(dst_pte, ptetemp & ~(PG_W | PG_M | PG_A), FALSE);	
-
+					*dst_pte = ptetemp & ~(PG_W | PG_M |
+					    PG_A);
 					dst_pmap->pm_stats.resident_count++;
 	 			} else {
 					free = NULL;
@@ -3077,33 +3077,14 @@
 			src_pte++;
 		}
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, FALSE);
-		PMAP1_inuse = 0;
-	}
+	if (*PMAP1)
+		*PMAP1 = 0;
 	sched_unpin();
 	vm_page_unlock_queues();
-	PT_UPDATES_FLUSH();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
 }	
 
-static __inline void
-pagezero(void *page)
-{
-#if defined(I686_CPU)
-	if (cpu_class == CPUCLASS_686) {
-#if defined(CPU_ENABLE_SSE)
-		if (cpu_feature & CPUID_SSE2)
-			sse2_pagezero(page);
-		else
-#endif
-			i686_pagezero(page);
-	} else
-#endif
-		bzero(page, PAGE_SIZE);
-}
-
 /*
  *	pmap_zero_page zeros the specified hardware page by mapping 
  *	the page into KVM and using bzero to clear its contents.
@@ -3118,17 +3099,10 @@
 	if (*sysmaps->CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
 	sched_pin();
-#ifdef XEN
-	PT_SET_VA(sysmaps->CMAP2, PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M, TRUE);
-#else	
-	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
-#endif
-	KASSERT(*sysmaps->CMAP2 == (PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M),
-	    ("CMAP2 did not get set is %llx", *sysmaps->CMAP2));
-	
+	*sysmaps->CMAP2 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
 	invlcaddr(sysmaps->CADDR2);
 	pagezero(sysmaps->CADDR2);
-	PT_CLEAR_VA(sysmaps->CMAP2, TRUE);
+	*sysmaps->CMAP2 = 0;
 	sched_unpin();
 	mtx_unlock(&sysmaps->lock);
 }
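
The zero and copy helpers share one per-CPU scratch-mapping pattern, now writing the machine address into the scratch PTE directly. In outline, with names from the hunks above:

	sched_pin();				/* stay on this CPU's sysmaps */
	*sysmaps->CMAP2 = PG_V | PG_RW |
	    xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
	invlcaddr(sysmaps->CADDR2);		/* flush the stale TLB entry */
	pagezero(sysmaps->CADDR2);		/* or bcopy() in pmap_copy_page() */
	*sysmaps->CMAP2 = 0;			/* drop the scratch mapping */
	sched_unpin();

Presetting PG_A and PG_M spares the hardware (or Xen) a later atomic update of the accessed and dirty bits.
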
@@ -3149,15 +3123,14 @@
 	if (*sysmaps->CMAP2)
 		panic("pmap_zero_page: CMAP2 busy");
 	sched_pin();
-	PT_SET_VA(sysmaps->CMAP2, PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M, TRUE); 
+	*sysmaps->CMAP2 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
 
 	invlcaddr(sysmaps->CADDR2);
 	if (off == 0 && size == PAGE_SIZE) 
 		pagezero(sysmaps->CADDR2);
 	else
 		bzero((char *)sysmaps->CADDR2 + off, size);
-
-	PT_CLEAR_VA(sysmaps->CMAP2, TRUE);
+	*sysmaps->CMAP2 = 0;
 	sched_unpin();
 	mtx_unlock(&sysmaps->lock);
 }
@@ -3175,10 +3148,10 @@
 	if (*CMAP3)
 		panic("pmap_zero_page: CMAP3 busy");
 	sched_pin();
-	PT_SET_VA(CMAP3, PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M, TRUE);
+	*CMAP3 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(m)) | PG_A | PG_M;
 	invlcaddr(CADDR3);
 	pagezero(CADDR3);
-	PT_CLEAR_VA(CMAP3, TRUE);
+	*CMAP3 = 0;
 	sched_unpin();
 }
 
@@ -3196,25 +3169,17 @@
 	sysmaps = &sysmaps_pcpu[PCPU_GET(cpuid)];
 	mtx_lock(&sysmaps->lock);
 	if (*sysmaps->CMAP1)
-		panic("pmap_copy_page: CMAP1 busy, CMAP1=%llx", *sysmaps->CMAP1);
+		panic("pmap_copy_page: CMAP1 busy");
 	if (*sysmaps->CMAP2)
 		panic("pmap_copy_page: CMAP2 busy");
 	sched_pin();
 	invlpg((u_int)sysmaps->CADDR1);
 	invlpg((u_int)sysmaps->CADDR2);
-	PT_SET_VA(sysmaps->CMAP1, PG_V | VM_PAGE_TO_PHYS(src) | PG_A, TRUE);
-	PT_SET_VA(sysmaps->CMAP2, PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M, TRUE);
-	KASSERT(*sysmaps->CMAP1 == (PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A ),
-	    ("CMAP1 did not get set is %llx", *sysmaps->CMAP1));
-	KASSERT(*sysmaps->CMAP2 == (PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M),
-	    ("CMAP2 did not get set is %llx", *sysmaps->CMAP2));	
+	*sysmaps->CMAP1 = PG_V | xpmap_ptom(VM_PAGE_TO_PHYS(src)) | PG_A;
+	*sysmaps->CMAP2 = PG_V | PG_RW | xpmap_ptom(VM_PAGE_TO_PHYS(dst)) | PG_A | PG_M;
 	bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
-	PT_CLEAR_VA(sysmaps->CMAP1, FALSE);
-	PT_CLEAR_VA(sysmaps->CMAP2, TRUE);
-	if (*sysmaps->CMAP1)
-		panic("pmap_copy_page: CMAP1 busy, CMAP1=%llx", *sysmaps->CMAP1);
-	if (*sysmaps->CMAP2)
-		panic("pmap_copy_page: CMAP2 busy");
+	*sysmaps->CMAP1 = 0;
+	*sysmaps->CMAP2 = 0;
 	sched_unpin();
 	mtx_unlock(&sysmaps->lock);
 }
@@ -3318,7 +3283,13 @@
 
 				if (pmap->pm_pdir_shadow[pv->pv_va >> PDRSHIFT] == 0) {
 					printf("PDIR IS ZERO @ VA %08x\n", pv->pv_va);
-					panic("bad pte");					
+					/* Work around an insufficient wired
+					 * count on the page directory; this
+					 * only buys a little time, as the pv
+					 * list on one of the pages is now corrupt.
+					 */
+					allfree = 0;
+					goto skip;
 				}
 
 				pte = vtopte(pv->pv_va);
@@ -3328,8 +3299,9 @@
 					printf(
 					    "TPTE at %p  IS ZERO @ VA %08x\n",
 					    pte, pv->pv_va);
-					panic("bad pte tpte");
+					panic("bad pte");
 				}
+
 /*
  * We cannot remove wired pages from a process' mapping at this time
  */
@@ -3348,7 +3320,6 @@
 					("pmap_remove_pages: bad tpte %#jx",
 					(uintmax_t)tpte));
 
-				pmap->pm_stats.resident_count--;
 
 				pte_clear(pte);
 
@@ -3358,16 +3329,19 @@
 				if (tpte & PG_M)
 					vm_page_dirty(m);
 
+				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+				if (TAILQ_EMPTY(&m->md.pv_list))
+					vm_page_flag_clear(m, PG_WRITEABLE);
+
+				pmap_unuse_pt(pmap, pv->pv_va, &free);
+			skip:
+
 				/* Mark free */
 				PV_STAT(pv_entry_frees++);
 				PV_STAT(pv_entry_spare++);
 				pv_entry_count--;
 				pc->pc_map[field] |= bitmask;
-				TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-				if (TAILQ_EMPTY(&m->md.pv_list))
-					vm_page_flag_clear(m, PG_WRITEABLE);
-
-				pmap_unuse_pt(pmap, pv->pv_va, &free);
+				pmap->pm_stats.resident_count--;			
 			}
 		}
 		if (allfree) {
@@ -3382,10 +3356,8 @@
 			pmap_ptelist_free(&pv_vafree, (vm_offset_t)pc);
 		}
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);
-		PMAP1_inuse = 0;
-	}
+	if (*PMAP1) 
+		*PMAP1 = 0;
 	sched_unpin();
 	pmap_invalidate_all(pmap);
 	vm_page_unlock_queues();
@@ -3422,10 +3394,8 @@
 		if (rv)
 			break;
 	}
-	if (PMAP1_inuse) {
-		PT_CLEAR_VA(PMAP1, TRUE);

>>> TRUNCATED FOR MAIL (1000 lines) <<<

