PERFORCE change 61980 for review
Marcel Moolenaar
marcel at FreeBSD.org
Wed Sep 22 17:31:17 PDT 2004
http://perforce.freebsd.org/chv.cgi?CH=61980
Change 61980 by marcel at marcel_nfs on 2004/09/23 00:30:31
IFC @61979
Affected files ...
.. //depot/projects/ia64/sys/ia64/ia64/exception.S#9 integrate
.. //depot/projects/ia64/sys/ia64/ia64/locore.S#6 integrate
.. //depot/projects/ia64/sys/ia64/ia64/machdep.c#110 integrate
.. //depot/projects/ia64/sys/ia64/ia64/pmap.c#85 integrate
.. //depot/projects/ia64/sys/ia64/include/pmap.h#18 integrate
.. //depot/projects/ia64/sys/ia64/include/pte.h#4 integrate
Differences ...
==== //depot/projects/ia64/sys/ia64/ia64/exception.S#9 (text+ko) ====
@@ -1,5 +1,5 @@
/*-
- * Copyright (c) 2003 Marcel Moolenaar
+ * Copyright (c) 2003,2004 Marcel Moolenaar
* Copyright (c) 2000 Doug Rabson
* All rights reserved.
*
@@ -24,7 +24,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/ia64/ia64/exception.S,v 1.54 2004/08/30 01:32:28 marcel Exp $
+ * $FreeBSD: src/sys/ia64/ia64/exception.S,v 1.55 2004/09/23 00:05:20 marcel Exp $
*/
#include <machine/asm.h>
@@ -793,8 +793,10 @@
cmp.eq p15,p14=7,r17 // RR7->p15, RR6->p14
(p13) br.spnt 9f
;;
-(p15) movl r17=PTE_P+PTE_MA_WB+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
-(p14) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RX
+(p15) movl r17=PTE_PRESENT+PTE_MA_WB+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+ \
+ PTE_AR_RX
+(p14) movl r17=PTE_PRESENT+PTE_MA_UC+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+ \
+ PTE_AR_RX
;;
dep r16=0,r16,50,14 // clear bits above PPN
;;
@@ -818,8 +820,10 @@
cmp.eq p15,p14=7,r17 // RR7->p15, RR6->p14
(p13) br.spnt 9f
;;
-(p15) movl r17=PTE_P+PTE_MA_WB+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
-(p14) movl r17=PTE_P+PTE_MA_UC+PTE_A+PTE_D+PTE_PL_KERN+PTE_AR_RW
+(p15) movl r17=PTE_PRESENT+PTE_MA_WB+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+ \
+ PTE_AR_RW
+(p14) movl r17=PTE_PRESENT+PTE_MA_UC+PTE_ACCESSED+PTE_DIRTY+PTE_PL_KERN+ \
+ PTE_AR_RW
;;
dep r16=0,r16,50,14 // clear bits above PPN
;;
@@ -887,7 +891,7 @@
ld8 r28=[r27]
;;
mov cr.itir=r29
- or r28=PTE_D|PTE_A,r28
+ or r28=PTE_DIRTY+PTE_ACCESSED,r28
;;
}
{ .mlx
@@ -944,7 +948,7 @@
(p15) br.cond.sptk.few 2f // if not, read next in chain
;;
ld8 r21=[r20] // read pte
- mov r22=PTE_D|PTE_A
+ mov r22=PTE_DIRTY+PTE_ACCESSED
;;
or r21=r22,r21 // set dirty & access bit
;;
@@ -1010,7 +1014,7 @@
(p15) br.cond.sptk.few 2f // if not, read next in chain
;;
ld8 r21=[r20] // read pte
- mov r22=PTE_A
+ mov r22=PTE_ACCESSED
;;
or r21=r22,r21 // set accessed bit
;;
@@ -1076,7 +1080,7 @@
(p15) br.cond.sptk.few 2f // if not, read next in chain
;;
ld8 r21=[r20] // read pte
- mov r22=PTE_A
+ mov r22=PTE_ACCESSED
;;
or r21=r22,r21 // set accessed bit
;;
==== //depot/projects/ia64/sys/ia64/ia64/locore.S#6 (text+ko) ====
@@ -23,7 +23,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/ia64/ia64/locore.S,v 1.37 2004/09/17 20:42:45 marcel Exp $
+ * $FreeBSD: src/sys/ia64/ia64/locore.S,v 1.38 2004/09/23 00:05:20 marcel Exp $
*/
#include <machine/asm.h>
@@ -181,7 +181,8 @@
mov rr[r17] = r16
;;
srlz.d
- mov r16 = (PTE_P|PTE_MA_WB|PTE_A|PTE_D|PTE_PL_KERN|PTE_AR_RWX)
+ mov r16 = PTE_PRESENT+PTE_MA_WB+PTE_ACCESSED+PTE_DIRTY+ \
+ PTE_PL_KERN+PTE_AR_RWX
mov r18 = 28<<2
;;
==== //depot/projects/ia64/sys/ia64/ia64/machdep.c#110 (text+ko) ====
@@ -26,7 +26,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.188 2004/09/19 03:50:46 marcel Exp $");
+__FBSDID("$FreeBSD: src/sys/ia64/ia64/machdep.c,v 1.189 2004/09/23 00:05:20 marcel Exp $");
#include "opt_compat.h"
#include "opt_ddb.h"
@@ -381,20 +381,15 @@
void
map_pal_code(void)
{
- struct ia64_pte pte;
- u_int64_t psr;
+ pt_entry_t pte;
+ uint64_t psr;
if (ia64_pal_base == 0)
return;
- bzero(&pte, sizeof(pte));
- pte.pte_p = 1;
- pte.pte_ma = PTE_MA_WB;
- pte.pte_a = 1;
- pte.pte_d = 1;
- pte.pte_pl = PTE_PL_KERN;
- pte.pte_ar = PTE_AR_RWX;
- pte.pte_ppn = ia64_pal_base >> 12;
+ pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+ PTE_PL_KERN | PTE_AR_RWX;
+ pte |= ia64_pal_base & PTE_PPN_MASK;
__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
"r"(IA64_PHYS_TO_RR7(ia64_pal_base)), "r"(IA64_ID_PAGE_SHIFT<<2));
@@ -405,9 +400,9 @@
__asm __volatile("mov cr.ifa=%0" ::
"r"(IA64_PHYS_TO_RR7(ia64_pal_base)));
__asm __volatile("mov cr.itir=%0" :: "r"(IA64_ID_PAGE_SHIFT << 2));
- __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
+ __asm __volatile("itr.d dtr[%0]=%1" :: "r"(1), "r"(pte));
__asm __volatile("srlz.d"); /* XXX not needed. */
- __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(*(u_int64_t*)&pte));
+ __asm __volatile("itr.i itr[%0]=%1" :: "r"(1), "r"(pte));
__asm __volatile("mov psr.l=%0" :: "r" (psr));
__asm __volatile("srlz.i");
}
@@ -415,17 +410,12 @@
void
map_gateway_page(void)
{
- struct ia64_pte pte;
- u_int64_t psr;
+ pt_entry_t pte;
+ uint64_t psr;
- bzero(&pte, sizeof(pte));
- pte.pte_p = 1;
- pte.pte_ma = PTE_MA_WB;
- pte.pte_a = 1;
- pte.pte_d = 1;
- pte.pte_pl = PTE_PL_KERN;
- pte.pte_ar = PTE_AR_X_RX;
- pte.pte_ppn = IA64_RR_MASK((u_int64_t)ia64_gateway_page) >> 12;
+ pte = PTE_PRESENT | PTE_MA_WB | PTE_ACCESSED | PTE_DIRTY |
+ PTE_PL_KERN | PTE_AR_X_RX;
+ pte |= (uint64_t)ia64_gateway_page & PTE_PPN_MASK;
__asm __volatile("ptr.d %0,%1; ptr.i %0,%1" ::
"r"(VM_MAX_ADDRESS), "r"(PAGE_SHIFT << 2));
@@ -435,9 +425,9 @@
__asm __volatile("srlz.i");
__asm __volatile("mov cr.ifa=%0" :: "r"(VM_MAX_ADDRESS));
__asm __volatile("mov cr.itir=%0" :: "r"(PAGE_SHIFT << 2));
- __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
+ __asm __volatile("itr.d dtr[%0]=%1" :: "r"(3), "r"(pte));
__asm __volatile("srlz.d"); /* XXX not needed. */
- __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(*(u_int64_t*)&pte));
+ __asm __volatile("itr.i itr[%0]=%1" :: "r"(3), "r"(pte));
__asm __volatile("mov psr.l=%0" :: "r" (psr));
__asm __volatile("srlz.i");
==== //depot/projects/ia64/sys/ia64/ia64/pmap.c#85 (text+ko) ====
@@ -46,7 +46,7 @@
*/
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/sys/ia64/ia64/pmap.c,v 1.149 2004/09/18 22:56:58 alc Exp $");
+__FBSDID("$FreeBSD: src/sys/ia64/ia64/pmap.c,v 1.151 2004/09/23 00:05:20 marcel Exp $");
#include <sys/param.h>
#include <sys/kernel.h>
@@ -116,7 +116,7 @@
*/
/* XXX move to a header. */
-extern u_int64_t ia64_gateway_page[];
+extern uint64_t ia64_gateway_page[];
MALLOC_DEFINE(M_PMAP, "PMAP", "PMAP Structures");
@@ -132,7 +132,20 @@
#define PMAP_INLINE
#endif
-#define pmap_pte_pa(pte) (((pte)->pte_ppn) << 12)
+#define pmap_lpte_accessed(lpte) ((lpte)->pte & PTE_ACCESSED)
+#define pmap_lpte_dirty(lpte) ((lpte)->pte & PTE_DIRTY)
+#define pmap_lpte_managed(lpte) ((lpte)->pte & PTE_MANAGED)
+#define pmap_lpte_ppn(lpte) ((lpte)->pte & PTE_PPN_MASK)
+#define pmap_lpte_present(lpte) ((lpte)->pte & PTE_PRESENT)
+#define pmap_lpte_prot(lpte) (((lpte)->pte & PTE_PROT_MASK) >> 56)
+#define pmap_lpte_wired(lpte) ((lpte)->pte & PTE_WIRED)
+
+#define pmap_clear_accessed(lpte) (lpte)->pte &= ~PTE_ACCESSED
+#define pmap_clear_dirty(lpte) (lpte)->pte &= ~PTE_DIRTY
+#define pmap_clear_present(lpte) (lpte)->pte &= ~PTE_PRESENT
+#define pmap_clear_wired(lpte) (lpte)->pte &= ~PTE_WIRED
+
+#define pmap_set_wired(lpte) (lpte)->pte |= PTE_WIRED
/*
* Statically allocated kernel pmap
@@ -147,9 +160,7 @@
struct mtx pmap_vhptmutex;
/*
- * We use an object to own the kernel's 'page tables'. For simplicity,
- * we use one page directory to index a set of pages containing
- * ia64_lptes. This gives us up to 2Gb of kernel virtual space.
+ * Kernel virtual memory management.
*/
static int nkpt;
struct ia64_lpte **ia64_kptdir;
@@ -162,11 +173,11 @@
vm_offset_t kernel_vm_end;
/* Values for ptc.e. XXX values for SKI. */
-static u_int64_t pmap_ptc_e_base = 0x100000000;
-static u_int64_t pmap_ptc_e_count1 = 3;
-static u_int64_t pmap_ptc_e_count2 = 2;
-static u_int64_t pmap_ptc_e_stride1 = 0x2000;
-static u_int64_t pmap_ptc_e_stride2 = 0x100000000;
+static uint64_t pmap_ptc_e_base = 0x100000000;
+static uint64_t pmap_ptc_e_count1 = 3;
+static uint64_t pmap_ptc_e_count2 = 2;
+static uint64_t pmap_ptc_e_stride1 = 0x2000;
+static uint64_t pmap_ptc_e_stride2 = 0x100000000;
/*
* Data for the RID allocator
@@ -175,7 +186,7 @@
static int pmap_rididx;
static int pmap_ridmapsz;
static int pmap_ridmax;
-static u_int64_t *pmap_ridmap;
+static uint64_t *pmap_ridmap;
struct mtx pmap_ridmutex;
/*
@@ -308,7 +319,7 @@
pmap_ridmax = (1 << ridbits);
pmap_ridmapsz = pmap_ridmax / 64;
- pmap_ridmap = (u_int64_t *)pmap_steal_memory(pmap_ridmax / 8);
+ pmap_ridmap = (uint64_t *)pmap_steal_memory(pmap_ridmax / 8);
pmap_ridmap[0] |= 0xff;
pmap_rididx = 0;
pmap_ridcount = 8;
@@ -505,7 +516,7 @@
static void
pmap_invalidate_all_1(void *arg)
{
- u_int64_t addr;
+ uint64_t addr;
int i, j;
register_t psr;
@@ -535,7 +546,7 @@
#endif
}
-static u_int32_t
+static uint32_t
pmap_allocate_rid(void)
{
uint64_t bit, bits;
@@ -568,7 +579,7 @@
}
static void
-pmap_free_rid(u_int32_t rid)
+pmap_free_rid(uint32_t rid)
{
uint64_t bit;
int idx;
@@ -592,10 +603,10 @@
static PMAP_INLINE void
pmap_install_pte(struct ia64_lpte *vhpte, struct ia64_lpte *pte)
{
- u_int64_t *vhp, *p;
+ uint64_t *vhp, *p;
- vhp = (u_int64_t *)vhpte;
- p = (u_int64_t *)pte;
+ vhp = (uint64_t *)vhpte;
+ p = (uint64_t *)pte;
critical_enter();
@@ -620,7 +631,7 @@
static PMAP_INLINE int
pmap_equal_pte(struct ia64_lpte *pte1, struct ia64_lpte *pte2)
{
- return *(u_int64_t *) pte1 == *(u_int64_t *) pte2;
+ return *(uint64_t *) pte1 == *(uint64_t *) pte2;
}
/*
@@ -766,17 +777,17 @@
vhpte = (struct ia64_lpte *) ia64_thash(va);
- if (vhpte->pte_chain)
+ if (vhpte->chain)
pmap_vhpt_collisions++;
mtx_lock(&pmap_vhptmutex);
- pte->pte_chain = vhpte->pte_chain;
+ pte->chain = vhpte->chain;
ia64_mf();
- vhpte->pte_chain = ia64_tpa((vm_offset_t)pte);
+ vhpte->chain = ia64_tpa((vm_offset_t)pte);
ia64_mf();
- if (!vhpte->pte_p && pte->pte_p)
+ if (!pmap_lpte_present(vhpte) && pmap_lpte_present(pte))
pmap_install_pte(vhpte, pte);
mtx_unlock(&pmap_vhptmutex);
@@ -794,7 +805,8 @@
mtx_lock(&pmap_vhptmutex);
- if ((!vhpte->pte_p || vhpte->pte_tag == pte->pte_tag) && pte->pte_p)
+ if ((!pmap_lpte_present(vhpte) || vhpte->tag == pte->tag) &&
+ pmap_lpte_present(pte))
pmap_install_pte(vhpte, pte);
mtx_unlock(&pmap_vhptmutex);
@@ -810,15 +822,15 @@
struct ia64_lpte *pte;
struct ia64_lpte *lpte;
struct ia64_lpte *vhpte;
- u_int64_t tag;
+ uint64_t tag;
vhpte = (struct ia64_lpte *)ia64_thash(va);
/*
* If the VHPTE is invalid, there can't be a collision chain.
*/
- if (!vhpte->pte_p) {
- KASSERT(!vhpte->pte_chain, ("bad vhpte"));
+ if (!pmap_lpte_present(vhpte)) {
+ KASSERT(!vhpte->chain, ("bad vhpte"));
return (ENOENT);
}
@@ -827,32 +839,32 @@
mtx_lock(&pmap_vhptmutex);
- pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(vhpte->pte_chain);
+ pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(vhpte->chain);
KASSERT(pte != NULL, ("foo"));
- while (pte->pte_tag != tag) {
+ while (pte->tag != tag) {
lpte = pte;
- if (pte->pte_chain == 0) {
+ if (pte->chain == 0) {
mtx_unlock(&pmap_vhptmutex);
return (ENOENT);
}
- pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->pte_chain);
+ pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->chain);
}
/* Snip this pv_entry out of the collision chain. */
- lpte->pte_chain = pte->pte_chain;
+ lpte->chain = pte->chain;
ia64_mf();
/*
* If the VHPTE matches as well, change it to map the first
* element from the chain if there is one.
*/
- if (vhpte->pte_tag == tag) {
- if (vhpte->pte_chain) {
- pte = (void*)IA64_PHYS_TO_RR7(vhpte->pte_chain);
+ if (vhpte->tag == tag) {
+ if (vhpte->chain) {
+ pte = (void*)IA64_PHYS_TO_RR7(vhpte->chain);
pmap_install_pte(vhpte, pte);
} else
- vhpte->pte_p = 0;
+ pmap_clear_present(vhpte);
}
mtx_unlock(&pmap_vhptmutex);
@@ -867,17 +879,17 @@
pmap_find_vhpt(vm_offset_t va)
{
struct ia64_lpte *pte;
- u_int64_t tag;
+ uint64_t tag;
tag = ia64_ttag(va);
pte = (struct ia64_lpte *)ia64_thash(va);
- if (pte->pte_chain == 0)
+ if (pte->chain == 0)
return (NULL);
- pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->pte_chain);
- while (pte->pte_tag != tag) {
- if (pte->pte_chain == 0)
+ pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->chain);
+ while (pte->tag != tag) {
+ if (pte->chain == 0)
return (NULL);
- pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->pte_chain);
- pte = (struct ia64_lpte *)IA64_PHYS_TO_RR7(pte->chain);
}
return (pte);
}
@@ -953,8 +965,8 @@
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
- if (pte != NULL && pte->pte_p)
- pa = pmap_pte_pa(pte);
+ if (pte != NULL && pmap_lpte_present(pte))
+ pa = pmap_lpte_ppn(pte);
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
return (pa);
@@ -979,8 +991,9 @@
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
- if (pte != NULL && pte->pte_p && (pte->pte_prot & prot) == prot) {
- m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
+ if (pte != NULL && pmap_lpte_present(pte) &&
+ (pmap_lpte_prot(pte) & prot) == prot) {
+ m = PHYS_TO_VM_PAGE(pmap_lpte_ppn(pte));
vm_page_hold(m);
}
vm_page_unlock_queues();
@@ -1025,7 +1038,7 @@
if (!pte) {
pte = uma_zalloc(ptezone, M_NOWAIT);
if (pte != NULL)
- pte->pte_p = 0;
+ pmap_clear_present(pte);
}
return pte;
}
@@ -1041,7 +1054,7 @@
if (va < VM_MAXUSER_ADDRESS)
uma_zfree(ptezone, pte);
else
- pte->pte_p = 0;
+ pmap_clear_present(pte);
}
static PMAP_INLINE void
@@ -1054,10 +1067,11 @@
PTE_AR_RWX /* VM_PROT_WRITE|VM_PROT_EXECUTE */
};
- pte->pte_prot = prot;
- pte->pte_pl = (prot == VM_PROT_NONE || pm == kernel_pmap)
+ pte->pte &= ~(PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK);
+ pte->pte |= (uint64_t)(prot & VM_PROT_ALL) << 56;
+ pte->pte |= (prot == VM_PROT_NONE || pm == kernel_pmap)
? PTE_PL_KERN : PTE_PL_USER;
- pte->pte_ar = prot2ar[(prot & VM_PROT_ALL) >> 1];
+ pte->pte |= prot2ar[(prot & VM_PROT_ALL) >> 1];
}
/*
@@ -1071,26 +1085,18 @@
pmap_set_pte(struct ia64_lpte *pte, vm_offset_t va, vm_offset_t pa,
boolean_t wired, boolean_t managed)
{
- int wasvalid = pte->pte_p;
+ int wasvalid = pmap_lpte_present(pte);
- pte->pte_p = 1;
- pte->pte_ma = PTE_MA_WB;
- if (managed) {
- pte->pte_a = 0;
- pte->pte_d = 0;
- } else {
- pte->pte_a = 1;
- pte->pte_d = 1;
- }
- pte->pte_ppn = pa >> 12;
- pte->pte_ed = 0;
- pte->pte_w = (wired) ? 1 : 0;
- pte->pte_m = (managed) ? 1 : 0;
+ pte->pte &= PTE_PROT_MASK | PTE_PL_MASK | PTE_AR_MASK;
+ pte->pte |= PTE_PRESENT | PTE_MA_WB;
+ pte->pte |= (managed) ? PTE_MANAGED : (PTE_DIRTY | PTE_ACCESSED);
+ pte->pte |= (wired) ? PTE_WIRED : 0;
+ pte->pte |= pa & PTE_PPN_MASK;
- pte->pte_ps = PAGE_SHIFT;
- pte->pte_key = 0;
+ pte->itir.ps = PAGE_SHIFT;
+ pte->itir.key = 0;
- pte->pte_tag = ia64_ttag(va);
+ pte->tag = ia64_ttag(va);
if (wasvalid) {
pmap_update_vhpt(pte, va);
@@ -1105,10 +1111,10 @@
static void
pmap_clear_pte(struct ia64_lpte *pte, vm_offset_t va)
{
- if (pte->pte_p) {
+ if (pmap_lpte_present(pte)) {
pmap_remove_vhpt(va);
ia64_ptc_g(va, PAGE_SHIFT << 2);
- pte->pte_p = 0;
+ pmap_clear_present(pte);
}
}
@@ -1136,18 +1142,18 @@
/*
* Make sure pmap_set_pte() knows it isn't in the VHPT.
*/
- pte->pte_p = 0;
+ pmap_clear_present(pte);
- if (pte->pte_w)
+ if (pmap_lpte_wired(pte))
pmap->pm_stats.wired_count -= 1;
pmap->pm_stats.resident_count -= 1;
- if (pte->pte_m) {
- m = PHYS_TO_VM_PAGE(pmap_pte_pa(pte));
- if (pte->pte_d)
+ if (pmap_lpte_managed(pte)) {
+ m = PHYS_TO_VM_PAGE(pmap_lpte_ppn(pte));
+ if (pmap_lpte_dirty(pte))
if (pmap_track_modified(va))
vm_page_dirty(m);
- if (pte->pte_a)
+ if (pmap_lpte_accessed(pte))
vm_page_flag_set(m, PG_REFERENCED);
if (freepte)
@@ -1186,9 +1192,9 @@
return (0);
pte = pmap_find_kpte(va);
- if (!pte->pte_p)
+ if (!pmap_lpte_present(pte))
return (0);
- return (pmap_pte_pa(pte) | (va & PAGE_MASK));
+ return (pmap_lpte_ppn(pte) | (va & PAGE_MASK));
}
/*
@@ -1208,7 +1214,7 @@
vm_offset_t tva = va + i * PAGE_SIZE;
int wasvalid;
pte = pmap_find_kpte(tva);
- wasvalid = pte->pte_p;
+ wasvalid = pmap_lpte_present(pte);
pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
pmap_set_pte(pte, tva, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
if (wasvalid)
@@ -1244,7 +1250,7 @@
int wasvalid;
pte = pmap_find_kpte(va);
- wasvalid = pte->pte_p;
+ wasvalid = pmap_lpte_present(pte);
pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
pmap_set_pte(pte, va, pa, FALSE, FALSE);
if (wasvalid)
@@ -1398,7 +1404,7 @@
oldpmap = pmap_install(pmap);
pte = pmap_find_vhpt(va);
KASSERT(pte != NULL, ("pte"));
- if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m))
+ if (pmap_lpte_ppn(pte) != VM_PAGE_TO_PHYS(m))
panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
pmap_remove_pte(pmap, pte, va, pv, 1);
pmap_invalidate_page(pmap, va);
@@ -1446,18 +1452,18 @@
continue;
}
- if (pte->pte_prot != prot) {
- if (pte->pte_m) {
- vm_offset_t pa = pmap_pte_pa(pte);
+ if (pmap_lpte_prot(pte) != prot) {
+ if (pmap_lpte_managed(pte)) {
+ vm_offset_t pa = pmap_lpte_ppn(pte);
vm_page_t m = PHYS_TO_VM_PAGE(pa);
- if (pte->pte_d) {
+ if (pmap_lpte_dirty(pte)) {
if (pmap_track_modified(sva))
vm_page_dirty(m);
- pte->pte_d = 0;
+ pmap_clear_dirty(pte);
}
- if (pte->pte_a) {
+ if (pmap_lpte_accessed(pte)) {
vm_page_flag_set(m, PG_REFERENCED);
- pte->pte_a = 0;
+ pmap_clear_accessed(pte);
}
}
pmap_pte_prot(pmap, pte, prot);
@@ -1518,43 +1524,38 @@
oldpmap = pmap_install(pmap);
}
origpte = *pte;
-
- if (origpte.pte_p)
- opa = pmap_pte_pa(&origpte);
- else
- opa = 0;
+ opa = (pmap_lpte_present(&origpte)) ? pmap_lpte_ppn(&origpte) : 0UL;
managed = FALSE;
-
pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
/*
* Mapping has not changed, must be protection or wiring change.
*/
- if (origpte.pte_p && (opa == pa)) {
+ if (pmap_lpte_present(&origpte) && (opa == pa)) {
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
* are valid mappings in them. Hence, if a user page is wired,
* the PT page will be also.
*/
- if (wired && !origpte.pte_w)
+ if (wired && !pmap_lpte_wired(&origpte))
pmap->pm_stats.wired_count++;
- else if (!wired && origpte.pte_w)
+ else if (!wired && pmap_lpte_wired(&origpte))
pmap->pm_stats.wired_count--;
+ managed = (pmap_lpte_managed(&origpte)) ? TRUE : FALSE;
+
/*
* We might be turning off write access to the page,
* so we go ahead and sense modify status.
*/
- if (origpte.pte_m) {
- if (origpte.pte_d && pmap_track_modified(va)) {
- vm_page_t om;
- om = PHYS_TO_VM_PAGE(opa);
- vm_page_dirty(om);
- }
+ if (managed && pmap_lpte_dirty(&origpte) &&
+ pmap_track_modified(va)) {
+ vm_page_t om;
+ om = PHYS_TO_VM_PAGE(opa);
+ vm_page_dirty(om);
}
- managed = (origpte.pte_m) ? TRUE : FALSE;
goto validate;
}
/*
@@ -1636,7 +1637,7 @@
PMAP_LOCK(pmap);
oldpmap = pmap_install(pmap);
}
- if (pte->pte_p)
+ if (pmap_lpte_present(pte))
goto reinstall;
managed = FALSE;
@@ -1704,16 +1705,13 @@
pte = pmap_find_vhpt(va);
KASSERT(pte != NULL, ("pte"));
- if (wired && !pte->pte_w)
+ if (wired && !pmap_lpte_wired(pte)) {
pmap->pm_stats.wired_count++;
- else if (!wired && pte->pte_w)
+ pmap_set_wired(pte);
+ } else if (!wired && pmap_lpte_wired(pte)) {
pmap->pm_stats.wired_count--;
-
- /*
- * Wiring is not a hardware characteristic so there is no need to
- * invalidate TLB.
- */
- pte->pte_w = (wired) ? 1 : 0;
+ pmap_clear_wired(pte);
+ }
pmap_install(oldpmap);
PMAP_UNLOCK(pmap);
@@ -1865,7 +1863,7 @@
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_w)
+ if (pmap_lpte_wired(pte))
continue;
pmap_remove_pte(pmap, pte, pv->pv_va, pv, 1);
@@ -1939,9 +1937,9 @@
oldpmap = pmap_install(pv->pv_pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_a) {
+ if (pmap_lpte_accessed(pte)) {
count++;
- pte->pte_a = 0;
+ pmap_clear_accessed(pte);
pmap_update_vhpt(pte, pv->pv_va);
pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}
@@ -1972,7 +1970,7 @@
struct ia64_lpte *pte = pmap_find_vhpt(pv->pv_va);
pmap_install(oldpmap);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_a)
+ if (pmap_lpte_accessed(pte))
return 1;
}
@@ -2004,7 +2002,7 @@
pte = pmap_find_vhpt(pv->pv_va);
pmap_install(oldpmap);
KASSERT(pte != NULL, ("pte"));
- rv = pte->pte_d != 0;
+ rv = pmap_lpte_dirty(pte) ? TRUE : FALSE;
PMAP_UNLOCK(pv->pv_pmap);
if (rv)
break;
@@ -2025,7 +2023,7 @@
struct ia64_lpte *pte;
pte = pmap_find_vhpt(addr);
- if (pte && pte->pte_p)
+ if (pte && pmap_lpte_present(pte))
return (FALSE);
return (TRUE);
}
@@ -2048,8 +2046,8 @@
oldpmap = pmap_install(pv->pv_pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_d) {
- pte->pte_d = 0;
+ if (pmap_lpte_dirty(pte)) {
+ pmap_clear_dirty(pte);
pmap_update_vhpt(pte, pv->pv_va);
pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}
@@ -2078,8 +2076,8 @@
oldpmap = pmap_install(pv->pv_pmap);
pte = pmap_find_vhpt(pv->pv_va);
KASSERT(pte != NULL, ("pte"));
- if (pte->pte_a) {
- pte->pte_a = 0;
+ if (pmap_lpte_accessed(pte)) {
+ pmap_clear_accessed(pte);
pmap_update_vhpt(pte, pv->pv_va);
pmap_invalidate_page(pv->pv_pmap, pv->pv_va);
}
@@ -2132,22 +2130,22 @@
if (!pte)
return 0;
- if (pte->pte_p) {
+ if (pmap_lpte_present(pte)) {
vm_page_t m;
vm_offset_t pa;
val = MINCORE_INCORE;
- if (!pte->pte_m)
+ if (!pmap_lpte_managed(pte))
return val;
- pa = pmap_pte_pa(pte);
+ pa = pmap_lpte_ppn(pte);
m = PHYS_TO_VM_PAGE(pa);
/*
* Modified by us
*/
- if (pte->pte_d)
+ if (pmap_lpte_dirty(pte))
val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
else {
/*
@@ -2161,7 +2159,7 @@
/*
* Referenced by us
*/
- if (pte->pte_a)
+ if (pmap_lpte_accessed(pte))
val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
else {
/*
@@ -2255,9 +2253,9 @@
struct ia64_pal_result res;
int i, maxtr;
struct {
- struct ia64_pte pte;
+ uint64_t ifa;
struct ia64_itir itir;
- uint64_t ifa;
+ pt_entry_t pte;
struct ia64_rr rr;
} buf;
static const char *manames[] = {
@@ -2280,30 +2278,26 @@
for (i = 0; i <= maxtr; i++) {
bzero(&buf, sizeof(buf));
res = ia64_call_pal_stacked_physical
- (PAL_VM_TR_READ, i, type, ia64_tpa((u_int64_t) &buf));
+ (PAL_VM_TR_READ, i, type, ia64_tpa((uint64_t) &buf));
if (!(res.pal_result[0] & 1))
- buf.pte.pte_ar = 0;
+ buf.pte &= ~PTE_AR_MASK;
if (!(res.pal_result[0] & 2))
- buf.pte.pte_pl = 0;
+ buf.pte &= ~PTE_PL_MASK;
if (!(res.pal_result[0] & 4))
- buf.pte.pte_d = 0;
+ pmap_clear_dirty(&buf);
if (!(res.pal_result[0] & 8))
- buf.pte.pte_ma = 0;
- db_printf(
- "%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s %d %06x\n",
- (int)buf.ifa & 1,
- buf.rr.rr_rid,
- buf.ifa >> 12,
- buf.pte.pte_ppn,
- psnames[buf.itir.itir_ps],
- buf.pte.pte_ed,
- buf.pte.pte_ar,
- buf.pte.pte_pl,
- buf.pte.pte_d,
- buf.pte.pte_a,
- manames[buf.pte.pte_ma],
- buf.pte.pte_p,
- buf.itir.itir_key);
+ buf.pte &= ~PTE_MA_MASK;
+ db_printf("%d %06x %013lx %013lx %4s %d %d %d %d %d %-3s "
+ "%d %06x\n", (int)buf.ifa & 1, buf.rr.rr_rid,
+ buf.ifa >> 12, (buf.pte & PTE_PPN_MASK) >> 12,
+ psnames[buf.itir.ps], (buf.pte & PTE_ED) ? 1 : 0,
+ (int)(buf.pte & PTE_AR_MASK) >> 9,
+ (int)(buf.pte & PTE_PL_MASK) >> 7,
+ (pmap_lpte_dirty(&buf)) ? 1 : 0,
+ (pmap_lpte_accessed(&buf)) ? 1 : 0,
+ manames[(buf.pte & PTE_MA_MASK) >> 2],
+ (pmap_lpte_present(&buf)) ? 1 : 0,
+ buf.itir.key);
}
}
@@ -2320,7 +2314,7 @@
DB_COMMAND(rr, db_rr)
{
int i;
- u_int64_t t;
+ uint64_t t;
struct ia64_rr rr;
printf("RR RID PgSz VE\n");
@@ -2328,7 +2322,7 @@
__asm __volatile ("mov %0=rr[%1]"
: "=r"(t)
: "r"(IA64_RR_BASE(i)));
- *(u_int64_t *) &rr = t;
+ *(uint64_t *) &rr = t;
printf("%d %06x %4s %d\n",
i, rr.rr_rid, psnames[rr.rr_ps], rr.rr_ve);
}
==== //depot/projects/ia64/sys/ia64/include/pmap.h#18 (text+ko) ====
@@ -39,7 +39,7 @@
* from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90
* from: @(#)pmap.h 7.4 (Berkeley) 5/12/91
* from: i386 pmap.h,v 1.54 1997/11/20 19:30:35 bde Exp
- * $FreeBSD: src/sys/ia64/include/pmap.h,v 1.22 2004/07/19 05:39:49 alc Exp $
+ * $FreeBSD: src/sys/ia64/include/pmap.h,v 1.23 2004/09/23 00:05:20 marcel Exp $
*/
#ifndef _MACHINE_PMAP_H_
@@ -48,6 +48,7 @@
#include <sys/queue.h>
#include <sys/_lock.h>
#include <sys/_mutex.h>
+#include <machine/atomic.h>
#include <machine/pte.h>
#ifdef _KERNEL
==== //depot/projects/ia64/sys/ia64/include/pte.h#4 (text+ko) ====
@@ -23,112 +23,76 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
- * $FreeBSD: src/sys/ia64/include/pte.h,v 1.3 2004/08/09 20:44:41 marcel Exp $
+ * $FreeBSD: src/sys/ia64/include/pte.h,v 1.4 2004/09/23 00:05:20 marcel Exp $
*/
#ifndef _MACHINE_PTE_H_
#define _MACHINE_PTE_H_
-#ifdef LOCORE
+#define PTE_PRESENT 0x0000000000000001
+#define PTE__RV1_ 0x0000000000000002
+#define PTE_MA_MASK 0x000000000000001C
+#define PTE_MA_WB 0x0000000000000000
+#define PTE_MA_UC 0x0000000000000010
+#define PTE_MA_UCE 0x0000000000000014
+#define PTE_MA_WC 0x0000000000000018
+#define PTE_MA_NATPAGE 0x000000000000001C
+#define PTE_ACCESSED 0x0000000000000020
+#define PTE_DIRTY 0x0000000000000040
+#define PTE_PL_MASK 0x0000000000000180
+#define PTE_PL_KERN 0x0000000000000000
+#define PTE_PL_USER 0x0000000000000180
+#define PTE_AR_MASK 0x0000000000000E00
+#define PTE_AR_R 0x0000000000000000
+#define PTE_AR_RX 0x0000000000000200
+#define PTE_AR_RW 0x0000000000000400
+#define PTE_AR_RWX 0x0000000000000600
+#define PTE_AR_R_RW 0x0000000000000800
+#define PTE_AR_RX_RWX 0x0000000000000A00
+#define PTE_AR_RWX_RW 0x0000000000000C00
+#define PTE_AR_X_RX 0x0000000000000E00
+#define PTE_PPN_MASK 0x0003FFFFFFFFF000
+#define PTE__RV2_ 0x000C000000000000
+#define PTE_ED 0x0010000000000000
+#define PTE_IG_MASK 0xFFE0000000000000
+#define PTE_WIRED 0x0020000000000000
+#define PTE_MANAGED 0x0040000000000000
+#define PTE_PROT_MASK 0x0700000000000000
-#define PTE_P (1<<0)
-#define PTE_MA_WB (0<<2)
-#define PTE_MA_UC (4<<2)
-#define PTE_MA_UCE (5<<2)
-#define PTE_MA_WC (6<<2)
-#define PTE_MA_NATPAGE (7<<2)
-#define PTE_A (1<<5)
-#define PTE_D (1<<6)
-#define PTE_PL_KERN (0<<7)
-#define PTE_PL_USER (3<<7)
-#define PTE_AR_R (0<<9)
-#define PTE_AR_RX (1<<9)
-#define PTE_AR_RW (2<<9)
>>> TRUNCATED FOR MAIL (1000 lines) <<<
More information about the p4-projects
mailing list