svn commit: r250933 - in user/attilio/vmcontention: contrib/openbsm/bin/auditdistd sys/arm/arm sys/arm/include usr.bin/ar usr.bin/bc usr.bin/m4 usr.bin/mklocale usr.sbin/bluetooth/bthidd usr.sbin/c...
Attilio Rao
attilio at FreeBSD.org
Thu May 23 14:34:35 UTC 2013
Author: attilio
Date: Thu May 23 14:34:34 2013
New Revision: 250933
URL: http://svnweb.freebsd.org/changeset/base/250933
Log:
MFC
Modified:
user/attilio/vmcontention/contrib/openbsm/bin/auditdistd/token.l
user/attilio/vmcontention/sys/arm/arm/locore.S
user/attilio/vmcontention/sys/arm/arm/pmap-v6.c
user/attilio/vmcontention/sys/arm/arm/pmap.c
user/attilio/vmcontention/sys/arm/arm/trap.c
user/attilio/vmcontention/sys/arm/include/armreg.h
user/attilio/vmcontention/sys/arm/include/pmap.h
user/attilio/vmcontention/usr.bin/ar/acplex.l
user/attilio/vmcontention/usr.bin/bc/scan.l
user/attilio/vmcontention/usr.bin/m4/tokenizer.l
user/attilio/vmcontention/usr.bin/mklocale/lex.l
user/attilio/vmcontention/usr.sbin/bluetooth/bthidd/lexer.l
user/attilio/vmcontention/usr.sbin/config/lang.l
user/attilio/vmcontention/usr.sbin/jail/jaillex.l
Directory Properties:
user/attilio/vmcontention/ (props changed)
user/attilio/vmcontention/contrib/openbsm/ (props changed)
user/attilio/vmcontention/sys/ (props changed)
user/attilio/vmcontention/usr.sbin/jail/ (props changed)
Modified: user/attilio/vmcontention/contrib/openbsm/bin/auditdistd/token.l
==============================================================================
--- user/attilio/vmcontention/contrib/openbsm/bin/auditdistd/token.l Thu May 23 14:31:42 2013 (r250932)
+++ user/attilio/vmcontention/contrib/openbsm/bin/auditdistd/token.l Thu May 23 14:34:34 2013 (r250933)
@@ -52,6 +52,7 @@ int depth;
int lineno;
#define DP do { } while (0)
+#define YY_DECL int yylex(void)
%}
%option noinput
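
The YY_DECL define above is the whole change to token.l, and the same one-liner is repeated in each of the other .l files touched by this commit. A minimal sketch of the pattern, assuming stock flex behavior (the guard below is paraphrased from a flex-generated scanner, not taken from this diff):

    /* In the .l prologue: pin down the scanner entry point. */
    #define YY_DECL int yylex(void)

    /* flex only supplies its default when the macro is absent; the
     * generated scanner contains, roughly: */
    #ifndef YY_DECL
    #define YY_DECL int yylex (void)
    #endif
    /* ...and later emits the function itself as: */
    YY_DECL
    {
            /* generated scanning loop */
    }

Defining YY_DECL in the source keeps the generated definition in sync with any yylex() prototype declared elsewhere (for instance in a yacc grammar header), which is presumably what motivated the sweep across these files; the terse MFC log does not say.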
Modified: user/attilio/vmcontention/sys/arm/arm/locore.S
==============================================================================
--- user/attilio/vmcontention/sys/arm/arm/locore.S Thu May 23 14:31:42 2013 (r250932)
+++ user/attilio/vmcontention/sys/arm/arm/locore.S Thu May 23 14:34:34 2013 (r250933)
@@ -188,6 +188,7 @@ Lunmapped:
#ifdef _ARM_ARCH_6
orr r0, r0, #(CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE)
orr r2, r2, #(CPU_CONTROL_AFLT_ENABLE)
+ orr r0, r0, #(CPU_CONTROL_AF_ENABLE)
#endif
orr r0, r0, #(CPU_CONTROL_MMU_ENABLE)
mcr p15, 0, r0, c1, c0, 0
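
The new orr sets CPU_CONTROL_AF_ENABLE in the value loaded into the system control register, which the armreg.h change in this commit presumably defines as the SCTLR access-flag-enable bit. With that bit set, a translation for a page whose PTE lacks the access flag is not cached in the TLB; the first touch raises a flag fault that the pmap changes below turn into a "referenced" mark. A C rendering of just the r0 path of this hunk (read_sctlr()/write_sctlr() are hypothetical helpers standing in for the mrc/mcr instructions; the separate r2 mask handled elsewhere in locore.S is omitted):

    /* Boot-time MMU control setup, ARMv6 path only (a sketch). */
    uint32_t ctrl = read_sctlr();      /* mrc p15, 0, rX, c1, c0, 0 */
    ctrl |= CPU_CONTROL_V6_EXTPAGE | CPU_CONTROL_UNAL_ENABLE;
    ctrl |= CPU_CONTROL_AF_ENABLE;     /* hardware access flag on */
    ctrl |= CPU_CONTROL_MMU_ENABLE;
    write_sctlr(ctrl);                 /* mcr p15, 0, rX, c1, c0, 0 */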
Modified: user/attilio/vmcontention/sys/arm/arm/pmap-v6.c
==============================================================================
--- user/attilio/vmcontention/sys/arm/arm/pmap-v6.c Thu May 23 14:31:42 2013 (r250932)
+++ user/attilio/vmcontention/sys/arm/arm/pmap-v6.c Thu May 23 14:34:34 2013 (r250933)
@@ -220,8 +220,8 @@ static void pmap_free_pv_entry(pmap_t p
static pv_entry_t pmap_get_pv_entry(pmap_t pmap, boolean_t try);
static vm_page_t pmap_pv_reclaim(pmap_t locked_pmap);
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t,
- vm_prot_t, boolean_t, int);
+static void pmap_enter_locked(pmap_t, vm_offset_t, vm_prot_t,
+ vm_page_t, vm_prot_t, boolean_t, int);
static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va);
static void pmap_alloc_l1(pmap_t);
static void pmap_free_l1(pmap_t);
@@ -383,13 +383,13 @@ int pmap_needs_pte_sync;
* Macro to determine if a mapping might be resident in the
* instruction cache and/or TLB
*/
-#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC))
+#define PTE_BEEN_EXECD(pte) (L2_S_EXECUTABLE(pte) && L2_S_REFERENCED(pte))
/*
* Macro to determine if a mapping might be resident in the
* data cache and/or TLB
*/
-#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0)
+#define PTE_BEEN_REFD(pte) (L2_S_REFERENCED(pte))
#ifndef PMAP_SHPGPERPROC
#define PMAP_SHPGPERPROC 200
@@ -563,7 +563,7 @@ pmap_pte_init_mmu_v6(void)
* This is called at pmap creation time.
*/
static void
-pmap_alloc_l1(pmap_t pm)
+pmap_alloc_l1(pmap_t pmap)
{
struct l1_ttable *l1;
u_int8_t domain;
@@ -594,8 +594,8 @@ pmap_alloc_l1(pmap_t pm)
/*
* Fix up the relevant bits in the pmap structure
*/
- pm->pm_l1 = l1;
- pm->pm_domain = domain + 1;
+ pmap->pm_l1 = l1;
+ pmap->pm_domain = domain + 1;
}
/*
@@ -603,9 +603,9 @@ pmap_alloc_l1(pmap_t pm)
* This is called at pmap destruction time.
*/
static void
-pmap_free_l1(pmap_t pm)
+pmap_free_l1(pmap_t pmap)
{
- struct l1_ttable *l1 = pm->pm_l1;
+ struct l1_ttable *l1 = pmap->pm_l1;
mtx_lock(&l1_lru_lock);
@@ -618,8 +618,8 @@ pmap_free_l1(pmap_t pm)
/*
* Free up the domain number which was allocated to the pmap
*/
- l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first;
- l1->l1_domain_first = pm->pm_domain - 1;
+ l1->l1_domain_free[pmap->pm_domain - 1] = l1->l1_domain_first;
+ l1->l1_domain_first = pmap->pm_domain - 1;
l1->l1_domain_use_count--;
/*
@@ -641,7 +641,7 @@ pmap_free_l1(pmap_t pm)
* and VA, or NULL if no L2 bucket exists for the address.
*/
static PMAP_INLINE struct l2_bucket *
-pmap_get_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_get_l2_bucket(pmap_t pmap, vm_offset_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
@@ -649,7 +649,7 @@ pmap_get_l2_bucket(pmap_t pm, vm_offset_
l1idx = L1_IDX(va);
- if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL ||
+ if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL ||
(l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL)
return (NULL);
@@ -669,7 +669,7 @@ pmap_get_l2_bucket(pmap_t pm, vm_offset_
* the bucket/page in the meantime.
*/
static struct l2_bucket *
-pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_alloc_l2_bucket(pmap_t pmap, vm_offset_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
@@ -677,36 +677,36 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offse
l1idx = L1_IDX(va);
- PMAP_ASSERT_LOCKED(pm);
+ PMAP_ASSERT_LOCKED(pmap);
rw_assert(&pvh_global_lock, RA_WLOCKED);
- if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
/*
* No mapping at this address, as there is
* no entry in the L1 table.
* Need to allocate a new l2_dtable.
*/
- PMAP_UNLOCK(pm);
+ PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) {
rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pm);
+ PMAP_LOCK(pmap);
return (NULL);
}
rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pm);
- if (pm->pm_l2[L2_IDX(l1idx)] != NULL) {
+ PMAP_LOCK(pmap);
+ if (pmap->pm_l2[L2_IDX(l1idx)] != NULL) {
/*
* Someone already allocated the l2_dtable while
* we were doing the same.
*/
uma_zfree(l2table_zone, l2);
- l2 = pm->pm_l2[L2_IDX(l1idx)];
+ l2 = pmap->pm_l2[L2_IDX(l1idx)];
} else {
bzero(l2, sizeof(*l2));
/*
* Link it into the parent pmap
*/
- pm->pm_l2[L2_IDX(l1idx)] = l2;
+ pmap->pm_l2[L2_IDX(l1idx)] = l2;
}
}
@@ -722,11 +722,11 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offse
* No L2 page table has been allocated. Chances are, this
* is because we just allocated the l2_dtable, above.
*/
- PMAP_UNLOCK(pm);
+ PMAP_UNLOCK(pmap);
rw_wunlock(&pvh_global_lock);
ptep = uma_zalloc(l2zone, M_NOWAIT);
rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pm);
+ PMAP_LOCK(pmap);
if (l2b->l2b_kva != 0) {
/* We lost the race. */
uma_zfree(l2zone, ptep);
@@ -740,7 +740,7 @@ pmap_alloc_l2_bucket(pmap_t pm, vm_offse
* if we allocated a new one above.
*/
if (l2->l2_occupancy == 0) {
- pm->pm_l2[L2_IDX(l1idx)] = NULL;
+ pmap->pm_l2[L2_IDX(l1idx)] = NULL;
uma_zfree(l2table_zone, l2);
}
return (NULL);
@@ -769,7 +769,7 @@ pmap_free_l2_ptp(pt_entry_t *l2)
* for the kernel pmap).
*/
static void
-pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count)
+pmap_free_l2_bucket(pmap_t pmap, struct l2_bucket *l2b, u_int count)
{
struct l2_dtable *l2;
pd_entry_t *pl1pd, l1pd;
@@ -797,7 +797,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
* to a performance win over time as we don't need to continually
* alloc/free.
*/
- if (l2b->l2b_occupancy > 0 || pm == pmap_kernel())
+ if (l2b->l2b_occupancy > 0 || pmap == pmap_kernel())
return;
/*
@@ -809,14 +809,14 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
ptep = l2b->l2b_kva;
l2b->l2b_kva = NULL;
- pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ pl1pd = &pmap->pm_l1->l1_kva[l1idx];
/*
* If the L1 slot matches the pmap's domain
* number, then invalidate it.
*/
l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK);
- if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) {
+ if (l1pd == (L1_C_DOM(pmap->pm_domain) | L1_TYPE_C)) {
*pl1pd = 0;
PTE_SYNC(pl1pd);
}
@@ -829,7 +829,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
/*
* Update the reference count in the associated l2_dtable
*/
- l2 = pm->pm_l2[L2_IDX(l1idx)];
+ l2 = pmap->pm_l2[L2_IDX(l1idx)];
if (--l2->l2_occupancy > 0)
return;
@@ -838,7 +838,7 @@ pmap_free_l2_bucket(pmap_t pm, struct l2
* slots managed by this l2_dtable. Go ahead and NULL-out
* the pointer in the parent pmap and free the l2_dtable.
*/
- pm->pm_l2[L2_IDX(l1idx)] = NULL;
+ pmap->pm_l2[L2_IDX(l1idx)] = NULL;
uma_zfree(l2table_zone, l2);
}
@@ -888,12 +888,12 @@ pmap_l2ptp_ctor(void *mem, int size, voi
* constants and the latter would require an extra inversion at run-time.
*/
static int
-pmap_clearbit(struct vm_page *pg, u_int maskbits)
+pmap_clearbit(struct vm_page *m, u_int maskbits)
{
struct l2_bucket *l2b;
struct pv_entry *pv;
pt_entry_t *ptep, npte, opte;
- pmap_t pm;
+ pmap_t pmap;
vm_offset_t va;
u_int oflags;
int count = 0;
@@ -902,12 +902,8 @@ pmap_clearbit(struct vm_page *pg, u_int
if (maskbits & PVF_WRITE)
maskbits |= PVF_MOD;
- /*
- * Clear saved attributes (modify, reference)
- */
- pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF));
- if (TAILQ_EMPTY(&pg->md.pv_list)) {
+ if (TAILQ_EMPTY(&m->md.pv_list)) {
rw_wunlock(&pvh_global_lock);
return (0);
}
@@ -915,55 +911,54 @@ pmap_clearbit(struct vm_page *pg, u_int
/*
* Loop over all current mappings setting/clearing as appropos
*/
- TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) {
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
va = pv->pv_va;
- pm = PV_PMAP(pv);
+ pmap = PV_PMAP(pv);
oflags = pv->pv_flags;
pv->pv_flags &= ~maskbits;
- PMAP_LOCK(pm);
+ PMAP_LOCK(pmap);
- l2b = pmap_get_l2_bucket(pm, va);
+ l2b = pmap_get_l2_bucket(pmap, va);
ptep = &l2b->l2b_kva[l2pte_index(va)];
npte = opte = *ptep;
if ((maskbits & (PVF_WRITE|PVF_MOD)) && L2_S_WRITABLE(opte)) {
- vm_page_dirty(pg);
+ vm_page_dirty(m);
/* make the pte read only */
npte |= L2_APX;
}
- if (maskbits & PVF_REF) {
+ if ((maskbits & PVF_REF) && L2_S_REFERENCED(opte)) {
/*
- * Make the PTE invalid so that we will take a
- * page fault the next time the mapping is
- * referenced.
+ * Clear referenced flag in PTE so that we
+ * will take a flag fault the next time the mapping
+ * is referenced.
*/
- npte &= ~L2_TYPE_MASK;
- npte |= L2_TYPE_INV;
+ npte &= ~L2_S_REF;
}
CTR4(KTR_PMAP,"clearbit: pmap:%p bits:%x pte:%x->%x",
- pm, maskbits, opte, npte);
+ pmap, maskbits, opte, npte);
if (npte != opte) {
count++;
*ptep = npte;
PTE_SYNC(ptep);
/* Flush the TLB entry if a current pmap. */
- if (PV_BEEN_EXECD(oflags))
+ if (PTE_BEEN_EXECD(opte))
cpu_tlb_flushID_SE(pv->pv_va);
- else if (PV_BEEN_REFD(oflags))
+ else if (PTE_BEEN_REFD(opte))
cpu_tlb_flushD_SE(pv->pv_va);
}
- PMAP_UNLOCK(pm);
+ PMAP_UNLOCK(pmap);
}
if (maskbits & PVF_WRITE)
- vm_page_aflag_clear(pg, PGA_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
rw_wunlock(&pvh_global_lock);
return (count);
}
@@ -987,21 +982,19 @@ pmap_clearbit(struct vm_page *pg, u_int
* => caller should not adjust pmap's wire_count
*/
static void
-pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm,
+pmap_enter_pv(struct vm_page *m, struct pv_entry *pve, pmap_t pmap,
vm_offset_t va, u_int flags)
{
rw_assert(&pvh_global_lock, RA_WLOCKED);
- PMAP_ASSERT_LOCKED(pm);
+ PMAP_ASSERT_LOCKED(pmap);
pve->pv_va = va;
pve->pv_flags = flags;
- TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
- pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
+ TAILQ_INSERT_HEAD(&m->md.pv_list, pve, pv_list);
if (pve->pv_flags & PVF_WIRED)
- ++pm->pm_stats.wired_count;
- vm_page_aflag_set(pg, PGA_REFERENCED);
+ ++pmap->pm_stats.wired_count;
}
/*
@@ -1011,13 +1004,13 @@ pmap_enter_pv(struct vm_page *pg, struct
* => caller should hold lock on vm_page
*/
static PMAP_INLINE struct pv_entry *
-pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_find_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
{
struct pv_entry *pv;
rw_assert(&pvh_global_lock, RA_WLOCKED);
- TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list)
- if (pm == PV_PMAP(pv) && va == pv->pv_va)
+ TAILQ_FOREACH(pv, &m->md.pv_list, pv_list)
+ if (pmap == PV_PMAP(pv) && va == pv->pv_va)
break;
return (pv);
}
@@ -1036,6 +1029,12 @@ vector_page_setprot(int prot)
l2b = pmap_get_l2_bucket(pmap_kernel(), vector_page);
ptep = &l2b->l2b_kva[l2pte_index(vector_page)];
+ /*
+ * Set referenced flag.
+ * Vectors' page is always desired
+ * to be allowed to reside in TLB.
+ */
+ *ptep |= L2_S_REF;
pmap_set_prot(ptep, prot|VM_PROT_EXECUTE, 0);
@@ -1052,16 +1051,15 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_
if (!(prot & VM_PROT_EXECUTE))
*ptep |= L2_XN;
+ /* Set defaults first - kernel read access */
*ptep |= L2_APX;
*ptep |= L2_S_PROT_R;
-
+ /* Now tune APs as desired */
if (user)
*ptep |= L2_S_PROT_U;
if (prot & VM_PROT_WRITE)
*ptep &= ~(L2_APX);
- else if (user)
- *ptep &= ~(L2_S_PROT_R);
}
/*
@@ -1076,51 +1074,37 @@ pmap_set_prot(pt_entry_t *ptep, vm_prot_
*/
static void
-pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve)
+pmap_nuke_pv(struct vm_page *m, pmap_t pmap, struct pv_entry *pve)
{
rw_assert(&pvh_global_lock, RA_WLOCKED);
- PMAP_ASSERT_LOCKED(pm);
+ PMAP_ASSERT_LOCKED(pmap);
- TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
+ TAILQ_REMOVE(&m->md.pv_list, pve, pv_list);
if (pve->pv_flags & PVF_WIRED)
- --pm->pm_stats.wired_count;
-
- if (pg->md.pvh_attrs & PVF_MOD)
- vm_page_dirty(pg);
-
- if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
- pg->md.pvh_attrs &= ~PVF_REF;
- else
- vm_page_aflag_set(pg, PGA_REFERENCED);
+ --pmap->pm_stats.wired_count;
if (pve->pv_flags & PVF_WRITE) {
- TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list)
+ TAILQ_FOREACH(pve, &m->md.pv_list, pv_list)
if (pve->pv_flags & PVF_WRITE)
break;
if (!pve) {
- pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_aflag_clear(pg, PGA_WRITEABLE);
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
}
}
static struct pv_entry *
-pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va)
+pmap_remove_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va)
{
struct pv_entry *pve;
rw_assert(&pvh_global_lock, RA_WLOCKED);
- pve = TAILQ_FIRST(&pg->md.pv_list);
- while (pve) {
- if (PV_PMAP(pve) == pm && pve->pv_va == va) { /* match? */
- pmap_nuke_pv(pg, pm, pve);
- break;
- }
- pve = TAILQ_NEXT(pve, pv_list);
- }
+ pve = pmap_find_pv(m, pmap, va); /* find corresponding pve */
+ if (pve != NULL)
+ pmap_nuke_pv(m, pmap, pve);
return(pve); /* return removed pve */
}
@@ -1136,42 +1120,36 @@ pmap_remove_pv(struct vm_page *pg, pmap_
* Modify a physical-virtual mapping in the pv table
*/
static u_int
-pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va,
+pmap_modify_pv(struct vm_page *m, pmap_t pmap, vm_offset_t va,
u_int clr_mask, u_int set_mask)
{
struct pv_entry *npv;
u_int flags, oflags;
- PMAP_ASSERT_LOCKED(pm);
+ PMAP_ASSERT_LOCKED(pmap);
rw_assert(&pvh_global_lock, RA_WLOCKED);
- if ((npv = pmap_find_pv(pg, pm, va)) == NULL)
+ if ((npv = pmap_find_pv(m, pmap, va)) == NULL)
return (0);
/*
* There is at least one VA mapping this page.
*/
-
- if (clr_mask & (PVF_REF | PVF_MOD))
- pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD);
-
oflags = npv->pv_flags;
npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask;
if ((flags ^ oflags) & PVF_WIRED) {
if (flags & PVF_WIRED)
- ++pm->pm_stats.wired_count;
+ ++pmap->pm_stats.wired_count;
else
- --pm->pm_stats.wired_count;
+ --pmap->pm_stats.wired_count;
}
if ((oflags & PVF_WRITE) && !(flags & PVF_WRITE)) {
- TAILQ_FOREACH(npv, &pg->md.pv_list, pv_list) {
+ TAILQ_FOREACH(npv, &m->md.pv_list, pv_list) {
if (npv->pv_flags & PVF_WRITE)
break;
}
- if (!npv) {
- pg->md.pvh_attrs &= ~PVF_MOD;
- vm_page_aflag_clear(pg, PGA_WRITEABLE);
- }
+ if (!npv)
+ vm_page_aflag_clear(m, PGA_WRITEABLE);
}
return (oflags);
@@ -1192,8 +1170,6 @@ pmap_pinit0(struct pmap *pmap)
{
PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap));
- dprintf("pmap_pinit0: pmap = %08x, pm_pdir = %08x\n",
- (u_int32_t) pmap, (u_int32_t) pmap->pm_pdir);
bcopy(kernel_pmap, pmap, sizeof(*pmap));
bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx));
PMAP_LOCK_INIT(pmap);
@@ -1299,7 +1275,7 @@ SYSCTL_INT(_vm_pmap, OID_AUTO, shpgperpr
"Page share factor per proc");
int
-pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user)
+pmap_fault_fixup(pmap_t pmap, vm_offset_t va, vm_prot_t ftype, int user)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
@@ -1311,7 +1287,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
l1idx = L1_IDX(va);
rw_wlock(&pvh_global_lock);
- PMAP_LOCK(pm);
+ PMAP_LOCK(pmap);
/*
* If there is no l2_dtable for this address, then the process
@@ -1320,7 +1296,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
* Note: This will catch userland processes trying to access
* kernel addresses.
*/
- l2 = pm->pm_l2[L2_IDX(l1idx)];
+ l2 = pmap->pm_l2[L2_IDX(l1idx)];
if (l2 == NULL)
goto out;
@@ -1349,22 +1325,23 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
pa = l2pte_pa(pte);
CTR5(KTR_PMAP, "pmap_fault_fix: pmap:%p va:%x pte:0x%x ftype:%x user:%x",
- pm, va, pte, ftype, user);
- if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte))) {
+ pmap, va, pte, ftype, user);
+ if ((ftype & VM_PROT_WRITE) && !(L2_S_WRITABLE(pte)) &&
+ L2_S_REFERENCED(pte)) {
/*
* This looks like a good candidate for "page modified"
* emulation...
*/
struct pv_entry *pv;
- struct vm_page *pg;
+ struct vm_page *m;
/* Extract the physical address of the page */
- if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) {
+ if ((m = PHYS_TO_VM_PAGE(pa)) == NULL) {
goto out;
}
/* Get the current flags for this page. */
- pv = pmap_find_pv(pg, pm, va);
+ pv = pmap_find_pv(m, pmap, va);
if (pv == NULL) {
goto out;
}
@@ -1379,38 +1356,34 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
if ((pv->pv_flags & PVF_WRITE) == 0) {
goto out;
}
- pg->md.pvh_attrs |= PVF_REF | PVF_MOD;
- vm_page_dirty(pg);
- pv->pv_flags |= PVF_REF | PVF_MOD;
+
+ vm_page_dirty(m);
/* Re-enable write permissions for the page */
- *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
pmap_set_prot(ptep, VM_PROT_WRITE, *ptep & L2_S_PROT_U);
CTR1(KTR_PMAP, "pmap_fault_fix: new pte:0x%x", pte);
PTE_SYNC(ptep);
rv = 1;
- } else if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) {
+ } else if (!L2_S_REFERENCED(pte)) {
/*
* This looks like a good candidate for "page referenced"
* emulation.
*/
struct pv_entry *pv;
- struct vm_page *pg;
+ struct vm_page *m;
/* Extract the physical address of the page */
- if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL)
+ if ((m = PHYS_TO_VM_PAGE(pa)) == NULL)
goto out;
/* Get the current flags for this page. */
-
- pv = pmap_find_pv(pg, pm, va);
+ pv = pmap_find_pv(m, pmap, va);
if (pv == NULL)
goto out;
- pg->md.pvh_attrs |= PVF_REF;
- pv->pv_flags |= PVF_REF;
-
+ vm_page_aflag_set(m, PGA_REFERENCED);
- *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO;
+ /* Mark the page "referenced" */
+ *ptep = pte | L2_S_REF;
PTE_SYNC(ptep);
rv = 1;
}
@@ -1419,8 +1392,8 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
* We know there is a valid mapping here, so simply
* fix up the L1 if necessary.
*/
- pl1pd = &pm->pm_l1->l1_kva[l1idx];
- l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO;
+ pl1pd = &pmap->pm_l1->l1_kva[l1idx];
+ l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | L1_C_PROTO;
if (*pl1pd != l1pd) {
*pl1pd = l1pd;
PTE_SYNC(pl1pd);
@@ -1455,9 +1428,9 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
* that other parts of the pmap are not doing their job WRT managing
* the TLB.
*/
- if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) {
- printf("fixup: pm %p, va 0x%08x, ftype %d - nothing to do!\n",
- pm, va, ftype);
+ if (rv == 0 && pmap->pm_l1->l1_domain_use_count == 1) {
+ printf("fixup: pmap %p, va 0x%08x, ftype %d - nothing to do!\n",
+ pmap, va, ftype);
printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n",
l2, l2b, ptep, pl1pd);
printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n",
@@ -1475,7 +1448,7 @@ pmap_fault_fixup(pmap_t pm, vm_offset_t
out:
rw_wunlock(&pvh_global_lock);
- PMAP_UNLOCK(pm);
+ PMAP_UNLOCK(pmap);
return (rv);
}
@@ -1528,19 +1501,19 @@ pmap_postinit(void)
* can be accessed quickly from cpu_switch() et al.
*/
void
-pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb)
+pmap_set_pcb_pagedir(pmap_t pmap, struct pcb *pcb)
{
struct l2_bucket *l2b;
- pcb->pcb_pagedir = pm->pm_l1->l1_physaddr;
+ pcb->pcb_pagedir = pmap->pm_l1->l1_physaddr;
pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) |
- (DOMAIN_CLIENT << (pm->pm_domain * 2));
+ (DOMAIN_CLIENT << (pmap->pm_domain * 2));
if (vector_page < KERNBASE) {
- pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)];
- l2b = pmap_get_l2_bucket(pm, vector_page);
+ pcb->pcb_pl1vec = &pmap->pm_l1->l1_kva[L1_IDX(vector_page)];
+ l2b = pmap_get_l2_bucket(pmap, vector_page);
pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO |
- L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
+ L1_C_DOM(pmap->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL);
} else
pcb->pcb_pl1vec = NULL;
}
@@ -1548,14 +1521,14 @@ pmap_set_pcb_pagedir(pmap_t pm, struct p
void
pmap_activate(struct thread *td)
{
- pmap_t pm;
+ pmap_t pmap;
struct pcb *pcb;
- pm = vmspace_pmap(td->td_proc->p_vmspace);
+ pmap = vmspace_pmap(td->td_proc->p_vmspace);
pcb = td->td_pcb;
critical_enter();
- pmap_set_pcb_pagedir(pm, pcb);
+ pmap_set_pcb_pagedir(pmap, pcb);
if (td == curthread) {
u_int cur_dacr, cur_ttb;
@@ -1888,12 +1861,12 @@ pmap_grow_map(vm_offset_t va, pt_entry_t
struct l2_bucket *l2b;
pt_entry_t *ptep;
vm_paddr_t pa;
- struct vm_page *pg;
+ struct vm_page *m;
- pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
- if (pg == NULL)
+ m = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED);
+ if (m == NULL)
return (1);
- pa = VM_PAGE_TO_PHYS(pg);
+ pa = VM_PAGE_TO_PHYS(m);
if (pap)
*pap = pa;
@@ -1901,7 +1874,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
ptep = &l2b->l2b_kva[l2pte_index(va)];
- *ptep = L2_S_PROTO | pa | cache_mode;
+ *ptep = L2_S_PROTO | pa | cache_mode | L2_S_REF;
pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE, 0);
PTE_SYNC(ptep);
@@ -1913,7 +1886,7 @@ pmap_grow_map(vm_offset_t va, pt_entry_t
* used by pmap_growkernel().
*/
static __inline struct l2_bucket *
-pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va)
+pmap_grow_l2_bucket(pmap_t pmap, vm_offset_t va)
{
struct l2_dtable *l2;
struct l2_bucket *l2b;
@@ -1924,7 +1897,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset
l1idx = L1_IDX(va);
- if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) {
+ if ((l2 = pmap->pm_l2[L2_IDX(l1idx)]) == NULL) {
/*
* No mapping at this address, as there is
* no entry in the L1 table.
@@ -1957,7 +1930,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset
/*
* Link it into the parent pmap
*/
- pm->pm_l2[L2_IDX(l1idx)] = l2;
+ pmap->pm_l2[L2_IDX(l1idx)] = l2;
memset(l2, 0, sizeof(*l2));
}
@@ -2011,7 +1984,7 @@ pmap_grow_l2_bucket(pmap_t pm, vm_offset
void
pmap_growkernel(vm_offset_t addr)
{
- pmap_t kpm = pmap_kernel();
+ pmap_t kpmap = pmap_kernel();
if (addr <= pmap_curmaxkvaddr)
return; /* we are OK */
@@ -2022,7 +1995,7 @@ pmap_growkernel(vm_offset_t addr)
/* Map 1MB at a time */
for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE)
- pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr);
+ pmap_grow_l2_bucket(kpmap, pmap_curmaxkvaddr);
/*
* flush out the cache, expensive but growkernel will happen so
@@ -2050,7 +2023,7 @@ pmap_remove_pages(pmap_t pmap)
struct pv_entry *pv;
struct l2_bucket *l2b = NULL;
vm_page_t m;
- pt_entry_t *pt;
+ pt_entry_t *ptep;
struct pv_chunk *pc, *npc;
uint32_t inuse, bitmask;
int allfree, bit, field, idx;
@@ -2074,12 +2047,15 @@ pmap_remove_pages(pmap_t pmap)
continue;
}
l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
- KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
- pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
- m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
- KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt));
- *pt = 0;
- PTE_SYNC(pt);
+ KASSERT(l2b != NULL,
+ ("No L2 bucket in pmap_remove_pages"));
+ ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+ m = PHYS_TO_VM_PAGE(*ptep & L2_ADDR_MASK);
+ KASSERT((vm_offset_t)m >= KERNBASE,
+ ("Trying to access non-existent page "
+ "va %x pte %x", pv->pv_va, *ptep));
+ *ptep = 0;
+ PTE_SYNC(ptep);
/* Mark free */
PV_STAT(pv_entry_frees++);
@@ -2185,7 +2161,7 @@ static PMAP_INLINE void
pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags)
{
struct l2_bucket *l2b;
- pt_entry_t *pte;
+ pt_entry_t *ptep;
pt_entry_t opte;
PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n",
@@ -2197,8 +2173,8 @@ pmap_kenter_internal(vm_offset_t va, vm_
l2b = pmap_grow_l2_bucket(pmap_kernel(), va);
KASSERT(l2b != NULL, ("No L2 Bucket"));
- pte = &l2b->l2b_kva[l2pte_index(va)];
- opte = *pte;
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ opte = *ptep;
if (l2pte_valid(opte)) {
cpu_tlb_flushD_SE(va);
cpu_cpwait();
@@ -2208,18 +2184,18 @@ pmap_kenter_internal(vm_offset_t va, vm_
}
if (flags & KENTER_CACHE) {
- *pte = L2_S_PROTO | pa | pte_l2_s_cache_mode;
- pmap_set_prot(pte, VM_PROT_READ | VM_PROT_WRITE,
+ *ptep = L2_S_PROTO | pa | pte_l2_s_cache_mode | L2_S_REF;
+ pmap_set_prot(ptep, VM_PROT_READ | VM_PROT_WRITE,
flags & KENTER_USER);
} else {
- *pte = L2_S_PROTO | pa;
- pmap_set_prot(pte, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
+ *ptep = L2_S_PROTO | pa | L2_S_REF;
+ pmap_set_prot(ptep, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
0);
}
PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n",
- (uint32_t) pte, opte, *pte));
- PTE_SYNC(pte);
+ (uint32_t) ptep, opte, *ptep));
+ PTE_SYNC(ptep);
cpu_cpwait();
}
@@ -2263,20 +2239,20 @@ void
pmap_kremove(vm_offset_t va)
{
struct l2_bucket *l2b;
- pt_entry_t *pte, opte;
+ pt_entry_t *ptep, opte;
l2b = pmap_get_l2_bucket(pmap_kernel(), va);
if (!l2b)
return;
KASSERT(l2b != NULL, ("No L2 Bucket"));
- pte = &l2b->l2b_kva[l2pte_index(va)];
- opte = *pte;
+ ptep = &l2b->l2b_kva[l2pte_index(va)];
+ opte = *ptep;
if (l2pte_valid(opte)) {
va = va & ~PAGE_MASK;
cpu_tlb_flushD_SE(va);
cpu_cpwait();
- *pte = 0;
- PTE_SYNC(pte);
+ *ptep = 0;
+ PTE_SYNC(ptep);
}
}
@@ -2376,13 +2352,13 @@ pmap_object_init_pt(pmap_t pmap, vm_offs
boolean_t
pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr)
{
- pd_entry_t *pde;
- pt_entry_t *pte;
+ pd_entry_t *pdep;
+ pt_entry_t *ptep;
- if (!pmap_get_pde_pte(pmap, addr, &pde, &pte))
+ if (!pmap_get_pde_pte(pmap, addr, &pdep, &ptep))
return (FALSE);
- KASSERT(pte != NULL, ("Valid mapping but no pte ?"));
- if (*pte == 0)
+ KASSERT(ptep != NULL, ("Valid mapping but no pte ?"));
+ if (*ptep == 0)
return (TRUE);
return (FALSE);
}
@@ -2403,18 +2379,19 @@ pmap_is_prefaultable(pmap_t pmap, vm_off
* a "section" mapping.
*/
boolean_t
-pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp)
+pmap_get_pde_pte(pmap_t pmap, vm_offset_t va, pd_entry_t **pdp,
+ pt_entry_t **ptp)
{
struct l2_dtable *l2;
pd_entry_t *pl1pd, l1pd;
pt_entry_t *ptep;
u_short l1idx;
- if (pm->pm_l1 == NULL)
+ if (pmap->pm_l1 == NULL)
return (FALSE);
l1idx = L1_IDX(va);
- *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx];
+ *pdp = pl1pd = &pmap->pm_l1->l1_kva[l1idx];
l1pd = *pl1pd;
if (l1pte_section_p(l1pd)) {
@@ -2422,10 +2399,10 @@ pmap_get_pde_pte(pmap_t pm, vm_offset_t
return (TRUE);
}
- if (pm->pm_l2 == NULL)
+ if (pmap->pm_l2 == NULL)
return (FALSE);
- l2 = pm->pm_l2[L2_IDX(l1idx)];
+ l2 = pmap->pm_l2[L2_IDX(l1idx)];
if (l2 == NULL ||
(ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) {
@@ -2456,8 +2433,8 @@ pmap_remove_all(vm_page_t m)
pt_entry_t *ptep;
struct l2_bucket *l2b;
boolean_t flush = FALSE;
- pmap_t curpm;
- int flags = 0;
+ pmap_t curpmap;
+ u_int is_exec = 0;
KASSERT((m->flags & PG_FICTITIOUS) == 0,
("pmap_remove_all: page %p is fictitious", m));
@@ -2465,10 +2442,10 @@ pmap_remove_all(vm_page_t m)
if (TAILQ_EMPTY(&m->md.pv_list))
return;
rw_wlock(&pvh_global_lock);
- curpm = vmspace_pmap(curproc->p_vmspace);
+ curpmap = vmspace_pmap(curproc->p_vmspace);
while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
pmap = PV_PMAP(pv);
- if (flush == FALSE && (pmap == curpm ||
+ if (flush == FALSE && (pmap == curpmap ||
pmap == pmap_kernel()))
flush = TRUE;
@@ -2476,22 +2453,19 @@ pmap_remove_all(vm_page_t m)
l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
KASSERT(l2b != NULL, ("No l2 bucket"));
ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
- if (L2_S_WRITABLE(*ptep))
- vm_page_dirty(m);
+ is_exec |= PTE_BEEN_EXECD(*ptep);
*ptep = 0;
if (pmap_is_current(pmap))
PTE_SYNC(ptep);
pmap_free_l2_bucket(pmap, l2b, 1);
pmap->pm_stats.resident_count--;
- flags |= pv->pv_flags;
pmap_nuke_pv(m, pmap, pv);
pmap_free_pv_entry(pmap, pv);
PMAP_UNLOCK(pmap);
}
- m->md.pvh_attrs &= ~(PVF_MOD | PVF_REF);
if (flush) {
- if (PV_BEEN_EXECD(flags))
+ if (is_exec)
cpu_tlb_flushID();
else
cpu_tlb_flushD();
@@ -2564,16 +2538,16 @@ pmap_change_attr(vm_offset_t sva, vm_siz
* specified range of this map as requested.
*/
void
-pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***
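
The truncated pmap-v6.c diff follows one pattern throughout: software PV-flag bookkeeping (PVF_REF and the pg->md.pvh_attrs word) gives way to the hardware-maintained referenced bit in the PTE, and locals are renamed to the usual pmap style (pm -> pmap, pg -> m, pt/pte -> ptep). A condensed sketch of the referenced-bit emulation the pmap_fault_fixup() hunk implements (L2_S_REF, L2_S_REFERENCED(), PTE_SYNC() and the variable names are taken from the diff; the surrounding control flow is paraphrased):

    /* Abort on a page whose PTE lacks the access flag: record the
     * reference and set the flag so the translation can be cached
     * in the TLB from now on. */
    if (!L2_S_REFERENCED(pte)) {
            vm_page_aflag_set(m, PGA_REFERENCED);
            *ptep = pte | L2_S_REF;    /* mark the page "referenced" */
            PTE_SYNC(ptep);
            rv = 1;                    /* fault was handled here */
    }

pmap_clearbit() does the inverse: clearing a reference now strips L2_S_REF from the PTE instead of invalidating the whole entry, so the next touch takes a cheap flag fault rather than a full translation fault.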