svn commit: r278897 - in user/nwhitehorn/ppc64-pmap-rework: aim include pseries
Nathan Whitehorn
nwhitehorn at FreeBSD.org
Tue Feb 17 14:33:53 UTC 2015
Author: nwhitehorn
Date: Tue Feb 17 14:33:51 2015
New Revision: 278897
URL: https://svnweb.freebsd.org/changeset/base/278897
Log:
H_PROTECT is somehow slower than unset + insert, so don't bother with the
special case for it. Restore the moea64_pvo_entries and moea64_pte_overflow
statistics counters as well.
Modified:
user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
user/nwhitehorn/ppc64-pmap-rework/include/pmap.h
user/nwhitehorn/ppc64-pmap-rework/pseries/mmu_phyp.c
Modified: user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c
==============================================================================
--- user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c Tue Feb 17 13:12:54 2015 (r278896)
+++ user/nwhitehorn/ppc64-pmap-rework/aim/mmu_oea64.c Tue Feb 17 14:33:51 2015 (r278897)
@@ -1254,6 +1254,7 @@ moea64_enter(mmu_t mmu, pmap_t pmap, vm_
free_pvo_entry(pvo);
break; /* all done */
}
+ /* XXX: protection update */
}
/* Otherwise, need to kill it first */
@@ -2241,6 +2242,8 @@ moea64_pvo_enter(mmu_t mmu, struct pvo_e
panic("moea64_pvo_enter: overflow");
}
+ moea64_pvo_entries++;
+
if (pvo->pvo_pmap == kernel_pmap)
isync();
Modified: user/nwhitehorn/ppc64-pmap-rework/include/pmap.h
==============================================================================
--- user/nwhitehorn/ppc64-pmap-rework/include/pmap.h Tue Feb 17 13:12:54 2015 (r278896)
+++ user/nwhitehorn/ppc64-pmap-rework/include/pmap.h Tue Feb 17 14:33:51 2015 (r278897)
@@ -91,8 +91,10 @@ struct pvo_entry {
#endif
RB_ENTRY(pvo_entry) pvo_plink; /* Link to pmap entries */
struct {
+#ifndef __powerpc64__
/* 32-bit fields */
struct pte pte;
+#endif
/* 64-bit fields */
uintptr_t slot;
vm_paddr_t pa;
Modified: user/nwhitehorn/ppc64-pmap-rework/pseries/mmu_phyp.c
==============================================================================
--- user/nwhitehorn/ppc64-pmap-rework/pseries/mmu_phyp.c Tue Feb 17 13:12:54 2015 (r278896)
+++ user/nwhitehorn/ppc64-pmap-rework/pseries/mmu_phyp.c Tue Feb 17 14:33:51 2015 (r278897)
@@ -282,40 +282,23 @@ mphyp_pte_unset(mmu_t mmu, struct pvo_en
("Error removing page: %d", err));
if (err == H_SUCCESS)
- moea64_sync_refchg(pvo->pvo_pte.pa |
+ moea64_sync_refchg((pvo->pvo_pte.pa & LPTE_RPGN) |
(pte.pte_lo & (LPTE_REF | LPTE_CHG)));
+
+ if (err == H_NOT_FOUND)
+ moea64_pte_overflow--;
}
static void
mphyp_pte_replace(mmu_t mmu, struct pvo_entry *pvo, int flags)
{
- struct lpte pte;
- int64_t result;
PMAP_LOCK_ASSERT(pvo->pvo_pmap, MA_OWNED);
- if (flags == MOEA64_PTE_PROT_UPDATE) {
- moea64_pte_from_pvo(pvo, &pte);
-
- result = phyp_hcall(H_PROTECT,
- H_AVPN | (pte.pte_lo & (LPTE_PP | LPTE_NOEXEC)),
- pvo->pvo_pte.slot, pte.pte_hi & LPTE_AVPN_MASK);
- if (result == H_NOT_FOUND)
- return;
- KASSERT(result == H_SUCCESS || result == H_NOT_FOUND,
- ("Error changing page protection: %d", (int)result));
-
- /* Mark C changes (R zeroed by H_PROTECT) */
- result = mphyp_pte_clear(mmu, pvo, LPTE_CHG);
- if (result > 0)
- moea64_sync_refchg(pvo->pvo_pte.pa |
- (result & LPTE_CHG));
- return;
- }
-
/*
* For anything other than a simple page protection update, we have to
- * reinsert the page.
+ * reinsert the page. H_PROTECT is somehow slower than this, so just do
+ * unset followed by insert unconditionally.
*/
mphyp_pte_unset(mmu, pvo);
mphyp_pte_insert(mmu, pvo);
@@ -436,6 +419,7 @@ mphyp_pte_insert(mmu_t mmu, struct pvo_e
result = phyp_pft_hcall(H_REMOVE, H_AVPN, index,
evicted.pte_hi & LPTE_AVPN_MASK, 0, &junk, &lastptelo,
&junk);
+ moea64_pte_overflow++;
KASSERT(result == H_SUCCESS,
("Error evicting page: %d", (int)result));
More information about the svn-src-user
mailing list