svn commit: r347167 - head/sys/powerpc/booke
Justin Hibbits
jhibbits at FreeBSD.org
Sun May 5 20:23:44 UTC 2019
Author: jhibbits
Date: Sun May 5 20:23:43 2019
New Revision: 347167
URL: https://svnweb.freebsd.org/changeset/base/347167
Log:
powerpc/booke: Use #ifdef __powerpc64__ instead of hw_direct_map in places
Since the DMAP is only available on powerpc64, and is *always* available on
Book-E powerpc64, don't penalize either side (32-bit or 64-bit) by always
checking hw_direct_map to perform operations. This saves 5-10% time on
various ports builds, and on buildworld+buildkernel on Book-E hardware.
MFC after: 3 weeks
Modified:
head/sys/powerpc/booke/pmap.c
Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c Sun May 5 20:14:36 2019 (r347166)
+++ head/sys/powerpc/booke/pmap.c Sun May 5 20:23:43 2019 (r347167)
@@ -2974,19 +2974,19 @@ mmu_booke_zero_page_area(mmu_t mmu, vm_page_t m, int o
/* XXX KASSERT off and size are within a single page? */
- if (hw_direct_map) {
- va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
- bzero((caddr_t)va + off, size);
- } else {
- mtx_lock(&zero_page_mutex);
- va = zero_page_va;
+#ifdef __powerpc64__
+ va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t)va + off, size);
+#else
+ mtx_lock(&zero_page_mutex);
+ va = zero_page_va;
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
- bzero((caddr_t)va + off, size);
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+ bzero((caddr_t)va + off, size);
+ mmu_booke_kremove(mmu, va);
- mtx_unlock(&zero_page_mutex);
- }
+ mtx_unlock(&zero_page_mutex);
+#endif
}
/*
@@ -2997,23 +2997,24 @@ mmu_booke_zero_page(mmu_t mmu, vm_page_t m)
{
vm_offset_t off, va;
- if (hw_direct_map) {
- va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
- } else {
- va = zero_page_va;
- mtx_lock(&zero_page_mutex);
+#ifdef __powerpc64__
+ va = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
- mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
- }
+ for (off = 0; off < PAGE_SIZE; off += cacheline_size)
+ __asm __volatile("dcbz 0,%0" :: "r"(va + off));
+#else
+ va = zero_page_va;
+ mtx_lock(&zero_page_mutex);
+ mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(m));
+
for (off = 0; off < PAGE_SIZE; off += cacheline_size)
__asm __volatile("dcbz 0,%0" :: "r"(va + off));
- if (!hw_direct_map) {
- mmu_booke_kremove(mmu, va);
+ mmu_booke_kremove(mmu, va);
- mtx_unlock(&zero_page_mutex);
- }
+ mtx_unlock(&zero_page_mutex);
+#endif
}
/*
@@ -3026,23 +3027,23 @@ mmu_booke_copy_page(mmu_t mmu, vm_page_t sm, vm_page_t
{
vm_offset_t sva, dva;
+#ifdef __powerpc64__
+ sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
+ dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
+ memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
+#else
+ mtx_lock(&copy_page_mutex);
sva = copy_page_src_va;
dva = copy_page_dst_va;
+ mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
+ mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
- if (hw_direct_map) {
- sva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(sm));
- dva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(dm));
- } else {
- mtx_lock(&copy_page_mutex);
- mmu_booke_kenter(mmu, sva, VM_PAGE_TO_PHYS(sm));
- mmu_booke_kenter(mmu, dva, VM_PAGE_TO_PHYS(dm));
- }
memcpy((caddr_t)dva, (caddr_t)sva, PAGE_SIZE);
- if (!hw_direct_map) {
- mmu_booke_kremove(mmu, dva);
- mmu_booke_kremove(mmu, sva);
- mtx_unlock(&copy_page_mutex);
- }
+
+ mmu_booke_kremove(mmu, dva);
+ mmu_booke_kremove(mmu, sva);
+ mtx_unlock(&copy_page_mutex);
+#endif
}
static inline void
@@ -3053,39 +3054,55 @@ mmu_booke_copy_pages(mmu_t mmu, vm_page_t *ma, vm_offs
vm_offset_t a_pg_offset, b_pg_offset;
int cnt;
- if (hw_direct_map) {
- a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*ma)) +
- a_offset);
- b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(*mb)) +
- b_offset);
- bcopy(a_cp, b_cp, xfersize);
- } else {
- mtx_lock(&copy_page_mutex);
- while (xfersize > 0) {
- a_pg_offset = a_offset & PAGE_MASK;
- cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
- mmu_booke_kenter(mmu, copy_page_src_va,
- VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
- a_cp = (char *)copy_page_src_va + a_pg_offset;
- b_pg_offset = b_offset & PAGE_MASK;
- cnt = min(cnt, PAGE_SIZE - b_pg_offset);
- mmu_booke_kenter(mmu, copy_page_dst_va,
- VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
- b_cp = (char *)copy_page_dst_va + b_pg_offset;
- bcopy(a_cp, b_cp, cnt);
- mmu_booke_kremove(mmu, copy_page_dst_va);
- mmu_booke_kremove(mmu, copy_page_src_va);
- a_offset += cnt;
- b_offset += cnt;
- xfersize -= cnt;
- }
- mtx_unlock(&copy_page_mutex);
+#ifdef __powerpc64__
+ vm_page_t pa, pb;
+
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ pa = ma[a_offset >> PAGE_SHIFT];
+ b_pg_offset = b_offset & PAGE_MASK;
+ pb = mb[b_offset >> PAGE_SHIFT];
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ a_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pa)) +
+ a_pg_offset);
+ b_cp = (caddr_t)((uintptr_t)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(pb)) +
+ b_pg_offset);
+ bcopy(a_cp, b_cp, cnt);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
}
+#else
+ mtx_lock(&copy_page_mutex);
+ while (xfersize > 0) {
+ a_pg_offset = a_offset & PAGE_MASK;
+ cnt = min(xfersize, PAGE_SIZE - a_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_src_va,
+ VM_PAGE_TO_PHYS(ma[a_offset >> PAGE_SHIFT]));
+ a_cp = (char *)copy_page_src_va + a_pg_offset;
+ b_pg_offset = b_offset & PAGE_MASK;
+ cnt = min(cnt, PAGE_SIZE - b_pg_offset);
+ mmu_booke_kenter(mmu, copy_page_dst_va,
+ VM_PAGE_TO_PHYS(mb[b_offset >> PAGE_SHIFT]));
+ b_cp = (char *)copy_page_dst_va + b_pg_offset;
+ bcopy(a_cp, b_cp, cnt);
+ mmu_booke_kremove(mmu, copy_page_dst_va);
+ mmu_booke_kremove(mmu, copy_page_src_va);
+ a_offset += cnt;
+ b_offset += cnt;
+ xfersize -= cnt;
+ }
+ mtx_unlock(&copy_page_mutex);
+#endif
}
static vm_offset_t
mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
{
+#ifdef __powerpc64__
+ return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)));
+#else
vm_paddr_t paddr;
vm_offset_t qaddr;
uint32_t flags;
@@ -3093,9 +3110,6 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
paddr = VM_PAGE_TO_PHYS(m);
- if (hw_direct_map)
- return (PHYS_TO_DMAP(paddr));
-
flags = PTE_SR | PTE_SW | PTE_SX | PTE_WIRED | PTE_VALID;
flags |= tlb_calc_wimg(paddr, pmap_page_get_memattr(m)) << PTE_MAS2_SHIFT;
flags |= PTE_PS_4KB;
@@ -3122,16 +3136,15 @@ mmu_booke_quick_enter_page(mmu_t mmu, vm_page_t m)
__syncicache((void *)qaddr, PAGE_SIZE);
return (qaddr);
+#endif
}
static void
mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t addr)
{
+#ifndef __powerpc64__
pte_t *pte;
- if (hw_direct_map)
- return;
-
pte = pte_find(mmu, kernel_pmap, addr);
KASSERT(PCPU_GET(qmap_addr) == addr,
@@ -3141,6 +3154,7 @@ mmu_booke_quick_remove_page(mmu_t mmu, vm_offset_t add
*pte = 0;
critical_exit();
+#endif
}
/*
More information about the svn-src-all
mailing list