svn commit: r358305 - head/sys/powerpc/booke

Justin Hibbits jhibbits at FreeBSD.org
Tue Feb 25 01:40:24 UTC 2020


Author: jhibbits
Date: Tue Feb 25 01:40:22 2020
New Revision: 358305
URL: https://svnweb.freebsd.org/changeset/base/358305

Log:
  powerpc/booke: Use a pseudo-DMAP for the device mappings on booke64
  
  Since powerpc64 has a virtual address space significantly larger than its
  physical address space, take advantage of this and create yet another
  DMAP-like region, this one for device mappings.  The device-mapping "DMAP"
  occupies the 0x8000000000000000 - 0xc000000000000000 range, so it does not
  overlap the physical memory DMAP.
  
  This will allow TLB1 entry coalescing to be added in the future, which is
  especially useful for drivers like radeonkms that map parts of the GPU a
  piece at a time but eventually map all of it, using up many TLB1 entries
  (~40).
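
  In concrete terms the scheme is a linear offset: a device VA is simply its
  PA plus the new VM_MAPDEV_BASE, and the new KASSERT in
  mmu_booke_mapdev_attr() requires pa < VM_MAPDEV_PA_MAX so the region cannot
  reach the physical-memory DMAP.  A minimal standalone sketch of that
  arithmetic (the mapdev_va() helper name is made up for illustration; the
  constants match the macros added below):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Same values as the VM_MAPDEV_* macros added to pmap.c (booke64 only). */
    #define MAPDEV_BASE    0x8000000000000000UL
    #define MAPDEV_PA_MAX  0x4000000000000000UL  /* keep clear of the DMAP */

    /* Hypothetical helper: translate a device PA to its pseudo-DMAP VA. */
    static uint64_t
    mapdev_va(uint64_t pa)
    {
            assert(pa < MAPDEV_PA_MAX);  /* the new KASSERT enforces this bound */
            return (MAPDEV_BASE + pa);   /* the VA is just the PA offset by the base */
    }

    int
    main(void)
    {
            /* A 40-bit e6500 physical address lands well inside the window. */
            uint64_t pa = 0xffeff00000UL;

            printf("pa 0x%jx -> va 0x%jx\n", (uintmax_t)pa,
                (uintmax_t)mapdev_va(pa));
            return (0);
    }

  On 32-bit booke nothing changes: the base stays just above
  VM_MAXUSER_ADDRESS and device VAs are still handed out sequentially.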

Modified:
  head/sys/powerpc/booke/pmap.c

Modified: head/sys/powerpc/booke/pmap.c
==============================================================================
--- head/sys/powerpc/booke/pmap.c	Tue Feb 25 00:45:09 2020	(r358304)
+++ head/sys/powerpc/booke/pmap.c	Tue Feb 25 01:40:22 2020	(r358305)
@@ -221,8 +221,23 @@ uint32_t tlb1_entries;
 
 #define TLB1_ENTRIES (tlb1_entries)
 
-static vm_offset_t tlb1_map_base = (vm_offset_t)VM_MAXUSER_ADDRESS + PAGE_SIZE;
+/*
+ * Base of the pmap_mapdev() region.  On 32-bit it immediately follows the
+ * userspace address range.  On 64-bit it is far above, at (1 << 63), and
+ * extends up to the DMAP, allowing 62 bits of PA.  This is far wider than
+ * the widest Book-E address bus; the e6500 has a 40-bit PA space.  It lets
+ * us map much like the DMAP, with each VA equal to its PA plus the
+ * base.
+ */
+#ifdef __powerpc64__
+#define	VM_MAPDEV_BASE		0x8000000000000000
+#define	VM_MAPDEV_PA_MAX	0x4000000000000000 /* Don't encroach on DMAP */
+#else
+#define	VM_MAPDEV_BASE	(VM_MAXUSER_ADDRESS + PAGE_SIZE)
+#endif
 
+static vm_offset_t tlb1_map_base = VM_MAPDEV_BASE;
+
 static tlbtid_t tid_alloc(struct pmap *);
 static void tid_flush(tlbtid_t tid);
 
@@ -3475,8 +3490,10 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 {
 	tlb_entry_t e;
 	vm_paddr_t tmppa;
-	void *res;
-	uintptr_t va, tmpva;
+#ifndef __powerpc64__
+	uintptr_t tmpva;
+#endif
+	uintptr_t va;
 	vm_size_t sz;
 	int i;
 	int wimge;
@@ -3512,6 +3529,11 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 
 	size = roundup(size, PAGE_SIZE);
 
+#ifdef __powerpc64__
+	KASSERT(pa < VM_MAPDEV_PA_MAX,
+	    ("Unsupported physical address! %lx", pa));
+	va = VM_MAPDEV_BASE + pa;
+#else
 	/*
 	 * The device mapping area is between VM_MAXUSER_ADDRESS and
 	 * VM_MIN_KERNEL_ADDRESS.  This gives 1GB of device addressing.
@@ -3534,24 +3556,15 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_paddr_t pa, vm_siz
 	    sz = ffsl((~((1 << flsl(size-1)) - 1)) & pa);
 	    sz = sz ? min(roundup(sz + 3, 4), flsl(size) - 1) : flsl(size) - 1;
 	    va = roundup(tlb1_map_base, 1 << sz) | (((1 << sz) - 1) & pa);
-#ifdef __powerpc64__
-	} while (!atomic_cmpset_long(&tlb1_map_base, tmpva, va + size));
-#else
 	} while (!atomic_cmpset_int(&tlb1_map_base, tmpva, va + size));
-#endif
-#else
-#ifdef __powerpc64__
-	va = atomic_fetchadd_long(&tlb1_map_base, size);
-#else
 	va = atomic_fetchadd_int(&tlb1_map_base, size);
 #endif
 #endif
-	res = (void *)va;
 
 	if (tlb1_mapin_region(va, pa, size, tlb_calc_wimg(pa, ma)) != size)
 		return (NULL);
 
-	return (res);
+	return ((void *)va);
 }
 
 /*
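
One consequence of the linear layout, sketched below purely as an illustration
(the aperture addresses are invented): physically adjacent device regions now
receive adjacent virtual addresses, which is what would let a future change
coalesce their TLB1 entries instead of spending one entry per mapping, as the
radeonkms case mentioned in the log does today.

    #include <stdint.h>
    #include <stdio.h>

    #define MAPDEV_BASE  0x8000000000000000UL  /* matches VM_MAPDEV_BASE */

    int
    main(void)
    {
            /* Two hypothetical, physically contiguous 256MB GPU apertures. */
            uint64_t bar_pa[2] = { 0xc00000000UL, 0xc10000000UL };
            int i;

            /*
             * Each VA is PA + base, so the two mappings are also contiguous
             * in VA space and could later be covered by one larger TLB1 entry.
             */
            for (i = 0; i < 2; i++)
                    printf("aperture %d: pa 0x%jx -> va 0x%jx\n", i,
                        (uintmax_t)bar_pa[i],
                        (uintmax_t)(MAPDEV_BASE + bar_pa[i]));
            return (0);
    }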

