svn commit: r265998 - in stable/10/sys: boot/powerpc/ps3 dev/adb dev/uart powerpc/aim powerpc/booke powerpc/include

Ian Lepore ian at FreeBSD.org
Wed May 14 01:16:07 UTC 2014


Author: ian
Date: Wed May 14 01:16:05 2014
New Revision: 265998
URL: http://svnweb.freebsd.org/changeset/base/265998

Log:
  MFC r257180, r257195, r257196, r257198, r257209, r257295
  
   Add some extra sanity checks and fix printf format specifiers.
  
   Try even harder to find a console before giving up.
  
   Make devices whose registers fall inside the KVA region work reliably.
  
   Turn on VM_KMEM_SIZE_SCALE on 32-bit as well as 64-bit PowerPC.
  
   Return NOKEY instead of 0 if there are no more key presses queued.

Modified:
  stable/10/sys/boot/powerpc/ps3/start.S
  stable/10/sys/dev/adb/adb_kbd.c
  stable/10/sys/dev/uart/uart_cpu_fdt.c
  stable/10/sys/powerpc/aim/mmu_oea64.c
  stable/10/sys/powerpc/booke/pmap.c
  stable/10/sys/powerpc/include/vmparam.h
Directory Properties:
  stable/10/   (props changed)

Modified: stable/10/sys/boot/powerpc/ps3/start.S
==============================================================================
--- stable/10/sys/boot/powerpc/ps3/start.S	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/boot/powerpc/ps3/start.S	Wed May 14 01:16:05 2014	(r265998)
@@ -27,7 +27,7 @@
 
 #define LOCORE
 
-#include <machine/trap_aim.h>
+#include <machine/trap.h>
 
 /*
  * KBoot and simulators will start this program from the _start symbol, with

Modified: stable/10/sys/dev/adb/adb_kbd.c
==============================================================================
--- stable/10/sys/dev/adb/adb_kbd.c	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/dev/adb/adb_kbd.c	Wed May 14 01:16:05 2014	(r265998)
@@ -621,7 +621,7 @@ akbd_read_char(keyboard_t *kbd, int wait
 
 	if (!sc->buffers) {
 		mtx_unlock(&sc->sc_mutex);
-		return (0);
+		return (NOKEY);
 	}
 
 	adb_code = sc->buffer[0];
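
The adb_kbd change makes akbd_read_char() return NOKEY when its ring buffer
is empty, so callers can distinguish "nothing queued" from a key code that
happens to be 0.  A minimal sketch of a consumer loop built on the kbd(4)
kbdd_read_char() wrapper follows; drain_keyboard() and process_key() are
hypothetical names used only for illustration.

	/*
	 * Drain all pending key presses without blocking.  NOKEY is the
	 * framework's "queue empty" sentinel; a return value of 0 is
	 * treated as an ordinary key code.
	 */
	static void
	drain_keyboard(keyboard_t *kbd)
	{
		int c;

		while ((c = kbdd_read_char(kbd, 0)) != NOKEY)
			process_key(c);		/* hypothetical handler */
	}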

Modified: stable/10/sys/dev/uart/uart_cpu_fdt.c
==============================================================================
--- stable/10/sys/dev/uart/uart_cpu_fdt.c	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/dev/uart/uart_cpu_fdt.c	Wed May 14 01:16:05 2014	(r265998)
@@ -142,14 +142,19 @@ uart_cpu_getdev(int devtype, struct uart
 	/*
 	 * Retrieve /chosen/std{in,out}.
 	 */
-	if ((chosen = OF_finddevice("/chosen")) == -1)
-		return (ENXIO);
-	for (name = propnames; *name != NULL; name++) {
-		if (phandle_chosen_propdev(chosen, *name, &node) == 0)
-			break;
+	node = -1;
+	if ((chosen = OF_finddevice("/chosen")) != -1) {
+		for (name = propnames; *name != NULL; name++) {
+			if (phandle_chosen_propdev(chosen, *name, &node) == 0)
+				break;
+		}
 	}
-	if (*name == NULL)
+	if (chosen == -1 || *name == NULL)
+		node = OF_finddevice("serial0"); /* Last ditch */
+
+	if (node == -1) /* Can't find anything */
 		return (ENXIO);
+
 	/*
 	 * Retrieve serial attributes.
 	 */
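
The uart_cpu_fdt.c hunk makes the console probe degrade gracefully: a
missing /chosen node, or a /chosen node with none of the recognized
stdin/stdout properties, no longer aborts the search; the code now falls
back to the "serial0" lookup before returning ENXIO.  A condensed
restatement of that decision order is sketched below; find_console_node()
is a hypothetical wrapper around the helpers named in the diff.

	/* Sketch of the fallback order only, not the driver's code. */
	static phandle_t
	find_console_node(const char **propnames)
	{
		phandle_t chosen, node = -1;
		const char **name;

		if ((chosen = OF_finddevice("/chosen")) != -1)
			for (name = propnames; *name != NULL; name++)
				if (phandle_chosen_propdev(chosen, *name,
				    &node) == 0)
					break;
		if (node == -1)
			node = OF_finddevice("serial0");  /* last ditch */
		return (node);		/* -1 means no console was found */
	}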

Modified: stable/10/sys/powerpc/aim/mmu_oea64.c
==============================================================================
--- stable/10/sys/powerpc/aim/mmu_oea64.c	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/powerpc/aim/mmu_oea64.c	Wed May 14 01:16:05 2014	(r265998)
@@ -501,15 +501,7 @@ moea64_add_ofw_mappings(mmu_t mmup, phan
 	qsort(translations, sz, sizeof (*translations), om_cmp);
 
 	for (i = 0; i < sz; i++) {
-		CTR3(KTR_PMAP, "translation: pa=%#x va=%#x len=%#x",
-		    (uint32_t)(translations[i].om_pa_lo), translations[i].om_va,
-		    translations[i].om_len);
-
-		if (translations[i].om_pa_lo % PAGE_SIZE)
-			panic("OFW translation not page-aligned!");
-
 		pa_base = translations[i].om_pa_lo;
-
 	      #ifdef __powerpc64__
 		pa_base += (vm_offset_t)translations[i].om_pa_hi << 32;
 	      #else
@@ -517,6 +509,14 @@ moea64_add_ofw_mappings(mmu_t mmup, phan
 			panic("OFW translations above 32-bit boundary!");
 	      #endif
 
+		if (pa_base % PAGE_SIZE)
+			panic("OFW translation not page-aligned (phys)!");
+		if (translations[i].om_va % PAGE_SIZE)
+			panic("OFW translation not page-aligned (virt)!");
+
+		CTR3(KTR_PMAP, "translation: pa=%#zx va=%#x len=%#x",
+		    pa_base, translations[i].om_va, translations[i].om_len);
+
 		/* Now enter the pages for this mapping */
 
 		DISABLE_TRANS(msr);
@@ -693,9 +693,9 @@ moea64_early_bootstrap(mmu_t mmup, vm_of
 	hwphyssz = 0;
 	TUNABLE_ULONG_FETCH("hw.physmem", (u_long *) &hwphyssz);
 	for (i = 0, j = 0; i < regions_sz; i++, j += 2) {
-		CTR3(KTR_PMAP, "region: %#x - %#x (%#x)", regions[i].mr_start,
-		    regions[i].mr_start + regions[i].mr_size,
-		    regions[i].mr_size);
+		CTR3(KTR_PMAP, "region: %#zx - %#zx (%#zx)",
+		    regions[i].mr_start, regions[i].mr_start +
+		    regions[i].mr_size, regions[i].mr_size);
 		if (hwphyssz != 0 &&
 		    (physsz + regions[i].mr_size) >= hwphyssz) {
 			if (physsz < hwphyssz) {
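
Two things change in the mmu_oea64.c hunk: the page-alignment sanity checks
now run after pa_base has been assembled from om_pa_lo/om_pa_hi (so both the
full physical address and the virtual address are checked), and the trace
format strings use a width that matches arguments whose size differs between
32-bit and 64-bit builds.  A toy illustration of the format issue, with a
made-up value:

	/*
	 * When a variable is 32-bit on some builds and 64-bit on others,
	 * a fixed "%x" can mismatch the argument.  Casting to uintmax_t
	 * and printing with "%jx" (or using a size-matched specifier such
	 * as "%zx") keeps the format and argument in agreement on both ABIs.
	 */
	vm_paddr_t pa = 0x12345000;		/* example value */

	printf("pa=%#jx\n", (uintmax_t)pa);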

Modified: stable/10/sys/powerpc/booke/pmap.c
==============================================================================
--- stable/10/sys/powerpc/booke/pmap.c	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/powerpc/booke/pmap.c	Wed May 14 01:16:05 2014	(r265998)
@@ -189,6 +189,7 @@ static tlb_entry_t tlb1[TLB1_ENTRIES];
 
 /* Next free entry in the TLB1 */
 static unsigned int tlb1_idx;
+static vm_offset_t tlb1_map_base = VM_MAX_KERNEL_ADDRESS;
 
 static tlbtid_t tid_alloc(struct pmap *);
 
@@ -2681,11 +2682,23 @@ mmu_booke_mapdev_attr(mmu_t mmu, vm_padd
 
 	size = roundup(size, PAGE_SIZE);
 
+	/*
+	 * We leave a hole for device direct mapping between the maximum user
 	 * address (0x80000000) and the minimum KVA address (0xc0000000). If
+	 * devices are in there, just map them 1:1. If not, map them to the
 	 * device mapping area above VM_MAX_KERNEL_ADDRESS. These mapped
+	 * addresses should be pulled from an allocator, but since we do not
+	 * ever free TLB1 entries, it is safe just to increment a counter.
+	 * Note that there isn't a lot of address space here (128 MB) and it
+	 * is not at all difficult to imagine running out, since that is a 4:1
+	 * compression from the 0xc0000000 - 0xf0000000 address space that gets
+	 * mapped there.
+	 */
 	if (pa >= (VM_MAXUSER_ADDRESS + PAGE_SIZE) &&
 	    (pa + size - 1) < VM_MIN_KERNEL_ADDRESS) 
 		va = pa;
 	else
-		va = kva_alloc(size);
+		va = atomic_fetchadd_int(&tlb1_map_base, size);
 	res = (void *)va;
 
 	do {
@@ -3085,7 +3098,7 @@ tlb1_mapin_region(vm_offset_t va, vm_pad
 	}
 
 	mapped = (va - base);
-	debugf("mapped size 0x%08x (wasted space 0x%08x)\n",
+	printf("mapped size 0x%08x (wasted space 0x%08x)\n",
 	    mapped, mapped - size);
 	return (mapped);
 }
@@ -3148,7 +3161,6 @@ tlb1_init()
 vm_offset_t 
 pmap_early_io_map(vm_paddr_t pa, vm_size_t size)
 {
-	static vm_offset_t early_io_map_base = VM_MAX_KERNEL_ADDRESS;
 	vm_paddr_t pa_base;
 	vm_offset_t va, sz;
 	int i;
@@ -3165,14 +3177,14 @@ pmap_early_io_map(vm_paddr_t pa, vm_size
 
 	pa_base = trunc_page(pa);
 	size = roundup(size + (pa - pa_base), PAGE_SIZE);
-	va = early_io_map_base + (pa - pa_base);
+	va = tlb1_map_base + (pa - pa_base);
 
 	do {
 		sz = 1 << (ilog2(size) & ~1);
-		tlb1_set_entry(early_io_map_base, pa_base, sz, _TLB_ENTRY_IO);
+		tlb1_set_entry(tlb1_map_base, pa_base, sz, _TLB_ENTRY_IO);
 		size -= sz;
 		pa_base += sz;
-		early_io_map_base += sz;
+		tlb1_map_base += sz;
 	} while (size > 0);
 
 #ifdef SMP
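
The booke/pmap.c change stops using kva_alloc() for device registers and
instead hands out virtual addresses from a region starting at
VM_MAX_KERNEL_ADDRESS via a shared bump counter (tlb1_map_base), which is
also what pmap_early_io_map() now consumes, so early and late device
mappings cannot collide.  The general idea, with hypothetical names and a
plain u_int counter to keep the atomic operation well-typed, is:

	/*
	 * Bump allocator sketch.  TLB1 device mappings are never freed,
	 * so monotonically increasing addresses are sufficient; the
	 * atomic add is the only synchronization needed.
	 */
	static u_int devmap_base = DEVMAP_START;	/* assumed constant */

	static vm_offset_t
	devmap_bump_alloc(vm_size_t size)
	{
		size = roundup(size, PAGE_SIZE);
		return ((vm_offset_t)atomic_fetchadd_int(&devmap_base, size));
	}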

Modified: stable/10/sys/powerpc/include/vmparam.h
==============================================================================
--- stable/10/sys/powerpc/include/vmparam.h	Wed May 14 00:55:21 2014	(r265997)
+++ stable/10/sys/powerpc/include/vmparam.h	Wed May 14 01:16:05 2014	(r265998)
@@ -112,6 +112,7 @@
 
 #define	VM_MIN_KERNEL_ADDRESS	KERNBASE
 #define	VM_MAX_KERNEL_ADDRESS	0xf8000000
+#define	VM_MAX_SAFE_KERNEL_ADDRESS	VM_MAX_KERNEL_ADDRESS
 
 #endif /* AIM/E500 */
 
@@ -175,14 +176,21 @@ struct pmap_physseg {
 #define	VM_KMEM_SIZE		(12 * 1024 * 1024)
 #endif
 
-#ifdef __powerpc64__
+/*
+ * How many physical pages per KVA page allocated.
+ * min(max(VM_KMEM_SIZE, Physical memory/VM_KMEM_SIZE_SCALE), VM_KMEM_SIZE_MAX)
+ * is the total KVA space allocated for kmem_map.
+ */
 #ifndef VM_KMEM_SIZE_SCALE
-#define VM_KMEM_SIZE_SCALE      (3)
+#define VM_KMEM_SIZE_SCALE	(3)
 #endif
 
+/*
+ * Ceiling on the amount of kmem_map KVA space: 40% of the entire KVA space.
+ */
 #ifndef VM_KMEM_SIZE_MAX
-#define VM_KMEM_SIZE_MAX        0x1c0000000  /* 7 GB */
-#endif
+#define VM_KMEM_SIZE_MAX	((VM_MAX_SAFE_KERNEL_ADDRESS - \
+    VM_MIN_KERNEL_ADDRESS + 1) * 2 / 5)
 #endif
 
 #define	ZERO_REGION_SIZE	(64 * 1024)	/* 64KB */
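
The vmparam.h change ties the 32-bit kmem ceiling to the actual KVA range
instead of a fixed 7 GB value that only made sense on 64-bit.  As a worked
example, using the 0xc0000000 - 0xf8000000 kernel range that appears
elsewhere in this commit (other configurations will differ):

	(0xf8000000 - 0xc0000000 + 1) * 2 / 5 = 0x16666666  (~358 MB)

so kmem_map is capped at roughly 40% of the kernel address space.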

