svn commit: r197053 - in projects/ppc64/sys/powerpc: aim aim64 ofw
Nathan Whitehorn
nwhitehorn at FreeBSD.org
Thu Sep 10 01:54:04 UTC 2009
Author: nwhitehorn
Date: Thu Sep 10 01:54:04 2009
New Revision: 197053
URL: http://svn.freebsd.org/changeset/base/197053
Log:
Make calls to Open Firmware work correctly with virtual memory enabled.
This gets quite a lot of the kernel startup working on the XServe --
the PCI buses are probed, the ethernet controllers attach and initialize,
etc. It does, however, appear that macio(4) is not 64-bit clean, so the
boot stops there.
Modified:
projects/ppc64/sys/powerpc/aim/ofw_machdep.c
projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
projects/ppc64/sys/powerpc/ofw/ofw_real.c
Modified: projects/ppc64/sys/powerpc/aim/ofw_machdep.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim/ofw_machdep.c Thu Sep 10 01:26:45 2009 (r197052)
+++ projects/ppc64/sys/powerpc/aim/ofw_machdep.c Thu Sep 10 01:54:04 2009 (r197053)
@@ -373,29 +373,20 @@ openfirmware(void *args)
int result;
#ifndef __powerpc64__
register_t srsave[16];
- #endif
u_int i;
+ #endif
if (pmap_bootstrapped && ofw_real_mode)
args = (void *)pmap_kextract((vm_offset_t)args);
ofw_sprg_prepare();
+ #ifndef __powerpc64__
if (pmap_bootstrapped && !ofw_real_mode) {
/*
* Swap the kernel's address space with Open Firmware's
*/
- #ifdef __powerpc64__
- for (i = 1; i < 16; i++) {
- if (i == KERNEL_SR || i == KERNEL2_SR || i == USER_SR)
- continue;
-
- __asm __volatile ("slbie %0; slbmte %1, %2" ::
- "r"(i << 28), "r"(ofw_pmap.pm_slb[i].slbv),
- "r"(ofw_pmap.pm_slb[i].slbe));
- }
- #else
for (i = 0; i < 16; i++) {
srsave[i] = mfsrin(i << ADDR_SR_SHFT);
mtsrin(i << ADDR_SR_SHFT, ofw_pmap.pm_sr[i]);
@@ -409,8 +400,8 @@ openfirmware(void *args)
"mtdbatu 3, %0" : : "r" (0));
}
isync();
- #endif
}
+ #endif
__asm __volatile( "\t"
"sync\n\t"
@@ -429,28 +420,20 @@ openfirmware(void *args)
: : "r" (oldmsr)
);
- if (pmap_bootstrapped && !ofw_real_mode) {
+ #ifndef __powerpc64__
+ if (pmap_bootstrapped && !ofw_real_mode && 0) {
/*
* Restore the kernel's addr space. The isync() doesn't
* work outside the loop unless mtsrin() is open-coded
* in an asm statement :(
*/
- #ifdef __powerpc64__
- for (i = 1; i < 16; i++) {
- if (i == KERNEL_SR || i == KERNEL2_SR || i == USER_SR)
- continue;
-
- __asm __volatile ("slbie %0; slbmte %1, %2" ::
- "r"(i << 28), "r"(kernel_pmap->pm_slb[i].slbv),
- "r"(kernel_pmap->pm_slb[i].slbe));
- }
- #else
+
for (i = 0; i < 16; i++) {
mtsrin(i << ADDR_SR_SHFT, srsave[i]);
isync();
}
- #endif
}
+ #endif
ofw_sprg_restore();
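
For context on the 32-bit path this hunk keeps: each of the 16 segment registers covers one 256 MB slice of the effective address space, so passing a segment's base address to mfsrin()/mtsrin() selects the register for that slice. A hedged restatement of the retained save/swap/restore pattern, assuming ADDR_SR_SHFT is 28 as in the powerpc headers and ofw_pmap.pm_sr as in the diff:

    register_t srsave[16];
    u_int i;

    /* Save the kernel's segment registers and install Open Firmware's. */
    for (i = 0; i < 16; i++) {
            srsave[i] = mfsrin(i << ADDR_SR_SHFT);        /* SR for segment i */
            mtsrin(i << ADDR_SR_SHFT, ofw_pmap.pm_sr[i]); /* OF's mapping */
    }
    isync();

    /* ... call into Open Firmware ... */

    /* Put the kernel's address space back. */
    for (i = 0; i < 16; i++) {
            mtsrin(i << ADDR_SR_SHFT, srsave[i]);
            isync();
    }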
Modified: projects/ppc64/sys/powerpc/aim64/mmu_oea64.c
==============================================================================
--- projects/ppc64/sys/powerpc/aim64/mmu_oea64.c Thu Sep 10 01:26:45 2009 (r197052)
+++ projects/ppc64/sys/powerpc/aim64/mmu_oea64.c Thu Sep 10 01:54:04 2009 (r197053)
@@ -1017,20 +1017,30 @@ moea64_bridge_bootstrap(mmu_t mmup, vm_o
for (off = 0; off < translations[i].om_len; off += PAGE_SIZE) {
struct vm_page m;
+ m.phys_addr = translations[i].om_pa_lo + off;
+
+ #ifdef __powerpc64__
+ m.phys_addr += (vm_offset_t)translations[i].om_pa_hi
+ << 32;
+
+ /*
+ * ofw_pmap is unused on PPC64 since slb replacement
+ * is non-atomic, so map the kernel and OFW into
+ * the same address space.
+ */
+ moea64_kenter(mmup, translations[i].om_va + off,
+ m.phys_addr);
+ #else
/* Map low memory mappings into the kernel pmap, too.
* These are typically mappings made by the loader,
* so we need them if we want to keep executing. */
if (translations[i].om_va + off < SEGMENT_LENGTH)
moea64_kenter(mmup, translations[i].om_va + off,
- translations[i].om_va + off);
- m.phys_addr = translations[i].om_pa_lo + off;
- #ifdef __powerpc64__
- m.phys_addr += translations[i].om_pa_hi << 32;
- #endif
moea64_enter_locked(&ofw_pmap,
translations[i].om_va + off, &m, VM_PROT_ALL, 1);
+ #endif
ofw_mappings++;
}
Modified: projects/ppc64/sys/powerpc/ofw/ofw_real.c
==============================================================================
--- projects/ppc64/sys/powerpc/ofw/ofw_real.c Thu Sep 10 01:26:45 2009 (r197052)
+++ projects/ppc64/sys/powerpc/ofw/ofw_real.c Thu Sep 10 01:54:04 2009 (r197053)
@@ -192,8 +192,6 @@ ofw_real_stop(void)
static void
ofw_real_bounce_alloc(void *junk)
{
- struct vm_page m;
-
/*
* Check that ofw_real is actually in use before allocating wads
* of memory. Do this by checking if our mutex has been set up.
@@ -210,18 +208,17 @@ ofw_real_bounce_alloc(void *junk)
of_bounce_virt = contigmalloc(PAGE_SIZE, M_OFWREAL, 0,
0, BUS_SPACE_MAXADDR_32BIT, PAGE_SIZE, PAGE_SIZE);
- of_bounce_phys = vtophys(of_bounce_virt);
- of_bounce_size = PAGE_SIZE;
/*
- * Add this to the OFW pmap if we are running in virtual mode.
+ * XXX: Use of_bounce_virt in 32-bit mode. This assumes that kernel
+ * VA space is always < 0xffffffff.
*/
+ if (ofw_real_mode)
+ of_bounce_phys = vtophys(of_bounce_virt);
+ else
+ of_bounce_phys = (vm_offset_t)of_bounce_virt;
- if (!ofw_real_mode) {
- m.phys_addr = of_bounce_phys;
- pmap_enter(&ofw_pmap, of_bounce_phys, VM_PROT_ALL, &m,
- VM_PROT_ALL, 1);
- }
+ of_bounce_size = PAGE_SIZE;
mtx_unlock(&of_bounce_mtx);
}
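
The bounce page exists because Open Firmware can only dereference addresses it understands, so argument strings and property buffers are staged through it around each call. A hedged sketch of that staging pattern, using of_bounce_virt, of_bounce_phys, and of_bounce_size from the diff; the helper itself is illustrative and not part of the ofw_real.c interface:

    /*
     * Copy a caller's buffer into the bounce page and return the address
     * Open Firmware should see for it.
     */
    static vm_offset_t
    ofw_bounce_in_sketch(const void *buf, size_t len, size_t *offp)
    {
            size_t off = *offp;

            if (off + len > of_bounce_size)
                    panic("Open Firmware bounce buffer overflow");
            memcpy((char *)of_bounce_virt + off, buf, len);
            *offp = off + len;

            /*
             * of_bounce_phys is the page's physical address when OF runs
             * in real mode, or its sub-4GB kernel VA in virtual mode.
             */
            return (of_bounce_phys + off);
    }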