PERFORCE change 158267 for review
Bjoern A. Zeeb
bz at FreeBSD.org
Wed Feb 25 08:24:43 PST 2009
http://perforce.freebsd.org/chv.cgi?CH=158267
Change 158267 by bz at bz_dumpster on 2009/02/25 16:23:55
Move code around and to proper locations. Prepare
for the pmap update, more cleanup, ... a bit of the
bootstrapping code just does not work anymore as expected.
Affected files ...
.. //depot/projects/s390/sys/conf/files.s390#8 edit
.. //depot/projects/s390/sys/s390/include/pmap.h#5 edit
.. //depot/projects/s390/sys/s390/s390/esa.h#5 edit
.. //depot/projects/s390/sys/s390/s390/machdep.c#14 edit
Differences ...
==== //depot/projects/s390/sys/conf/files.s390#8 (text+ko) ====
@@ -91,7 +91,8 @@
s390/s390/mcheck.c standard
s390/s390/mem.c standard
s390/s390/mp_machdep.c optional smp
-s390/s390/pmap.c standard
+#s390/s390/pmap.c standard
+s390/s390/pmap_bz.c standard
s390/s390/service.c standard
s390/s390/stack_machdep.c optional ddb | stack
s390/s390/support.S standard
==== //depot/projects/s390/sys/s390/include/pmap.h#5 (text+ko) ====
@@ -81,8 +81,6 @@
typedef struct pmap *pmap_t;
#ifdef _KERNEL
-extern pmap_t kernel_pmap;
-
#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx)
#define PMAP_LOCK_ASSERT(pmap, type) \
mtx_assert(&(pmap)->pm_mtx, (type))
@@ -117,7 +115,9 @@
extern vm_offset_t virtual_avail;
extern vm_offset_t virtual_end;
-void *pmap_kenter_temporary(vm_paddr_t pa, int i);
+void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
+void *pmap_kenter_temporary(vm_paddr_t pa, int i);
+void pmap_kremove(vm_offset_t);
#endif /* !LOCORE */
==== //depot/projects/s390/sys/s390/s390/esa.h#5 (text+ko) ====
@@ -24,8 +24,6 @@
void css_init_info(void);
void io_intr(struct trapframe *tf);
void io_intr_entry(struct trapframe *tf);
-void pmap_kenter(vm_offset_t va, vm_paddr_t pa);
-void pmap_kremove(vm_offset_t va);
extern uint8_t esa_sm_per;
extern int esa_features;
==== //depot/projects/s390/sys/s390/s390/machdep.c#14 (text+ko) ====
@@ -101,6 +101,12 @@
#include <s390/s390/trap.h>
#include <s390/s390/external.h>
#include <s390/s390/mcheck.h>
+#include <s390/s390/skey.h>
+
+vm_paddr_t phys_avail[128];
+long Maxmem = 0;
+vm_paddr_t avail_start; /* pa of first available physical page */
+vm_paddr_t avail_end; /* pa of first non-available physical page */
static struct trapframe frame0;
@@ -112,13 +118,160 @@
int cold = 1;
long realmem = 0;
+static unsigned nkptp;
+static ste_t *proc0sto;
+/* For the page copying/zeroing routines. */
+vm_offset_t csrc_ptepa, cdst_ptepa, z_ptepa, zi_ptepa;
struct msgbuf *msgbufp;
+SET_DECLARE(dat_init_set, struct dat_init_map);
+
+DAT_INIT_MAP(proc0sto, proc0sto, NKPTP0 + 1);
DAT_INIT_MAP(kstack0, kstack0, KSTACK_PAGES + 1);
DAT_INIT_MAP(msgbufp, msgbufp, atop(round_page(MSGBUF_SIZE)));
DAT_INIT_MAP(pcpu, sysarea.pcpu, 1);
+#define pte_valid(pte) (((pte) & PTE_INVALID) == 0)
+
+/*
+ * Invalidate the page table entry stored at physical address 'ptepa'.
+ * Reads the PTE via lura() (presumably s390 "load using real address" --
+ * TODO confirm) and, if the entry is currently valid, executes the IPTE
+ * instruction so the hardware TLB copy is invalidated as well.
+ */
+static void
+ipte(vm_paddr_t ptepa)
+{
+ pte_t pte;
+
+ KASSERT(ptepa, ("ipte: ptepa == 0"));
+
+ pte = lura(ptepa);
+ if (pte_valid(pte)) {
+ u_long pto, px;
+
+ /*
+ * IPTE takes the page-table origin and the page index as
+ * separate operands; split 'ptepa' accordingly.  The
+ * (PX_SHIFT - 2) scaling assumes 4-byte PTEs -- TODO confirm.
+ */
+ pto = ptepa & ~(PTSIZE - 1);
+ px = (ptepa - pto) << (PX_SHIFT - 2);
+
+ __asm volatile (
+ " ipte %[pto], %[px]"
+ :: [pto]"r"(pto), [px]"r"(px) : "memory");
+ }
+}
+
+/*
+ * Steal 'n' physically contiguous pages from the bottom of available
+ * physical memory during early bootstrap, before the VM system is up.
+ * Advances both avail_start and virtual_avail by the same amount (the
+ * kernel mapping being built is an identity mapping) and returns the
+ * physical address of the first stolen page.
+ *
+ * NOTE(review): the KASSERT message still says "pmap_steal_page" --
+ * looks like this code was moved/renamed; the message should probably
+ * say "dat_init_map".
+ */
+static vm_paddr_t
+dat_init_map(int n)
+{
+ vm_paddr_t pa = avail_start;
+
+ avail_start += n * PAGE_SIZE;
+
+ KASSERT(avail_start < avail_end,
+ ("pmap_steal_page: insufficient memory"));
+
+ virtual_avail += n * PAGE_SIZE;
+
+ return (pa);
+}
+
+/*
+ * Early MMU (DAT -- Dynamic Address Translation) bootstrap.
+ *
+ * 1. Probe the size of physical memory by walking pages with tprot()
+ *    until the access fails; the failure is caught via the program-check
+ *    new PSW pointing at 'last_page' (GCC computed-label trick).
+ * 2. Carve out early allocations registered in dat_init_set.
+ * 3. Build an identity virtual-to-physical mapping for the kernel in
+ *    proc0sto (segment table + page tables).
+ * 4. Load CR1 with the primary segment-table designation and switch the
+ *    PSW to translation mode via LPSW.
+ * 5. Invalidate the PTEs reserved for copy/zero windows and stack
+ *    guard pages (parts still disabled pending the pmap update).
+ */
+void
+dat_init(void)
+{
+ struct dat_init_map **dimp;
+ vm_offset_t va;
+ ste_t *ste;
+ pte_t *pte;
+ int i;
+
+ /* Find out the size of physical memory. */
+ psw_define(&sysarea.program_new_psw, PSW_0, (u_long)&&last_page);
+
+ /*
+ * Set storage key on each page and probe the next one with tprot;
+ * the first inaccessible page raises a program check that lands at
+ * 'last_page' through the PSW installed above.
+ */
+ sk_set(0, SK_RW);
+ avail_end = PAGE_SIZE;
+ while (tprot(avail_end)) {
+ sk_set(avail_end, SK_RW);
+ avail_end += PAGE_SIZE;
+ }
+
+last_page:
+ /* avail_end is now the first non-existent byte of physical memory. */
+ Maxmem = physmem = atop(avail_end);
+ virtual_avail = avail_start = (vm_offset_t)_end;
+ virtual_end = VM_MAX_KERNEL_ADDRESS;
+
+ /* Hand out the statically-registered early allocations. */
+ SET_FOREACH(dimp, dat_init_set) {
+ *(*dimp)->vap = dat_init_map((*dimp)->n);
+ }
+
+ /* Single free range: everything above the bootstrap allocations. */
+ phys_avail[0] = avail_start;
+ phys_avail[1] = avail_end;
+ phys_avail[2] = 0;
+
+#if 0
+ sysarea.pcpu = (struct pcpu *)
+ (pmap_bootstrap_map(1) + EXTENDED_SAVE_AREA_SIZE);
+#ifdef SMP
+ /*
+ * Sysarea and pcpu pages for APs.
+ * Not in bsme because of variable size.
+ */
+ mp_sysarea = pmap_bootstrap_map((mp_ncpus - 1) * SYSAREA_PAGES);
+ mp_pcpu = pmap_bootstrap_map(mp_ncpus - 1);
+#endif
+#endif
+ /* Create identical virtual-to-physical kernel mapping. */
+ nkptp = NKPTP0;
+ pte = (pte_t *)proc0sto + NPTEP;
+ /* Valid identity PTEs up to virtual_avail, invalid beyond. */
+ for (va = 0; va < virtual_avail; va += PAGE_SIZE)
+ *pte++ = va;
+ for (; va < virtual_end; va += PAGE_SIZE)
+ *pte++ = PTE_INVALID;
+ /* Point the segment-table entries at the page tables just filled. */
+ pte = (pte_t *)proc0sto + NPTEP;
+ ste = proc0sto;
+ for (i = 0; i < NKPTP0*4; i++, pte += NPTEST)
+ *ste++ = STE_PTL | STE_PTO(pte);
+ for (; i < NKPTP*4; i++)
+ *ste++ = STE_INVALID;
+
+ /* Initialize the kernel pmap. */
+#if 0
+ kernel_pmap = &kernel_pmap_store;
+#endif
+
+ PMAP_LOCK_INIT(kernel_pmap);
+ kernel_pmap->pm_sto = proc0sto;
+ kernel_pmap->pm_active = ~0;
+ TAILQ_INIT(&kernel_pmap->pm_pvlist);
+
+ /* Now let's enable DAT. */
+ cr_write(CR1, STD_PRIMARY(proc0sto));
+
+ /* Why svc? Why not? */
+ /*
+ * Load a translation-enabled PSW whose continuation address is the
+ * 'DAT_enabled' label; execution resumes there with DAT on.
+ */
+ psw_define(&sysarea.svc_new_psw,
+ PSW_0 | PSW_T | PSW_M | PSW_KEY, (u_long)&&DAT_enabled);
+ lpsw(&sysarea.svc_new_psw);
+
+DAT_enabled:
+#ifdef BZ_HAS_TODO_TO_MAKE_IT_COMPILE
+ csrc_ptepa = pmap_ptepa(kernel_pmap, (vm_offset_t)csrc_va);
+ cdst_ptepa = pmap_ptepa(kernel_pmap, (vm_offset_t)cdst_va);
+ z_ptepa = pmap_ptepa(kernel_pmap, (vm_offset_t)z_va);
+ zi_ptepa = pmap_ptepa(kernel_pmap, (vm_offset_t)zi_va);
+#endif
+ /*
+ * NOTE(review): with the #ifdef above disabled these four globals
+ * are still zero here, so ipte()'s KASSERT(ptepa) would fire --
+ * matches the change description saying bootstrap is broken.
+ */
+ ipte(csrc_ptepa);
+ ipte(cdst_ptepa);
+ ipte(z_ptepa);
+ ipte(zi_ptepa);
+
+ /* Guard pages for stacks. */
+#ifdef BZ_HAS_TODO_TO_MAKE_IT_COMPILE
+ ipte(pmap_ptepa(kernel_pmap, kstack0));
+#endif
+ kstack0 += PAGE_SIZE;
+
+#ifdef DDB
+#ifdef BZ_HAS_TODO_TO_MAKE_IT_COMPILE
+ ipte(pmap_ptepa(kernel_pmap, ddbstack));
+ ddbstack += PAGE_SIZE;
+#endif
+#endif
+}
+
+
void
io_intr(struct trapframe *tf)
{
More information about the p4-projects
mailing list