PERFORCE change 132500 for review
Rafal Jaworowski
raj at FreeBSD.org
Fri Jan 4 10:12:29 PST 2008
http://perforce.freebsd.org/chv.cgi?CH=132500
Change 132500 by raj at raj_mimi on 2008/01/04 18:12:24
E500 pmap: initial conversion to the kobj-based pmap dispatcher
scheme. Needs further work.
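For background, the kobj-based scheme turns each pmap entry point into a
dispatch through a per-MMU-class method table, so multiple MMU
implementations can coexist in one kernel and be selected at boot. A
minimal sketch of the pattern, assuming the generated mmu_if.h glue
(implied by the #include "mmu_if.h" in the diff below) and a generic
front end; mmu_obj and the pmap_enter() body here are illustrative, not
part of this change:

    #include <sys/kobj.h>
    #include <machine/mmuvar.h>
    #include "mmu_if.h"     /* generated from mmu_if.m: MMU_ENTER() etc. */

    static mmu_t mmu_obj;   /* kobj instance of the installed MMU class */

    /* Each machine-independent pmap call becomes a one-line dispatch. */
    void
    pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
        boolean_t wired)
    {
            MMU_ENTER(mmu_obj, pmap, va, m, prot, wired);
    }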
Affected files ...
.. //depot/projects/e500/sys/powerpc/booke/machdep.c#6 edit
.. //depot/projects/e500/sys/powerpc/booke/pmap.c#6 edit
.. //depot/projects/e500/sys/powerpc/include/mmuvar.h#2 edit
Differences ...
==== //depot/projects/e500/sys/powerpc/booke/machdep.c#6 (text+ko) ====
@@ -125,6 +125,7 @@
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/md_var.h>
+#include <machine/mmuvar.h>
#include <machine/pmap.h>
#include <machine/sigframe.h>
#include <machine/metadata.h>
@@ -394,8 +395,10 @@
if (boothowto & RB_KDB)
kdb_enter("Boot flags requested debugger");
#endif
+ kobj_machdep_init();
/* Initialise virtual memory. */
+ pmap_mmu_install(MMU_TYPE_BOOKE, 0);
pmap_bootstrap(startkernel, end);
debugf("MSR = 0x%08x\n", mfmsr());
//tlb1_print_entries();
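The ordering above matters: kobj_machdep_init() has to bring up the kobj
machinery before pmap_mmu_install() can look up a class by name. A sketch
of what the installer plausibly does, assuming mmu_def_t carries the
name/methods/size triple seen in the pmap.c hunk below; the real body
belongs to the shared powerpc dispatch code, and mmu_set and mmu_def_impl
are assumed names:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/kernel.h>
    #include <sys/linker_set.h>

    SET_DECLARE(mmu_set, mmu_def_t);    /* linker set filled by MMU_DEF() */
    static mmu_def_t *mmu_def_impl;

    int
    pmap_mmu_install(char *name, int prio)
    {
            mmu_def_t **mmupp, *mmup;
            static int curr_prio = 0;

            /* Remember the highest-priority class whose name matches. */
            SET_FOREACH(mmupp, mmu_set) {
                    mmup = *mmupp;
                    if (mmup->name != NULL && strcmp(mmup->name, name) == 0 &&
                        prio >= curr_prio) {
                            curr_prio = prio;
                            mmu_def_impl = mmup;
                            return (1);
                    }
            }
            return (0);
    }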
==== //depot/projects/e500/sys/powerpc/booke/pmap.c#6 (text+ko) ====
@@ -1,4 +1,5 @@
/*-
+ * Copyright (C) 2007 Semihalf, Rafal Jaworowski <raj at semihalf.com>
* Copyright (C) 2006 Semihalf, Marian Balakowicz <m8 at semihalf.com>
* All rights reserved.
*
@@ -29,22 +30,22 @@
* a few other pmap modules from the FreeBSD tree.
*/
/*
- * E500 pmap implementation notes:
+ * VM layout notes:
*
* Kernel and user threads run within one common virtual address space
* defined by AS=0.
*
* Virtual address space layout:
* -----------------------------
- * 0x0000_0000 - 0xbfffefff : user process
- * 0xc000_0000 - 0xc1ffffff : kerel reserved
+ * 0x0000_0000 - 0xbfff_efff : user process
+ * 0xc000_0000 - 0xc1ff_ffff : kernel reserved
 * 0xc000_0000 - kernelend : kernel code & data
- * 0xc1ff_c000 - 0xc2000000 : kstack0
- * 0xc200_0000 - 0xffefffff : KVA
+ * 0xc1ff_c000 - 0xc200_0000 : kstack0
+ * 0xc200_0000 - 0xffef_ffff : KVA
* 0xc200_0000 - 0xc200_3fff : reserved for page zero/copy
* 0xc200_4000 - ptbl buf end: reserved for ptbl bufs
* ptbl buf end- 0xffef_ffff : actual free KVA space
- * 0xfff0_0000 - 0xffffffff : CCSRBAR region
+ * 0xfff0_0000 - 0xffff_ffff : I/O devices region
*/
#include <sys/types.h>
@@ -81,8 +82,14 @@
#include <machine/spr.h>
#include <machine/vmparam.h>
#include <machine/md_var.h>
+#include <machine/mmuvar.h>
#include <machine/pte.h>
+#include "mmu_if.h"
+
+#define DEBUG
+#undef DEBUG
+
#ifdef DEBUG
#define debugf(fmt, args...) printf(fmt, ##args)
#else
@@ -101,53 +108,39 @@
/* Kernel physical load address. */
extern uint32_t kernload;
-/* Message buffer. */
-struct msgbuf *msgbufp;
-
-/* Map of physical memory regions. */
-vm_offset_t phys_avail[128];
-u_int phys_avail_count;
-
#define MEM_REGIONS 8
struct mem_region availmem_regions[MEM_REGIONS];
int availmem_regions_sz;
-/* First and last available kernel virtual addresses. */
-vm_offset_t virtual_avail;
-vm_offset_t virtual_end;
-vm_offset_t kernel_vm_end;
+/* Reserved KVA space and mutex for mmu_booke_zero_page. */
+static vm_offset_t zero_page_va;
+static struct mtx zero_page_mutex;
-/* Reserved KVA space and mutex for pmap_zero_page. */
-vm_offset_t pmap_zero_page_va;
-struct mtx pmap_zero_page_mutex;
-
/*
- * Reserved KVA space for pmap_zero_page_idle. This is used
- * by idle thred only, no mutex required.
+ * Reserved KVA space for mmu_booke_zero_page_idle. This is used
+ * by idle thread only, no lock required.
*/
-vm_offset_t pmap_zero_page_idle_va;
+static vm_offset_t zero_page_idle_va;
-/* Reserved KVA space and mutex for pmap_copy_page. */
-vm_offset_t pmap_copy_page_src_va;
-vm_offset_t pmap_copy_page_dst_va;
-struct mtx pmap_copy_page_mutex;
+/* Reserved KVA space and mutex for mmu_booke_copy_page. */
+static vm_offset_t copy_page_src_va;
+static vm_offset_t copy_page_dst_va;
+static struct mtx copy_page_mutex;
/**************************************************************************/
/* PMAP */
/**************************************************************************/
-static void pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
-
-/* Kernel pmap */
-struct pmap kernel_pmap_store;
+static void mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
+ vm_prot_t, boolean_t);
unsigned int kptbl_min; /* Index of the first kernel ptbl. */
unsigned int kernel_ptbls; /* Number of KVA ptbls. */
-int pmap_pagedaemon_waken;
+static int pagedaemon_waken;
/*
- * If user pmap is processed with pmap_remove and the resident count
+ * If user pmap is processed with mmu_booke_remove and the resident count
* drops to 0, there are no more pages to remove, so we need not continue.
*/
#define PMAP_REMOVE_DONE(pmap) \
@@ -160,7 +153,7 @@
/**************************************************************************/
/* Translation ID busy table */
-volatile pmap_t tidbusy[TID_MAX + 1];
+static volatile pmap_t tidbusy[TID_MAX + 1];
/*
* Actual maximum number of TLB0 entries.
@@ -174,14 +167,14 @@
#define TLB0_NWAYS (tlb0_nways)
#define TLB0_ENTRIES_PER_WAY (tlb0_nentries_per_way)
-/* Pointer to kernel tlb0 table, allocated in pmap_bootstrap() */
+/* Pointer to kernel tlb0 table, allocated in mmu_booke_bootstrap() */
tlb_entry_t *tlb0;
/*
* Spinlock to assure proper locking between threads and
* between tlb miss handler and kernel.
*/
-struct mtx tlb0_mutex;
+static struct mtx tlb0_mutex;
#define TLB1_SIZE 16
@@ -233,23 +226,22 @@
#define PMAP_SHPGPERPROC 200
#endif
-static vm_paddr_t pte_vatopa(pmap_t, vm_offset_t);
-
static void ptbl_init(void);
static struct ptbl_buf *ptbl_buf_alloc(void);
static void ptbl_buf_free(struct ptbl_buf *);
static void ptbl_free_pmap_ptbl(pmap_t, pte_t *);
-static void ptbl_alloc(pmap_t, unsigned int);
-static void ptbl_free(pmap_t, unsigned int);
-static void ptbl_hold(pmap_t, unsigned int);
-static int ptbl_unhold(pmap_t, unsigned int);
+static void ptbl_alloc(mmu_t, pmap_t, unsigned int);
+static void ptbl_free(mmu_t, pmap_t, unsigned int);
+static void ptbl_hold(mmu_t, pmap_t, unsigned int);
+static int ptbl_unhold(mmu_t, pmap_t, unsigned int);
-static pte_t *pte_find(pmap_t, vm_offset_t);
-static void pte_enter(pmap_t, vm_page_t, vm_offset_t, u_int32_t);
-static int pte_remove(pmap_t, vm_offset_t, u_int8_t);
+static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
+static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
+void pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, u_int32_t);
+static int pte_remove(mmu_t, pmap_t, vm_offset_t, u_int8_t);
-static pv_entry_t pv_alloc(void);
+pv_entry_t pv_alloc(void);
static void pv_free(pv_entry_t);
static void pv_insert(pmap_t, vm_offset_t, vm_page_t);
static void pv_remove(pmap_t, vm_offset_t, vm_page_t);
@@ -267,17 +259,122 @@
static struct mtx ptbl_buf_freelist_lock;
/* Base address of KVA space allocated for ptbl bufs. */
-vm_offset_t ptbl_buf_pool_vabase;
+static vm_offset_t ptbl_buf_pool_vabase;
/* Pointer to ptbl_buf structures. */
-struct ptbl_buf *ptbl_bufs;
+static struct ptbl_buf *ptbl_bufs;
+
+/*
+ * Kernel MMU interface
+ */
+vm_offset_t mmu_booke_addr_hint(mmu_t, vm_object_t, vm_offset_t, vm_size_t);
+void mmu_booke_change_wiring(mmu_t, pmap_t, vm_offset_t, boolean_t);
+void mmu_booke_clear_modify(mmu_t, vm_page_t);
+void mmu_booke_clear_reference(mmu_t, vm_page_t);
+void mmu_booke_copy(mmu_t, pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t);
+void mmu_booke_copy_page(mmu_t, vm_page_t, vm_page_t);
+void mmu_booke_enter(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t);
+void mmu_booke_enter_object(mmu_t, pmap_t, vm_offset_t, vm_offset_t,
+ vm_page_t, vm_prot_t);
+void mmu_booke_enter_quick(mmu_t, pmap_t, vm_offset_t, vm_page_t, vm_prot_t);
+vm_paddr_t mmu_booke_extract(mmu_t, pmap_t, vm_offset_t);
+vm_page_t mmu_booke_extract_and_hold(mmu_t, pmap_t, vm_offset_t, vm_prot_t);
+void mmu_booke_init(mmu_t);
+boolean_t mmu_booke_is_modified(mmu_t, vm_page_t);
+boolean_t mmu_booke_is_prefaultable(mmu_t, pmap_t, vm_offset_t);
+int mmu_booke_ts_referenced(mmu_t, vm_page_t);
+vm_offset_t mmu_booke_map(mmu_t, vm_offset_t *, vm_offset_t, vm_offset_t, int);
+int mmu_booke_mincore(mmu_t, pmap_t, vm_offset_t);
+void mmu_booke_object_init_pt(mmu_t, pmap_t, vm_offset_t, vm_object_t,
+ vm_pindex_t, vm_size_t);
+boolean_t mmu_booke_page_exists_quick(mmu_t, pmap_t, vm_page_t);
+void mmu_booke_page_init(mmu_t, vm_page_t);
+void mmu_booke_pinit(mmu_t, pmap_t);
+void mmu_booke_pinit0(mmu_t, pmap_t);
+void mmu_booke_protect(mmu_t, pmap_t, vm_offset_t, vm_offset_t, vm_prot_t);
+void mmu_booke_qenter(mmu_t, vm_offset_t, vm_page_t *, int);
+void mmu_booke_qremove(mmu_t, vm_offset_t, int);
+void mmu_booke_release(mmu_t, pmap_t);
+void mmu_booke_remove(mmu_t, pmap_t, vm_offset_t, vm_offset_t);
+void mmu_booke_remove_all(mmu_t, vm_page_t);
+void mmu_booke_remove_write(mmu_t, vm_page_t);
+void mmu_booke_zero_page(mmu_t, vm_page_t);
+void mmu_booke_zero_page_area(mmu_t, vm_page_t, int, int);
+void mmu_booke_zero_page_idle(mmu_t, vm_page_t);
+void mmu_booke_activate(mmu_t, struct thread *);
+void mmu_booke_deactivate(mmu_t, struct thread *);
+void mmu_booke_bootstrap(mmu_t, vm_offset_t, vm_offset_t);
+void *mmu_booke_mapdev(mmu_t, vm_offset_t, vm_size_t);
+void mmu_booke_unmapdev(mmu_t, vm_offset_t, vm_size_t);
+vm_paddr_t mmu_booke_kextract(mmu_t, vm_offset_t);
+void mmu_booke_kenter(mmu_t, vm_offset_t, vm_offset_t);
+void mmu_booke_kremove(mmu_t, vm_offset_t);
+boolean_t mmu_booke_dev_direct_mapped(mmu_t, vm_offset_t, vm_size_t);
+boolean_t mmu_booke_page_executable(mmu_t, vm_page_t);
+
+static mmu_method_t mmu_booke_methods[] = {
+ /* pmap dispatcher interface */
+ MMUMETHOD(mmu_addr_hint, mmu_booke_addr_hint),
+ MMUMETHOD(mmu_change_wiring, mmu_booke_change_wiring),
+ MMUMETHOD(mmu_clear_modify, mmu_booke_clear_modify),
+ MMUMETHOD(mmu_clear_reference, mmu_booke_clear_reference),
+ MMUMETHOD(mmu_copy, mmu_booke_copy),
+ MMUMETHOD(mmu_copy_page, mmu_booke_copy_page),
+ MMUMETHOD(mmu_enter, mmu_booke_enter),
+ MMUMETHOD(mmu_enter_object, mmu_booke_enter_object),
+ MMUMETHOD(mmu_enter_quick, mmu_booke_enter_quick),
+ MMUMETHOD(mmu_extract, mmu_booke_extract),
+ MMUMETHOD(mmu_extract_and_hold, mmu_booke_extract_and_hold),
+ MMUMETHOD(mmu_init, mmu_booke_init),
+ MMUMETHOD(mmu_is_modified, mmu_booke_is_modified),
+ MMUMETHOD(mmu_is_prefaultable, mmu_booke_is_prefaultable),
+ MMUMETHOD(mmu_ts_referenced, mmu_booke_ts_referenced),
+ MMUMETHOD(mmu_map, mmu_booke_map),
+ MMUMETHOD(mmu_mincore, mmu_booke_mincore),
+ MMUMETHOD(mmu_object_init_pt, mmu_booke_object_init_pt),
+ MMUMETHOD(mmu_page_exists_quick,mmu_booke_page_exists_quick),
+ MMUMETHOD(mmu_page_init, mmu_booke_page_init),
+ MMUMETHOD(mmu_pinit, mmu_booke_pinit),
+ MMUMETHOD(mmu_pinit0, mmu_booke_pinit0),
+ MMUMETHOD(mmu_protect, mmu_booke_protect),
+ MMUMETHOD(mmu_qenter, mmu_booke_qenter),
+ MMUMETHOD(mmu_qremove, mmu_booke_qremove),
+ MMUMETHOD(mmu_release, mmu_booke_release),
+ MMUMETHOD(mmu_remove, mmu_booke_remove),
+ MMUMETHOD(mmu_remove_all, mmu_booke_remove_all),
+ MMUMETHOD(mmu_remove_write, mmu_booke_remove_write),
+ MMUMETHOD(mmu_zero_page, mmu_booke_zero_page),
+ MMUMETHOD(mmu_zero_page_area, mmu_booke_zero_page_area),
+ MMUMETHOD(mmu_zero_page_idle, mmu_booke_zero_page_idle),
+ MMUMETHOD(mmu_activate, mmu_booke_activate),
+ MMUMETHOD(mmu_deactivate, mmu_booke_deactivate),
+
+ /* Internal interfaces */
+ MMUMETHOD(mmu_bootstrap, mmu_booke_bootstrap),
+ MMUMETHOD(mmu_dev_direct_mapped,mmu_booke_dev_direct_mapped),
+ MMUMETHOD(mmu_mapdev, mmu_booke_mapdev),
+ MMUMETHOD(mmu_kenter, mmu_booke_kenter),
+ MMUMETHOD(mmu_kextract, mmu_booke_kextract),
+/* MMUMETHOD(mmu_kremove, mmu_booke_kremove), */
+ MMUMETHOD(mmu_page_executable, mmu_booke_page_executable),
+ MMUMETHOD(mmu_unmapdev, mmu_booke_unmapdev),
+
+ { 0, 0 }
+};
+
+static mmu_def_t booke_mmu = {
+ MMU_TYPE_BOOKE,
+ mmu_booke_methods,
+ 0
+};
+MMU_DEF(booke_mmu);
/*
* This routine defines the region(s) of memory that should
* not be tested for the modified bit.
*/
static __inline int
-pmap_track_modified(pmap_t pmap, vm_offset_t va)
+mmu_booke_track_modified(pmap_t pmap, vm_offset_t va)
{
if (pmap == kernel_pmap)
return ((va < kmi.clean_sva) || (va >= kmi.clean_eva));
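With the table above registered through MMU_DEF(booke_mmu), the front end
can compile the class and instantiate a single kernel MMU object; from
then on every dispatched call lands in the matching mmu_booke_*() method,
and entries missing from the table (note mmu_kremove is still commented
out) fall back to the interface default. A hedged sketch of the
instantiation step, reusing the assumed mmu_obj/mmu_def_impl names from
the sketches above; kobj_class_compile_static() and kobj_init() are the
stock kobj calls for code that runs before malloc is available:

    static struct mmu_kobj mmu_kernel_obj;  /* statically allocated instance */
    static struct kobj_ops mmu_kernel_kops;

    void
    pmap_bootstrap(vm_offset_t start, vm_offset_t end)
    {
            /* Compile the method table in place; too early for malloc. */
            kobj_class_compile_static(mmu_def_impl, &mmu_kernel_kops);
            kobj_init((kobj_t)&mmu_kernel_obj, mmu_def_impl);
            mmu_obj = (mmu_t)&mmu_kernel_obj;
            MMU_BOOTSTRAP(mmu_obj, start, end);
    }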
@@ -379,7 +476,7 @@
/* Allocate page table. */
static void
-ptbl_alloc(pmap_t pmap, unsigned int pdir_idx)
+ptbl_alloc(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
vm_page_t mtbl[PTBL_PAGES];
vm_page_t m;
@@ -414,8 +511,9 @@
mtbl[i] = m;
}
- /* Mapin allocated pages into kernel_pmap. */
- pmap_qenter((vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl, PTBL_PAGES);
+ /* Map in allocated pages into kernel_pmap. */
+ mmu_booke_qenter(mmu, (vm_offset_t)pmap->pm_pdir[pdir_idx], mtbl,
+ PTBL_PAGES);
/* Zero whole ptbl. */
bzero((caddr_t)pmap->pm_pdir[pdir_idx], PTBL_PAGES * PAGE_SIZE);
@@ -428,7 +526,7 @@
/* Free ptbl pages and invalidate pdir entry. */
static void
-ptbl_free(pmap_t pmap, unsigned int pdir_idx)
+ptbl_free(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@@ -449,11 +547,11 @@
for (i = 0; i < PTBL_PAGES; i++) {
va = ((vm_offset_t)ptbl + (i * PAGE_SIZE));
- pa = pte_vatopa(kernel_pmap, va);
+ pa = pte_vatopa(mmu, kernel_pmap, va);
m = PHYS_TO_VM_PAGE(pa);
vm_page_free_zero(m);
atomic_subtract_int(&cnt.v_wire_count, 1);
- pmap_kremove(va);
+ mmu_booke_kremove(mmu, va);
}
ptbl_free_pmap_ptbl(pmap, ptbl);
@@ -469,7 +567,7 @@
* Return 1 if ptbl pages were freed.
*/
static int
-ptbl_unhold(pmap_t pmap, unsigned int pdir_idx)
+ptbl_unhold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
pte_t *ptbl;
vm_paddr_t pa;
@@ -492,7 +590,7 @@
/* decrement hold count */
for (i = 0; i < PTBL_PAGES; i++) {
- pa = pte_vatopa(kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->wire_count--;
}
@@ -503,7 +601,7 @@
* the last page.
*/
if (m->wire_count == 0) {
- ptbl_free(pmap, pdir_idx);
+ ptbl_free(mmu, pmap, pdir_idx);
//debugf("ptbl_unhold: e (freed ptbl)\n");
return (1);
@@ -518,7 +616,7 @@
* new pte entry is being inserted into ptbl.
*/
static void
-ptbl_hold(pmap_t pmap, unsigned int pdir_idx)
+ptbl_hold(mmu_t mmu, pmap_t pmap, unsigned int pdir_idx)
{
vm_paddr_t pa;
pte_t *ptbl;
@@ -537,7 +635,7 @@
KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl"));
for (i = 0; i < PTBL_PAGES; i++) {
- pa = pte_vatopa(kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
+ pa = pte_vatopa(mmu, kernel_pmap, (vm_offset_t)ptbl + (i * PAGE_SIZE));
m = PHYS_TO_VM_PAGE(pa);
m->wire_count++;
}
@@ -546,22 +644,22 @@
}
/* Allocate pv_entry structure. */
-static __inline pv_entry_t
+pv_entry_t
pv_alloc(void)
{
pv_entry_t pv;
- //debugf("pv_alloc: s\n");
+ debugf("pv_alloc: s\n");
pv_entry_count++;
if ((pv_entry_count > pv_entry_high_water) &&
- (pmap_pagedaemon_waken == 0)) {
- pmap_pagedaemon_waken = 1;
+ (pagedaemon_waken == 0)) {
+ pagedaemon_waken = 1;
wakeup (&vm_pages_needed);
}
pv = uma_zalloc(pvzone, M_NOWAIT);
- //debugf("pv_alloc: s\n");
+ debugf("pv_alloc: e\n");
return (pv);
}
@@ -585,7 +683,8 @@
pv_entry_t pve;
//int su = (pmap == kernel_pmap);
- //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x)\n", su, (u_int32_t)pmap, va);
+ //debugf("pv_insert: s (su = %d pmap = 0x%08x va = 0x%08x m = 0x%08x)\n", su,
+ // (u_int32_t)pmap, va, (u_int32_t)m);
pve = pv_alloc();
if (pve == NULL)
@@ -597,6 +696,7 @@
/* add to pv_list */
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+
TAILQ_INSERT_TAIL(&m->md.pv_list, pve, pv_link);
//debugf("pv_insert: e\n");
@@ -638,7 +738,7 @@
* Return 1 if ptbl pages were freed, otherwise return 0.
*/
static int
-pte_remove(pmap_t pmap, vm_offset_t va, u_int8_t flags)
+pte_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, u_int8_t flags)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -670,7 +770,7 @@
/* Handle modified pages. */
if (PTE_ISMODIFIED(pte)) {
- if (pmap_track_modified(pmap, va))
+ if (mmu_booke_track_modified(pmap, va))
vm_page_dirty(m);
}
@@ -689,7 +789,7 @@
if (flags & PTBL_UNHOLD) {
//debugf("pte_remove: e (unhold)\n");
- return (ptbl_unhold(pmap, pdir_idx));
+ return (ptbl_unhold(mmu, pmap, pdir_idx));
}
//debugf("pte_remove: e\n");
@@ -699,8 +799,8 @@
/*
* Insert PTE for a given page and virtual address.
*/
-static void
-pte_enter(pmap_t pmap, vm_page_t m, vm_offset_t va, u_int32_t flags)
+void
+pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, u_int32_t flags)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -720,18 +820,18 @@
*/
pte = &pmap->pm_pdir[pdir_idx][ptbl_idx];
if (PTE_ISVALID(pte)) {
- pte_remove(pmap, va, PTBL_HOLD);
+ pte_remove(mmu, pmap, va, PTBL_HOLD);
} else {
/*
* pte is not used, increment hold count
* for ptbl pages.
*/
if (pmap != kernel_pmap)
- ptbl_hold(pmap, pdir_idx);
+ ptbl_hold(mmu, pmap, pdir_idx);
}
} else {
/* Allocate page table pages. */
- ptbl_alloc(pmap, pdir_idx);
+ ptbl_alloc(mmu, pmap, pdir_idx);
}
/* Flush entry from TLB. */
@@ -763,12 +863,12 @@
/* Return the pa for the given pmap/va. */
static vm_paddr_t
-pte_vatopa(pmap_t pmap, vm_offset_t va)
+pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa = 0;
pte_t *pte;
- pte = pte_find(pmap, va);
+ pte = pte_find(mmu, pmap, va);
if ((pte != NULL) && PTE_ISVALID(pte))
pa = (PTE_PA(pte) | (va & PTE_PA_MASK));
return (pa);
@@ -776,7 +876,7 @@
/* Get a pointer to a PTE in a page table. */
static pte_t *
-pte_find(pmap_t pmap, vm_offset_t va)
+pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
@@ -797,16 +897,17 @@
* This is called during e500_init, before the system is really initialized.
*/
void
-pmap_bootstrap(vm_offset_t kernelstart, vm_offset_t kernelend)
+mmu_booke_bootstrap(mmu_t mmu, vm_offset_t kernelstart, vm_offset_t kernelend)
{
vm_offset_t phys_kernelend;
struct mem_region *mp, *mp1;
int cnt, i, j;
u_int s, e, sz;
+ u_int phys_avail_count;
vm_size_t physsz, hwphyssz;
vm_offset_t kernel_pdir;
- debugf("pmap_bootstrap: entered\n");
+ debugf("mmu_booke_bootstrap: entered\n");
/* Align kernel start and end address (kernel image). */
kernelstart = trunc_page(kernelstart);
@@ -861,18 +962,18 @@
virtual_end = VM_MAX_KERNEL_ADDRESS;
/* Allocate KVA space for page zero/copy operations. */
- pmap_zero_page_va = virtual_avail;
+ zero_page_va = virtual_avail;
virtual_avail += PAGE_SIZE;
- pmap_zero_page_idle_va = virtual_avail;
+ zero_page_idle_va = virtual_avail;
virtual_avail += PAGE_SIZE;
- pmap_copy_page_src_va = virtual_avail;
+ copy_page_src_va = virtual_avail;
virtual_avail += PAGE_SIZE;
- pmap_copy_page_dst_va = virtual_avail;
+ copy_page_dst_va = virtual_avail;
virtual_avail += PAGE_SIZE;
/* Initialize page zero/copy mutexes. */
- mtx_init(&pmap_zero_page_mutex, "pmap_zero_page", NULL, MTX_DEF);
- mtx_init(&pmap_copy_page_mutex, "pmap_copy_page", NULL, MTX_DEF);
+ mtx_init(&zero_page_mutex, "mmu_booke_zero_page", NULL, MTX_DEF);
+ mtx_init(&copy_page_mutex, "mmu_booke_copy_page", NULL, MTX_DEF);
/* Initialize tlb0 table mutex. */
mtx_init(&tlb0_mutex, "tlb0", NULL, MTX_SPIN | MTX_RECURSE);
@@ -894,8 +995,8 @@
debugf(" kernelend = 0x%08x\n", kernelend);
debugf(" kernel size = 0x%08x\n", kernelend - kernelstart);
- if (sizeof(phys_avail)/sizeof(phys_avail[0]) < availmem_regions_sz)
- panic("pmap_bootstrap: phys_avail too small");
+ if (sizeof(phys_avail) / sizeof(phys_avail[0]) < availmem_regions_sz)
+ panic("mmu_booke_bootstrap: phys_avail too small");
/*
* Removed kernel physical address range from avail
@@ -1032,19 +1133,19 @@
/* Initialize TLB0 handling. */
tlb0_init();
- debugf("pmap_bootstrap: exit\n");
+ debugf("mmu_booke_bootstrap: exit\n");
}
/*
* Get the physical page address for the given pmap/virtual address.
*/
vm_paddr_t
-pmap_extract(pmap_t pmap, vm_offset_t va)
+mmu_booke_extract(mmu_t mmu, pmap_t pmap, vm_offset_t va)
{
vm_paddr_t pa;
PMAP_LOCK(pmap);
- pa = pte_vatopa(pmap, va);
+ pa = pte_vatopa(mmu, pmap, va);
PMAP_UNLOCK(pmap);
return (pa);
@@ -1055,9 +1156,9 @@
* kernel virtual address.
*/
vm_paddr_t
-pmap_kextract(vm_offset_t va)
+mmu_booke_kextract(mmu_t mmu, vm_offset_t va)
{
- return pte_vatopa(kernel_pmap, va);
+ return pte_vatopa(mmu, kernel_pmap, va);
}
/*
@@ -1066,11 +1167,11 @@
* system needs to map virtual memory.
*/
void
-pmap_init(void)
+mmu_booke_init(mmu_t mmu)
{
int shpgperproc = PMAP_SHPGPERPROC;
- //debugf("pmap_init: s\n");
+ //debugf("mmu_booke_init: s\n");
/*
* Initialize the address space (zone) for the pv entries. Set a
@@ -1094,7 +1195,7 @@
/* Initialize ptbl allocation. */
ptbl_init();
- //debugf("pmap_init: e\n");
+ //debugf("mmu_booke_init: e\n");
}
/*
@@ -1103,58 +1204,58 @@
* references recorded. Existing mappings in the region are overwritten.
*/
void
-pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
+mmu_booke_qenter(mmu_t mmu, vm_offset_t sva, vm_page_t *m, int count)
{
vm_offset_t va;
- //debugf("pmap_qenter: s (sva = 0x%08x count = %d)\n", sva, count);
+ //debugf("mmu_booke_qenter: s (sva = 0x%08x count = %d)\n", sva, count);
va = sva;
while (count-- > 0) {
- pmap_kenter(va, VM_PAGE_TO_PHYS(*m));
+ mmu_booke_kenter(mmu, va, VM_PAGE_TO_PHYS(*m));
va += PAGE_SIZE;
m++;
}
- //debugf("pmap_qenter: e\n");
+ //debugf("mmu_booke_qenter: e\n");
}
/*
* Remove page mappings from kernel virtual address space. Intended for
- * temporary mappings entered by pmap_qenter.
+ * temporary mappings entered by mmu_booke_qenter.
*/
void
-pmap_qremove(vm_offset_t sva, int count)
+mmu_booke_qremove(mmu_t mmu, vm_offset_t sva, int count)
{
vm_offset_t va;
- //debugf("pmap_qremove: s (sva = 0x%08x count = %d)\n", sva, count);
+ //debugf("mmu_booke_qremove: s (sva = 0x%08x count = %d)\n", sva, count);
va = sva;
while (count-- > 0) {
- pmap_kremove(va);
+ mmu_booke_kremove(mmu, va);
va += PAGE_SIZE;
}
- //debugf("pmap_qremove: e\n");
+ //debugf("mmu_booke_qremove: e\n");
}
/*
* Map a wired page into kernel virtual address space.
*/
void
-pmap_kenter(vm_offset_t va, vm_offset_t pa)
+mmu_booke_kenter(mmu_t mmu, vm_offset_t va, vm_offset_t pa)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
u_int32_t flags;
pte_t *pte;
- //debugf("pmap_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n",
+ //debugf("mmu_booke_kenter: s (pdir_idx = %d ptbl_idx = %d va=0x%08x pa=0x%08x)\n",
// pdir_idx, ptbl_idx, va, pa);
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
- ("pmap_kenter: invalid va"));
+ ("mmu_booke_kenter: invalid va"));
#if 0
/* assume IO mapping, set I, G bits */
@@ -1178,7 +1279,7 @@
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
if (PTE_ISVALID(pte)) {
- //debugf("pmap_kenter: replacing entry!\n");
+ //debugf("mmu_booke_kenter: replacing entry!\n");
/* Flush entry from TLB0 */
tlb0_flush_entry(kernel_pmap, va);
@@ -1187,7 +1288,7 @@
pte->rpn = pa & ~PTE_PA_MASK;
pte->flags = flags;
- //debugf("pmap_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
+ //debugf("mmu_booke_kenter: pdir_idx = %d ptbl_idx = %d va=0x%08x "
// "pa=0x%08x rpn=0x%08x flags=0x%08x\n",
// pdir_idx, ptbl_idx, va, pa, pte->rpn, pte->flags);
@@ -1196,28 +1297,28 @@
__syncicache((void *)va, PAGE_SIZE);
}
- //debugf("pmap_kenter: e\n");
+ //debugf("mmu_booke_kenter: e\n");
}
/*
* Remove a page from kernel page table.
*/
void
-pmap_kremove(vm_offset_t va)
+mmu_booke_kremove(mmu_t mmu, vm_offset_t va)
{
unsigned int pdir_idx = PDIR_IDX(va);
unsigned int ptbl_idx = PTBL_IDX(va);
pte_t *pte;
- //debugf("pmap_kremove: s (va = 0x%08x)\n", va);
+ //debugf("mmu_booke_kremove: s (va = 0x%08x)\n", va);
KASSERT(((va >= VM_MIN_KERNEL_ADDRESS) && (va <= VM_MAX_KERNEL_ADDRESS)),
- ("pmap_kremove: invalid va"));
+ ("mmu_booke_kremove: invalid va"));
pte = &(kernel_pmap->pm_pdir[pdir_idx][ptbl_idx]);
if (!PTE_ISVALID(pte)) {
- //debugf("pmap_kremove: e (invalid pte)\n");
+ //debugf("mmu_booke_kremove: e (invalid pte)\n");
return;
}
@@ -1227,37 +1328,37 @@
pte->flags = 0;
pte->rpn = 0;
- //debugf("pmap_kremove: e\n");
+ //debugf("mmu_booke_kremove: e\n");
}
/*
* Initialize the pmap associated with process 0.
*/
void
-pmap_pinit0(pmap_t pmap)
+mmu_booke_pinit0(mmu_t mmu, pmap_t pmap)
{
- //debugf("pmap_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
- pmap_pinit(pmap);
+ //debugf("mmu_booke_pinit0: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
+ mmu_booke_pinit(mmu, pmap);
PCPU_SET(curpmap, pmap);
- //debugf("pmap_pinit0: e\n");
+ //debugf("mmu_booke_pinit0: e\n");
}
/*
* Initialize a preallocated and zeroed pmap structure,
* such as one in a vmspace structure.
*/
-int
-pmap_pinit(pmap_t pmap)
+void
+mmu_booke_pinit(mmu_t mmu, pmap_t pmap)
{
//struct thread *td;
//struct proc *p;
//td = PCPU_GET(curthread);
//p = td->td_proc;
- //debugf("pmap_pinit: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
- //printf("pmap_pinit: proc %d '%s'\n", p->p_pid, p->p_comm);
+ //debugf("mmu_booke_pinit: s (pmap = 0x%08x)\n", (u_int32_t)pmap);
+ //printf("mmu_booke_pinit: proc %d '%s'\n", p->p_pid, p->p_comm);
- KASSERT((pmap != kernel_pmap), ("pmap_pinit: initializing kernel_pmap"));
+ KASSERT((pmap != kernel_pmap), ("mmu_booke_pinit: initializing kernel_pmap"));
PMAP_LOCK_INIT(pmap);
pmap->pm_tid = 0;
@@ -1267,30 +1368,31 @@
TAILQ_INIT(&pmap->ptbl_list);
- //debugf("pmap_pinit: e\n");
- return (1);
+ //debugf("mmu_booke_pinit: e\n");
}
/*
* Release any resources held by the given physical map.
- * Called when a pmap initialized by pmap_pinit is being released.
+ * Called when a pmap initialized by mmu_booke_pinit is being released.
* Should only be called if the map contains no valid mappings.
*/
void
-pmap_release(pmap_t pmap)
+mmu_booke_release(mmu_t mmu, pmap_t pmap)
{
- //debugf("pmap_release: s\n");
+ //debugf("mmu_booke_release: s\n");
PMAP_LOCK_DESTROY(pmap);
- //debugf("pmap_release: e\n");
+ //debugf("mmu_booke_release: e\n");
}
+#if 0
/* Not needed, kernel page tables are statically allocated. */
void
-pmap_growkernel(vm_offset_t maxkvaddr)
+mmu_booke_growkernel(vm_offset_t maxkvaddr)
{
}
+#endif
/*
* Insert the given physical page at the specified virtual address in the
@@ -1298,19 +1400,19 @@
* will be wired down.
*/
void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+mmu_booke_enter(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, boolean_t wired)
{
vm_page_lock_queues();
PMAP_LOCK(pmap);
- pmap_enter_locked(pmap, va, m, prot, wired);
+ mmu_booke_enter_locked(mmu, pmap, va, m, prot, wired);
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
}
static void
-pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
- boolean_t wired)
+mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m,
+ vm_prot_t prot, boolean_t wired)
{
pte_t *pte;
vm_paddr_t pa;
@@ -1321,17 +1423,17 @@
su = (pmap == kernel_pmap);
sync = 0;
- //debugf("pmap_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
+ //debugf("mmu_booke_enter_locked: s (pmap=0x%08x su=%d tid=%d m=0x%08x va=0x%08x "
// "pa=0x%08x prot=0x%08x wired=%d)\n",
// (u_int32_t)pmap, su, pmap->pm_tid,
// (u_int32_t)m, va, pa, prot, wired);
if (su) {
KASSERT(((va >= virtual_avail) && (va <= VM_MAX_KERNEL_ADDRESS)),
- ("pmap_enter_locked: kernel pmap, non kernel va"));
+ ("mmu_booke_enter_locked: kernel pmap, non kernel va"));
} else {
KASSERT((va <= VM_MAXUSER_ADDRESS),
- ("pmap_enter_locked: user pmap, non user va"));
+ ("mmu_booke_enter_locked: user pmap, non user va"));
}
PMAP_LOCK_ASSERT(pmap, MA_OWNED);
@@ -1340,10 +1442,10 @@
* If there is an existing mapping, and the physical address has not
* changed, must be protection or wiring change.
*/
- if (((pte = pte_find(pmap, va)) != NULL) &&
+ if (((pte = pte_find(mmu, pmap, va)) != NULL) &&
(PTE_ISVALID(pte)) && (PTE_PA(pte) == pa)) {
- //debugf("pmap_enter_locked: update\n");
+ //debugf("mmu_booke_enter_locked: update\n");
/* Wiring change, just update stats. */
if (wired) {
@@ -1371,7 +1473,7 @@
} else {
/* Handle modified pages, sense modify status. */
if (PTE_ISMODIFIED(pte)) {
- if (pmap_track_modified(pmap, va))
+ if (mmu_booke_track_modified(pmap, va))
vm_page_dirty(m);
}
}
@@ -1395,9 +1497,9 @@
* physical address, pte_enter() will delete the old mapping.
*/
//if ((pte != NULL) && PTE_ISVALID(pte))
- // debugf("pmap_enter_locked: replace\n");
+ // debugf("mmu_booke_enter_locked: replace\n");
//else
- // debugf("pmap_enter_locked: new\n");
+ // debugf("mmu_booke_enter_locked: new\n");
/* Now set up the flags and install the new mapping. */
flags = (PTE_SR | PTE_VALID);
@@ -1423,7 +1525,7 @@
flags |= PTE_WIRED;
}
- pte_enter(pmap, m, va, flags);
+ pte_enter(mmu, pmap, m, va, flags);
/* Flush the real memory from the instruction cache. */
if (prot & VM_PROT_EXECUTE)
@@ -1440,16 +1542,16 @@
pmap = PCPU_GET(curpmap);
va = 0;
- pte = pte_find(pmap, va);
+ pte = pte_find(mmu, pmap, va);
KASSERT(pte == NULL, ("%s:%d", __func__, __LINE__));
flags = PTE_SR | PTE_VALID | PTE_UR;
- pte_enter(pmap, m, va, flags);
+ pte_enter(mmu, pmap, m, va, flags);
__syncicache((void *)va, PAGE_SIZE);
- pte_remove(pmap, va, PTBL_UNHOLD);
+ pte_remove(mmu, pmap, va, PTBL_UNHOLD);
}
- //debugf("pmap_enter_locked: e\n");
+ //debugf("mmu_booke_enter_locked: e\n");
}
/*
@@ -1465,8 +1567,8 @@
* corresponding offset from m_start are mapped.
*/
void
-pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, vm_page_t m_start,
- vm_prot_t prot)
+mmu_booke_enter_object(mmu_t mmu, pmap_t pmap, vm_offset_t start,
>>> TRUNCATED FOR MAIL (1000 lines) <<<