PERFORCE change 135809 for review
Randall R. Stewart
rrs at FreeBSD.org
Wed Feb 20 17:59:50 UTC 2008
http://perforce.freebsd.org/chv.cgi?CH=135809
Change 135809 by rrs at rrs-mips2-jnpr on 2008/02/20 17:59:35
s9indent of file.
Affected files ...
.. //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#21 edit
Differences ...
==== //depot/projects/mips2-jnpr/src/sys/mips/mips/pmap.c#21 (text+ko) ====
@@ -161,22 +161,23 @@
*/
static uma_zone_t pvzone;
static struct vm_object pvzone_obj;
-static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
+static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;
int pmap_pagedaemon_waken = 0;
struct fpage fpages_shared[FPAGES_SHARED];
struct sysmaps sysmaps_pcpu[MAXCPU];
-static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
-static pv_entry_t get_pv_entry(void);
-static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
+static PMAP_INLINE void free_pv_entry(pv_entry_t pv);
+static pv_entry_t get_pv_entry(void);
+static __inline void pmap_changebit(vm_page_t m, int bit, boolean_t setem);
static int pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq, vm_offset_t va);
static void pmap_remove_page(struct pmap *pmap, vm_offset_t va);
static void pmap_remove_entry(struct pmap *pmap, vm_page_t m, vm_offset_t va);
static boolean_t pmap_testbit(vm_page_t m, int bit);
-static void pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
+static void
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte,
vm_page_t m, boolean_t wired);
static vm_page_t pmap_allocpte(pmap_t pmap, vm_offset_t va, int flags);
@@ -187,19 +188,21 @@
static void pmap_TLB_invalidate_kernel(vm_offset_t);
static void pmap_TLB_update_kernel(vm_offset_t, pt_entry_t);
static void pmap_init_fpage(void);
+
#ifdef SMP
static void pmap_invalidate_page_action(void *arg);
static void pmap_invalidate_all_action(void *arg);
static void pmap_update_page_action(void *arg);
+
#endif
struct local_sysmaps {
- struct mtx lock;
+ struct mtx lock;
pt_entry_t CMAP1;
pt_entry_t CMAP2;
- caddr_t CADDR1;
- caddr_t CADDR2;
- uint16_t valid1, valid2;
+ caddr_t CADDR1;
+ caddr_t CADDR2;
+ uint16_t valid1, valid2;
};
/* This structure is for large memory
@@ -216,9 +219,9 @@
pmap_segmap(pmap_t pmap, vm_offset_t va)
{
if (pmap->pm_segtab)
- return(pmap->pm_segtab[((vm_offset_t)(va) >> SEGSHIFT)]);
+ return (pmap->pm_segtab[((vm_offset_t)(va) >> SEGSHIFT)]);
else
- return((pd_entry_t) 0);
+ return ((pd_entry_t)0);
}
/*
@@ -233,7 +236,7 @@
pt_entry_t *pdeaddr;
if (pmap) {
- pdeaddr = (pt_entry_t *) pmap_segmap(pmap, va);
+ pdeaddr = (pt_entry_t *)pmap_segmap(pmap, va);
if (pdeaddr) {
return pdeaddr + vad_to_pte_offset(va);
}
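
A note on the lookup pair above: pmap_segmap() picks the second-level
page-table page with the top virtual-address bits, and pmap_pte() then
indexes into it. A standalone sketch of the address split, under assumed
32-bit MIPS constants (SEGSHIFT 22, PGSHIFT 12, NPTEPG 1024; the real
values come from pmap.h):

	#include <stdio.h>

	#define SEGSHIFT	22	/* assumed: each segmap slot maps 4 Meg */
	#define PGSHIFT		12	/* assumed: 4K pages */
	#define NPTEPG		1024	/* assumed: PTEs per page-table page */

	int
	main(void)
	{
		unsigned long va = 0xC0123456UL;	/* arbitrary example */

		printf("segmap index %#lx, pte offset %#lx\n",
		    va >> SEGSHIFT,			/* 0x300 */
		    (va >> PGSHIFT) & (NPTEPG - 1UL));	/* 0x123 */
		return (0);
	}
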
@@ -253,12 +256,13 @@
bank_size = phys_avail[1] - phys_avail[0];
while (size > bank_size) {
int i;
- for (i = 0; phys_avail[i+2]; i+= 2) {
- phys_avail[i] = phys_avail[i+2];
- phys_avail[i+1] = phys_avail[i+3];
+
+ for (i = 0; phys_avail[i + 2]; i += 2) {
+ phys_avail[i] = phys_avail[i + 2];
+ phys_avail[i + 1] = phys_avail[i + 3];
}
phys_avail[i] = 0;
- phys_avail[i+1] = 0;
+ phys_avail[i + 1] = 0;
if (!phys_avail[0])
panic("pmap_steal_memory: out of memory");
bank_size = phys_avail[1] - phys_avail[0];
@@ -267,10 +271,10 @@
pa = phys_avail[0];
phys_avail[0] += size;
if (pa >= MIPS_KSEG0_LARGEST_PHYS) {
- panic("Out of memory below 512Meg?");
+ panic("Out of memory below 512Meg?");
}
va = MIPS_PHYS_TO_CACHED(pa);
- bzero((caddr_t) va, size);
+ bzero((caddr_t)va, size);
return va;
}
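
The loop above retires the first bank of phys_avail[], a zero-terminated
array of (start, end) pairs, when a request exceeds it. A minimal
standalone sketch of that compaction idiom (the helper name is invented):

	/* Drop the first (start, end) bank and shift the rest down. */
	static void
	drop_first_bank(unsigned long avail[])
	{
		int i;

		for (i = 0; avail[i + 2] != 0; i += 2) {
			avail[i] = avail[i + 2];
			avail[i + 1] = avail[i + 3];
		}
		avail[i] = 0;		/* keep the zero terminator */
		avail[i + 1] = 0;
	}
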
@@ -289,8 +293,8 @@
/* Sort. */
again:
for (i = 0; phys_avail[i + 1] != 0; i += 2) {
- if(phys_avail[i+1] >= MIPS_KSEG0_LARGEST_PHYS) {
- memory_larger_than_512meg++;
+ if (phys_avail[i + 1] >= MIPS_KSEG0_LARGEST_PHYS) {
+ memory_larger_than_512meg++;
}
if (i < 2)
continue;
@@ -298,8 +302,8 @@
vm_paddr_t ptemp[2];
- ptemp[0] = phys_avail[i+0];
- ptemp[1] = phys_avail[i+1];
+ ptemp[0] = phys_avail[i + 0];
+ ptemp[1] = phys_avail[i + 1];
phys_avail[i + 0] = phys_avail[i - 2];
phys_avail[i + 1] = phys_avail[i - 1];
@@ -317,17 +321,15 @@
size = phys_avail[i + 1] - phys_avail[i];
printf("%#08jx - %#08jx, %ju bytes (%ju pages)\n",
- (uintmax_t)phys_avail[i],
- (uintmax_t)phys_avail[i + 1] - 1,
- (uintmax_t)size, (uintmax_t)size / PAGE_SIZE);
+ (uintmax_t) phys_avail[i],
+ (uintmax_t) phys_avail[i + 1] - 1,
+ (uintmax_t) size, (uintmax_t) size / PAGE_SIZE);
}
}
-
-
/*
* Steal the message buffer from the beginning of memory.
*/
- msgbufp = (struct msgbuf *) pmap_steal_memory(MSGBUF_SIZE);
+ msgbufp = (struct msgbuf *)pmap_steal_memory(MSGBUF_SIZE);
msgbufinit(msgbufp, MSGBUF_SIZE);
/*
@@ -339,78 +341,77 @@
virtual_avail = VM_MIN_KERNEL_ADDRESS + VM_KERNEL_ALLOC_OFFSET;
virtual_end = VM_MAX_KERNEL_ADDRESS;
- /* Steal some virtual space that will not be in
- * kernel_segmap. This va memory space will be used to
- * map in kernel pages that are outside the 512Meg
- * region. Note that we only do this steal when we
- * do have memory in this region, that way for
- * systems with smaller memory we don't "steal"
- * any va ranges :-)
+ /*
+ * Steal some virtual space that will not be in kernel_segmap. This
+ * va memory space will be used to map in kernel pages that are
+ * outside the 512Meg region. Note that we only do this steal when
+	 * we do have memory in this region; that way, for systems with
+	 * smaller memory, we don't "steal" any va ranges :-)
*/
if (memory_larger_than_512meg) {
- for ( i=0; i< MAXCPU; i++) {
- sysmap_lmem[i].CMAP1 = PG_G;
- sysmap_lmem[i].CMAP2 = PG_G;
- sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
- virtual_avail += PAGE_SIZE;
- sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
- virtual_avail += PAGE_SIZE;
- sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
- PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
- }
+ for (i = 0; i < MAXCPU; i++) {
+ sysmap_lmem[i].CMAP1 = PG_G;
+ sysmap_lmem[i].CMAP2 = PG_G;
+ sysmap_lmem[i].CADDR1 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].CADDR2 = (caddr_t)virtual_avail;
+ virtual_avail += PAGE_SIZE;
+ sysmap_lmem[i].valid1 = sysmap_lmem[i].valid2 = 0;
+ PMAP_LGMEM_LOCK_INIT(&sysmap_lmem[i]);
+ }
}
virtual_sys_start = (caddr_t)virtual_avail;
/*
* Allocate segment table for the kernel
*/
- kernel_segmap = (pd_entry_t *) pmap_steal_memory(PAGE_SIZE);
+ kernel_segmap = (pd_entry_t *)pmap_steal_memory(PAGE_SIZE);
/*
* Allocate second level page tables for the kernel
*/
nkpt = NKPT;
if (memory_larger_than_512meg) {
- /* If we have a large memory system
- * we CANNOT afford to hit pmap_growkernel() and allocate memory. Since
- * we MAY end up with a page that is NOT mappable. For
- * that reason we up front grab more. Normall NKPT is 120 (YMMV see pmap.h)
- * this gives us 480meg of kernel virtual addresses at the
- * cost of 120 pages (each page gets us 4 Meg). Since
- * the kernel starts at virtual_avail, we can use this to
- * calculate how many entris are left from there to the end
- * of the segmap, we want to allocate all of it, which would
- * be somewhere above 0xC0000000 - 0xFFFFFFFF which results in
- * about 256 entries or so instead of the 120.
- */
- nkpt = (PAGE_SIZE/sizeof(pd_entry_t)) - (virtual_avail >> SEGSHIFT);
+ /*
+	 * If we have a large memory system, we CANNOT afford to hit
+	 * pmap_growkernel() and allocate memory, since we MAY end
+	 * up with a page that is NOT mappable. For that reason we
+	 * grab more up front. Normally NKPT is 120 (YMMV, see pmap.h);
+	 * this gives us 480 Meg of kernel virtual addresses at the
+	 * cost of 120 pages (each page gets us 4 Meg). Since the
+	 * kernel starts at virtual_avail, we can use this to
+	 * calculate how many entries are left from there to the end
+	 * of the segmap; we want to allocate all of it, which would
+	 * be somewhere above 0xC0000000 - 0xFFFFFFFF, which results
+	 * in about 256 entries or so instead of the 120.
+ */
+ nkpt = (PAGE_SIZE / sizeof(pd_entry_t)) - (virtual_avail >> SEGSHIFT);
}
- pgtab = (pt_entry_t *) pmap_steal_memory(PAGE_SIZE*nkpt);
+ pgtab = (pt_entry_t *)pmap_steal_memory(PAGE_SIZE * nkpt);
/*
* The R[4-7]?00 stores only one copy of the Global bit in the
- * translation lookaside buffer for each 2 page entry.
- * Thus invalid entrys must have the Global bit set so
- * when Entry LO and Entry HI G bits are anded together
- * they will produce a global bit to store in the tlb.
+ * translation lookaside buffer for each 2 page entry. Thus invalid
+	 * entries must have the Global bit set so when Entry LO and Entry HI
+	 * G bits are ANDed together they will produce a global bit to store
+	 * in the TLB.
*/
for (i = 0, pte = pgtab; i < (nkpt * NPTEPG); i++, pte++)
*pte = PG_G;
printf("Va=0x%x Ve=%x\n", virtual_avail, virtual_end);
/*
- * The segment table contains the KVA of the pages in the
- * second level page table.
+ * The segment table contains the KVA of the pages in the second
+ * level page table.
*/
printf("init kernel_segmap va >> = %d nkpt:%d\n",
- (virtual_avail >> SEGSHIFT),
- nkpt);
+ (virtual_avail >> SEGSHIFT),
+ nkpt);
for (i = 0, j = (virtual_avail >> SEGSHIFT); i < nkpt; i++, j++)
kernel_segmap[j] = (pd_entry_t)(pgtab + (i * NPTEPG));
avail_start = phys_avail[0];
- for (i = 0; phys_avail[i+2]; i+= 2)
- ;
- avail_end = phys_avail[i+1];
+ for (i = 0; phys_avail[i + 2]; i += 2);
+ avail_end = phys_avail[i + 1];
/*
* The kernel's pmap is statically allocated so we don't have to use
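
To make the nkpt sizing above concrete, here is the arithmetic as a
standalone sketch, under assumed 32-bit values (PAGE_SIZE 4096, 4-byte
pd_entry_t, SEGSHIFT 22, NPTEPG 1024; virtual_avail is a made-up
example address):

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long page_size = 4096, pde_size = 4;
		unsigned long nptepg = 1024, segshift = 22;
		unsigned long virtual_avail = 0xC0100000UL;	/* hypothetical */
		unsigned long slots = page_size / pde_size;	/* 1024 segmap slots */
		unsigned long nkpt = slots - (virtual_avail >> segshift);

		/* Each slot maps nptepg * page_size bytes = 4 Meg of KVA. */
		printf("nkpt = %lu, covering %lu Meg\n",
		    nkpt, (nkpt * nptepg * page_size) >> 20);
		return (0);
	}

This prints nkpt = 256 and 1024 Meg, matching the comment's "about 256
entries" for the 0xC0000000 - 0xFFFFFFFF range, versus the 480 Meg that
the default NKPT of 120 would cover.
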
@@ -424,8 +425,8 @@
kernel_pmap->pm_active = ~0;
TAILQ_INIT(&kernel_pmap->pm_pvlist);
printf("avail_start:0x%x avail_end:0x%x\n",
- avail_start, avail_end);
-
+ avail_start, avail_end);
+
kernel_pmap->pm_asid[PCPU_GET(cpuid)].asid = PMAP_ASID_RESERVED;
kernel_pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
pmap_max_asid = VMNUM_PIDS;
@@ -458,7 +459,6 @@
if (need_wired_tlb_page_pool) {
pmap_init_fpage();
}
-
/*
* Initialize the address space (zone) for the pv entries. Set a
* high water mark so that the system can recover from excessive
@@ -484,11 +484,12 @@
static int
pmap_nw_modified(pt_entry_t pte)
{
- if ((pte & (PG_M|PG_RO)) == (PG_M|PG_RO))
+ if ((pte & (PG_M | PG_RO)) == (PG_M | PG_RO))
return (1);
else
return (0);
}
+
#endif
@@ -500,9 +501,9 @@
pmap_track_modified(vm_offset_t va)
{
/*
- * Kernel submap initialization has been moved for MD to MI
- * code. ie from cpu_startup() to vm_ksubmap_init().
- * clean_sva and clean_eva are part of the kmi structure.
+	 * Kernel submap initialization has been moved from MD to MI code,
+	 * i.e., from cpu_startup() to vm_ksubmap_init(). clean_sva and
+	 * clean_eva are part of the kmi structure.
*/
if ((va < kmi.clean_sva) || (va >= kmi.clean_eva))
return (1);
@@ -514,19 +515,19 @@
pmap_invalidate_all(pmap_t pmap)
{
#ifdef SMP
- smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *) pmap);
+ smp_rendezvous(0, pmap_invalidate_all_action, 0, (void *)pmap);
}
static void
pmap_invalidate_all_action(void *arg)
{
- pmap_t pmap = (pmap_t) arg;
+ pmap_t pmap = (pmap_t)arg;
+
#endif
if (pmap->pm_active & PCPU_GET(cpumask)) {
pmap_TLB_invalidate_all();
- }
- else
+ } else
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
}
@@ -540,31 +541,31 @@
{
#ifdef SMP
struct pmap_invalidate_page_arg arg;
+
arg.pmap = pmap;
arg.va = va;
- smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *) &arg);
+ smp_rendezvous(0, pmap_invalidate_page_action, 0, (void *)&arg);
}
static void
pmap_invalidate_page_action(void *arg)
{
- pmap_t pmap = ((struct pmap_invalidate_page_arg *) arg)->pmap;
- vm_offset_t va = ((struct pmap_invalidate_page_arg *) arg)->va;
+ pmap_t pmap = ((struct pmap_invalidate_page_arg *)arg)->pmap;
+ vm_offset_t va = ((struct pmap_invalidate_page_arg *)arg)->va;
+
#endif
if (is_kernel_pmap(pmap)) {
pmap_TLB_invalidate_kernel(va);
return;
}
-
if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
return;
else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
return;
}
-
va = pmap_va_asid(pmap, (va & ~PGOFSET));
mips_TBIS(va);
}
@@ -590,32 +591,32 @@
{
#ifdef SMP
struct pmap_update_page_arg arg;
+
arg.pmap = pmap;
arg.va = va;
arg.pte = pte;
- smp_rendezvous(0, pmap_update_page_action, 0, (void *) &arg);
+ smp_rendezvous(0, pmap_update_page_action, 0, (void *)&arg);
}
static void
pmap_update_page_action(void *arg)
{
- pmap_t pmap = ((struct pmap_update_page_arg *) arg)->pmap;
- vm_offset_t va = ((struct pmap_update_page_arg *) arg)->va;
- pt_entry_t pte = ((struct pmap_update_page_arg *) arg)->pte;
+ pmap_t pmap = ((struct pmap_update_page_arg *)arg)->pmap;
+ vm_offset_t va = ((struct pmap_update_page_arg *)arg)->va;
+ pt_entry_t pte = ((struct pmap_update_page_arg *)arg)->pte;
+
#endif
if (is_kernel_pmap(pmap)) {
pmap_TLB_update_kernel(va, pte);
return;
}
-
- if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation) )
+ if (pmap->pm_asid[PCPU_GET(cpuid)].gen != PCPU_GET(asid_generation))
return;
else if (!(pmap->pm_active & PCPU_GET(cpumask))) {
pmap->pm_asid[PCPU_GET(cpuid)].gen = 0;
return;
}
-
va = pmap_va_asid(pmap, va);
MachTLBUpdate(va, pte);
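
The pmap_invalidate_*() and pmap_update_page() hunks above all follow
the same #ifdef SMP fall-through idiom: under SMP the outer function
marshals its arguments into a struct and hands them to smp_rendezvous(),
and the action function unpacks them on every CPU; under UP the two
halves compile into a single function. A skeletal sketch (do_thing and
its argument struct are invented names, not part of the patch):

	struct do_thing_arg {
		int a;
		int b;
	};

	#ifdef SMP
	static void do_thing_action(void *arg);

	#endif
	void
	do_thing(int a, int b)
	{
	#ifdef SMP
		struct do_thing_arg arg;

		arg.a = a;
		arg.b = b;
		smp_rendezvous(NULL, do_thing_action, NULL, (void *)&arg);
	}

	static void
	do_thing_action(void *arg)
	{
		int a = ((struct do_thing_arg *)arg)->a;
		int b = ((struct do_thing_arg *)arg)->b;
	#endif
		/* ... the per-CPU work happens here, using a and b ... */
	}
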
@@ -642,7 +643,7 @@
pmap_extract(pmap_t pmap, vm_offset_t va)
{
pt_entry_t *pte;
- vm_offset_t retval=0;
+ vm_offset_t retval = 0;
PMAP_LOCK(pmap);
pte = pmap_pte(pmap, va);
@@ -676,7 +677,6 @@
m = PHYS_TO_VM_PAGE(mips_tlbpfn_to_paddr(pte));
vm_page_hold(m);
}
-
vm_page_unlock_queues();
PMAP_UNLOCK(pmap);
return (m);
@@ -689,7 +689,7 @@
/*
* add a wired page to the kva
*/
-/* PMAP_INLINE */ void
+ /* PMAP_INLINE */ void
pmap_kenter(vm_offset_t va, vm_paddr_t pa)
{
register pt_entry_t *pte;
@@ -712,7 +712,7 @@
/*
* remove a page from the kernel pagetables
*/
-/* PMAP_INLINE */ void
+ /* PMAP_INLINE */ void
pmap_kremove(vm_offset_t va)
{
register pt_entry_t *pte;
@@ -737,18 +737,18 @@
vm_offset_t
pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot)
{
- vm_offset_t va, sva;
+ vm_offset_t va, sva;
- va = sva = *virt;
- printf("pmap_map: enters virt:0x%x start:%x end:0x%x prot:0x%x\n",
- *virt, start, end, prot);
- while (start < end) {
- pmap_kenter(va, start);
- va += PAGE_SIZE;
- start += PAGE_SIZE;
- }
- *virt = va;
- return (sva);
+ va = sva = *virt;
+ printf("pmap_map: enters virt:0x%x start:%x end:0x%x prot:0x%x\n",
+ *virt, start, end, prot);
+ while (start < end) {
+ pmap_kenter(va, start);
+ va += PAGE_SIZE;
+ start += PAGE_SIZE;
+ }
+ *virt = va;
+ return (sva);
}
/*
@@ -809,34 +809,33 @@
struct sysmaps *sysmaps;
/*
- * We allocate a total of (FPAGES*MAXCPU + FPAGES_SHARED + 1)
- * pages at first. FPAGES & FPAGES_SHARED should be EVEN
- * Then we'll adjust 'kva' to be even-page aligned so that
- * the fpage area can be wired in the TLB with a single
- * TLB entry.
+ * We allocate a total of (FPAGES*MAXCPU + FPAGES_SHARED + 1) pages
+	 * at first. FPAGES & FPAGES_SHARED should be EVEN. Then we'll adjust
+ * 'kva' to be even-page aligned so that the fpage area can be wired
+ * in the TLB with a single TLB entry.
*/
kva = kmem_alloc_nofault(kernel_map,
- (FPAGES*MAXCPU + 1 + FPAGES_SHARED) * PAGE_SIZE);
+ (FPAGES * MAXCPU + 1 + FPAGES_SHARED) * PAGE_SIZE);
if ((void *)kva == NULL)
panic("pmap_init_fpage: fpage allocation failed");
/*
- * Make up start at an even page number so we can wire down
- * the fpage area in the tlb with a single tlb entry.
+	 * Make the start an even page number so we can wire down the
+	 * fpage area in the TLB with a single TLB entry.
*/
- if ((((vm_offset_t) kva) >> PGSHIFT) & 1) {
- /*
- * 'kva' is not even-page aligned.
- * Adjust it and free the first page which is unused.
- */
- kmem_free(kernel_map, (vm_offset_t) kva, NBPG);
- kva = ((vm_offset_t) kva) + NBPG;
+ if ((((vm_offset_t)kva) >> PGSHIFT) & 1) {
+ /*
+ * 'kva' is not even-page aligned. Adjust it and free the
+ * first page which is unused.
+ */
+ kmem_free(kernel_map, (vm_offset_t)kva, NBPG);
+ kva = ((vm_offset_t)kva) + NBPG;
} else {
/*
- * 'kva' is even page aligned.
- * We don't need the last page, free it.
+ * 'kva' is even page aligned. We don't need the last page,
+ * free it.
*/
- kmem_free(kernel_map, ((vm_offset_t) kva) + FSPACE, NBPG);
+ kmem_free(kernel_map, ((vm_offset_t)kva) + FSPACE, NBPG);
}
for (i = 0; i < MAXCPU; i++) {
@@ -844,17 +843,16 @@
mtx_init(&sysmaps->lock, "SYSMAPS", NULL, MTX_DEF);
/* Assign FPAGES pages to the CPU */
- for (j =0; j < FPAGES; j++)
+ for (j = 0; j < FPAGES; j++)
sysmaps->fp[j].kva = kva + (j) * PAGE_SIZE;
kva = ((vm_offset_t)kva) + (FPAGES * PAGE_SIZE);
}
/*
- * An additional 2 pages are needed, one for
- * pmap_zero_page_idle() and one for coredump.
- * These pages are shared by all cpu's
+ * An additional 2 pages are needed, one for pmap_zero_page_idle()
+	 * and one for coredump. These pages are shared by all CPUs.
*/
- fpages_shared[PMAP_FPAGE3].kva = kva;
+ fpages_shared[PMAP_FPAGE3].kva = kva;
fpages_shared[PMAP_FPAGE_KENTER_TEMP].kva = kva + PAGE_SIZE;
}
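
The alignment dance in pmap_init_fpage() above exists because one MIPS
TLB entry maps an even/odd pair of consecutive virtual pages, so a
region wired by a single entry must start on an even page. Allocating
one spare page guarantees an aligned region either way; a sketch of the
trim, with assumed PGSHIFT/NBPG values:

	#define PGSHIFT	12
	#define NBPG	(1UL << PGSHIFT)

	/* kva has one spare page; return an even-page-aligned start. */
	static unsigned long
	trim_to_even_page(unsigned long kva)
	{
		if ((kva >> PGSHIFT) & 1)
			return (kva + NBPG);	/* odd: skip the first page */
		return (kva);			/* even: the spare is the last page */
	}
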
@@ -878,7 +876,6 @@
else
panic("pmap_map_fpage: fpage is busy");
}
-
fp->state = TRUE;
kva = fp->kva;
@@ -888,7 +885,7 @@
pmap_TLB_update_kernel(kva, npte);
- return(kva);
+ return (kva);
}
/*
@@ -907,7 +904,6 @@
if (!(fp->state)) {
panic("pmap_unmap_fpage: fpage is free");
}
-
kva = fp->kva;
pte = pmap_pte(kernel_pmap, kva);
@@ -933,7 +929,8 @@
* drops to zero, then it decrements the wire count.
*/
static int
-_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+_pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
/*
* unmap the page table page
@@ -978,7 +975,7 @@
if (mpte == NULL) {
ptepindex = (va >> SEGSHIFT);
if (pmap->pm_ptphint &&
- (pmap->pm_ptphint->pindex == ptepindex)) {
+ (pmap->pm_ptphint->pindex == ptepindex)) {
mpte = pmap->pm_ptphint;
} else {
pteva = *pmap_pde(pmap, va);
@@ -986,7 +983,6 @@
pmap->pm_ptphint = mpte;
}
}
-
return pmap_unwire_pte_hold(pmap, mpte);
}
@@ -1021,7 +1017,7 @@
PMAP_LOCK_INIT(pmap);
- req = VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
+ req = VM_ALLOC_NOOBJ | VM_ALLOC_NORMAL | VM_ALLOC_WIRED |
VM_ALLOC_ZERO;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
@@ -1031,12 +1027,12 @@
/*
* allocate the page directory page
*/
- ptdpg = vm_page_alloc( NULL, NUSERPGTBLS, req);
+ ptdpg = vm_page_alloc(NULL, NUSERPGTBLS, req);
#if 0
/* I think we can just delete these, now that PG_BUSY is gone */
vm_page_lock_queues();
- vm_page_flag_clear(ptdpg, PG_BUSY); /* not usually mapped*/
+ vm_page_flag_clear(ptdpg, PG_BUSY); /* not usually mapped */
#endif
ptdpg->valid = VM_PAGE_BITS_ALL;
@@ -1051,9 +1047,9 @@
pmap->pm_active = 0;
pmap->pm_ptphint = NULL;
- for (i=0; i < MAXCPU; i++) {
+ for (i = 0; i < MAXCPU; i++) {
pmap->pm_asid[i].asid = PMAP_ASID_RESERVED;
- pmap->pm_asid[i].gen = 0;
+ pmap->pm_asid[i].gen = 0;
}
TAILQ_INIT(&pmap->pm_pvlist);
bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1076,7 +1072,7 @@
(flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
("_pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
- req = VM_ALLOC_WIRED| VM_ALLOC_ZERO| VM_ALLOC_NOOBJ;
+ req = VM_ALLOC_WIRED | VM_ALLOC_ZERO | VM_ALLOC_NOOBJ;
#ifdef VM_ALLOC_WIRED_TLB_PG_POOL
if (need_wired_tlb_page_pool)
req |= VM_ALLOC_WIRED_TLB_PG_POOL;
@@ -1092,14 +1088,12 @@
vm_page_lock_queues();
PMAP_LOCK(pmap);
}
-
/*
- * Indicate the need to retry. While waiting, the page table
- * page may have been allocated.
+ * Indicate the need to retry. While waiting, the page
+ * table page may have been allocated.
*/
return (NULL);
}
-
if ((m->flags & PG_ZERO) == 0)
pmap_zero_page(m);
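
The retry comment above describes a common lock-drop pattern: when the
allocation must sleep, the pmap and page-queue locks are released first,
and NULL is returned afterwards so the caller can re-check whether
another thread installed the page table page in the meantime. In
outline (the lock and wait helpers are placeholders, not kernel APIs):

	m = alloc_page_nowait();
	if (m == NULL) {
		if (flags & M_WAITOK) {
			unlock_page_queues();
			unlock_pmap();
			wait_for_free_pages();
			lock_page_queues();
			lock_pmap();
		}
		return (NULL);	/* retry: the page may exist by now */
	}
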
@@ -1107,15 +1101,15 @@
("_pmap_allocpte: %p->queue != PQ_NONE", m));
/*
- * Map the pagetable page into the process address space, if
- * it isn't already there.
+ * Map the pagetable page into the process address space, if it
+ * isn't already there.
*/
pmap->pm_stats.resident_count++;
ptepa = VM_PAGE_TO_PHYS(m);
pteva = MIPS_PHYS_TO_CACHED(ptepa);
- pmap->pm_segtab[ptepindex] = (pd_entry_t) pteva;
+ pmap->pm_segtab[ptepindex] = (pd_entry_t)pteva;
/*
* Set the page table hint
@@ -1143,8 +1137,8 @@
vm_page_t m;
KASSERT((flags & (M_NOWAIT | M_WAITOK)) == M_NOWAIT ||
- (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
- ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
+ (flags & (M_NOWAIT | M_WAITOK)) == M_WAITOK,
+ ("pmap_allocpte: flags is neither M_NOWAIT nor M_WAITOK"));
/*
* Calculate pagetable page index
@@ -1154,19 +1148,18 @@
/*
* Get the page directory entry
*/
- pteva = (vm_offset_t) pmap->pm_segtab[ptepindex];
+ pteva = (vm_offset_t)pmap->pm_segtab[ptepindex];
/*
- * If the page table page is mapped, we just increment the
- * hold count, and activate it.
+ * If the page table page is mapped, we just increment the hold
+ * count, and activate it.
*/
if (pteva) {
/*
- * In order to get the page table page, try the
- * hint first.
+ * In order to get the page table page, try the hint first.
*/
if (pmap->pm_ptphint &&
- (pmap->pm_ptphint->pindex == ptepindex)) {
+ (pmap->pm_ptphint->pindex == ptepindex)) {
m = pmap->pm_ptphint;
} else {
m = PHYS_TO_VM_PAGE(MIPS_CACHED_TO_PHYS(pteva));
@@ -1175,7 +1168,8 @@
m->wire_count++;
} else {
/*
- * Here if the pte page isn't mapped, or if it has been deallocated.
+	 * We get here if the pte page isn't mapped, or if it has
+	 * been deallocated.
*/
m = _pmap_allocpte(pmap, ptepindex, flags);
if (m == NULL && (flags & M_WAITOK))
@@ -1267,7 +1261,6 @@
}
continue;
}
-
/*
* This index is bogus, but out of the way
*/
@@ -1284,26 +1277,25 @@
ptppaddr = VM_PAGE_TO_PHYS(nkpg);
if (ptppaddr >= MIPS_KSEG0_LARGEST_PHYS) {
- /* We need to do something here, but I am not
- * sure what. We can access anything in the
- * 0 - 512Meg region, but if we get a page to
- * go in the kernel segmap that is outside of
- * of that we really need to have another mapping
- * beyond the temporary ones I have. Not sure
- * how to do this yet. FIXME FIXME.
- */
- panic("Gak, can't handle a k-page table outside of lower 512Meg");
+ /*
+ * We need to do something here, but I am not sure
+ * what. We can access anything in the 0 - 512Meg
+ * region, but if we get a page to go in the kernel
+	 * segmap that is outside of that, we really need
+ * to have another mapping beyond the temporary ones
+ * I have. Not sure how to do this yet. FIXME FIXME.
+ */
+ panic("Gak, can't handle a k-page table outside of lower 512Meg");
}
- pte = (pt_entry_t *) MIPS_PHYS_TO_CACHED(ptppaddr);
- segtab_pde(kernel_segmap, kernel_vm_end) = (pd_entry_t) pte;
+ pte = (pt_entry_t *)MIPS_PHYS_TO_CACHED(ptppaddr);
+ segtab_pde(kernel_segmap, kernel_vm_end) = (pd_entry_t)pte;
/*
- * The R[4-7]?00 stores only one copy of the Global bit
- * in the translation lookaside buffer for each 2 page
- * entry. Thus invalid entrys must have the Global bit
- * set so when Entry LO and Entry HI G bits are anded
- * together they will produce a global bit to store in
- * the tlb.
+ * The R[4-7]?00 stores only one copy of the Global bit in
+ * the translation lookaside buffer for each 2 page entry.
+	 * Thus invalid entries must have the Global bit set so when
+	 * Entry LO and Entry HI G bits are ANDed together they will
+	 * produce a global bit to store in the TLB.
*/
for (i = 0; i < NPTEPG; i++, pte++)
*pte = PG_G;
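
The Global-bit comment above (and its twin in the bootstrap hunk) boils
down to one property of R4000-class TLBs: an entry covers an even/odd
page pair and behaves as global only when the G bit is set in both
halves. An invalid half must therefore still carry PG_G, or it would
strip the global attribute from the valid kernel mapping paired with
it. A tiny illustration, with an assumed PG_G bit position:

	#define PG_G	0x00000001	/* assumed bit position for the sketch */

	/* A pair is global only if both EntryLo halves agree. */
	static int
	tlb_pair_is_global(unsigned int entrylo0, unsigned int entrylo1)
	{
		return ((entrylo0 & entrylo1 & PG_G) != 0);
	}
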
@@ -1345,9 +1337,9 @@
pv_entry_count++;
if ((pv_entry_count > pv_entry_high_water) &&
- (pmap_pagedaemon_waken == 0)) {
+ (pmap_pagedaemon_waken == 0)) {
pmap_pagedaemon_waken = 1;
- wakeup (&vm_pages_needed);
+ wakeup(&vm_pages_needed);
}
return uma_zalloc(pvzone, M_NOWAIT);
}
@@ -1407,7 +1399,7 @@
*/
static void
pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m,
- boolean_t wired)
+ boolean_t wired)
{
pv_entry_t pv;
@@ -1457,7 +1449,7 @@
#if defined(PMAP_DIAGNOSTIC)
if (pmap_nw_modified(oldpte)) {
printf(
- "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ "pmap_remove: modified page not writable: va: 0x%x, pte: 0x%x\n",
va, oldpte);
}
#endif
@@ -1492,11 +1484,10 @@
if (!ptq || !pmap_pte_v(ptq)) {
return;
}
-
/*
* get a local va for mappings for this pmap.
*/
- (void) pmap_remove_pte(pmap, ptq, va);
+ (void)pmap_remove_pte(pmap, ptq, va);
pmap_invalidate_page(pmap, va);
return;
@@ -1523,21 +1514,18 @@
PMAP_LOCK(pmap);
/*
- * special handling of removing one page. a very
- * common operation and easy to short circuit some
- * code.
+ * special handling of removing one page. a very common operation
+ * and easy to short circuit some code.
*/
if ((sva + PAGE_SIZE) == eva) {
pmap_remove_page(pmap, sva);
goto out;
}
-
for (va = sva; va < eva; va = nva) {
if (!*pmap_pde(pmap, va)) {
nva = mips_segtrunc(va + MIPS_SEGSIZE);
continue;
}
-
pmap_remove_page(pmap, va);
nva = va + PAGE_SIZE;
}
@@ -1599,7 +1587,7 @@
#if defined(PMAP_DIAGNOSTIC)
if (pmap_nw_modified(tpte)) {
printf(
- "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ "pmap_remove_all: modified page not writable: va: 0x%x, pte: 0x%x\n",
pv->pv_va, tpte);
}
#endif
@@ -1636,7 +1624,6 @@
pmap_remove(pmap, sva, eva);
return;
}
-
if (prot & VM_PROT_WRITE)
return;
@@ -1654,7 +1641,6 @@
sva = mips_segtrunc(sva + MIPS_SEGSIZE);
continue;
}
-
/*
* If pte is invalid, skip this page
*/
@@ -1680,7 +1666,6 @@
}
}
}
-
pbits = (pbits & ~PG_M) | PG_RO;
if (pbits != *pte) {
@@ -1688,7 +1673,6 @@
goto retry;
pmap_update_page(pmap, sva, pbits);
}
-
sva += PAGE_SIZE;
}
vm_page_unlock_queues();
@@ -1732,13 +1716,12 @@
PMAP_LOCK(pmap);
/*
- * In the case that a page table page is not
- * resident, we are creating it here.
+ * In the case that a page table page is not resident, we are
+ * creating it here.
*/
if (va < VM_MAXUSER_ADDRESS) {
mpte = pmap_allocpte(pmap, va, M_WAITOK);
}
-
pte = pmap_pte(pmap, va);
/*
@@ -1748,7 +1731,6 @@
panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n",
(void *)pmap->pm_segtab, va);
}
-
pa = VM_PAGE_TO_PHYS(m);
om = NULL;
origpte = *pte;
@@ -1761,8 +1743,8 @@
/*
* Wiring change, just update stats. We don't worry about
* wiring PT pages as they remain resident as long as there
- * are valid mappings in them. Hence, if a user page is wired,
- * the PT page will be also.
+ * are valid mappings in them. Hence, if a user page is
+ * wired, the PT page will be also.
*/
if (wired && ((origpte & PG_W) == 0))
pmap->pm_stats.wired_count++;
@@ -1772,7 +1754,7 @@
#if defined(PMAP_DIAGNOSTIC)
if (pmap_nw_modified(origpte)) {
printf(
- "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
+ "pmap_enter: modified page not writable: va: 0x%x, pte: 0x%x\n",
va, origpte);
}
#endif
@@ -1784,8 +1766,8 @@
mpte->wire_count--;
/*
- * We might be turning off write access to the page,
- * so we go ahead and sense modify status.
+ * We might be turning off write access to the page, so we
+ * go ahead and sense modify status.
*/
if (page_is_managed(opa)) {
om = m;
@@ -1798,13 +1780,12 @@
*/
if (opa) {
if (origpte & PG_W)
- pmap->pm_stats.wired_count--;
+ pmap->pm_stats.wired_count--;
if (page_is_managed(opa)) {
om = PHYS_TO_VM_PAGE(opa);
pmap_remove_entry(pmap, om, va);
}
-
if (mpte != NULL) {
mpte->wire_count--;
KASSERT(mpte->wire_count > 0,
@@ -1819,10 +1800,9 @@
* raise IPL while manipulating pv_table since pmap_enter can be
* called at interrupt time.
*/
- if((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0) {
+ if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
pmap_insert_entry(pmap, va, mpte, m, wired);
}
-
/*
* Increment counters
*/
@@ -1849,8 +1829,8 @@
newpte |= PG_G;
/*
- * if the mapping or permission bits are different, we need
- * to update the pte.
+ * if the mapping or permission bits are different, we need to
+ * update the pte.
*/
if (origpte != newpte) {
>>> TRUNCATED FOR MAIL (1000 lines) <<<