PERFORCE change 71735 for review
John-Mark Gurney
jmg at FreeBSD.org
Thu Feb 24 15:09:32 GMT 2005
http://perforce.freebsd.org/chv.cgi?CH=71735
Change 71735 by jmg at jmg_carbon on 2005/02/24 15:08:45
clean this code up some... add comments that took me a while to
figure out so the next person won't take as long... remove unused
variables...
properly do math for allocating the l2 page tables from a page...
make sure we have l2 mappings after the kernel instead of overwriting
them with the kernel, causing problems if you loaded a large enough
kernel...
add more mappings to phys_avail, we can still get a few more out of
the stack mappings we move to after kernel, add comment reminding
someone to do that..
Affected files ...
.. //depot/projects/arm/src/sys/arm/ep93xx/ts7200_machdep.c#5 edit
Differences ...
==== //depot/projects/arm/src/sys/arm/ep93xx/ts7200_machdep.c#5 (text+ko) ====
@@ -98,15 +98,16 @@
#include <arm/ep93xx/tsarmreg.h>
#include <arm/ep93xx/tsarmvar.h>
+/*
+ * a _NUM suffix is a count, non _NUM is a position (index) in kernel_pt_table
+ */
#define KERNEL_PT_SYS 0 /* L2 table for mapping vectors page */
-#define KERNEL_PT_KERNEL 1 /* L2 table for mapping kernel */
-#define KERNEL_PT_KERNEL_NUM 4
- /* L2 tables for mapping kernel VM */
-#define KERNEL_PT_VMDATA (KERNEL_PT_KERNEL + KERNEL_PT_KERNEL_NUM)
+#define KERNEL_PT_AFKERNEL 1 /* L2 table for mapping after kernel */
+#define KERNEL_PT_AFKERNEL_NUM 3
-#define KERNEL_PT_VMDATA_NUM 4 /* start with 16MB of KVM */
-#define NUM_KERNEL_PTS (KERNEL_PT_VMDATA + KERNEL_PT_VMDATA_NUM)
+/* this should be evenly divisible by PAGE_SIZE / L2_TABLE_SIZE_REAL (or 4) */
+#define NUM_KERNEL_PTS (KERNEL_PT_AFKERNEL + KERNEL_PT_AFKERNEL_NUM)
/* Define various stack sizes in pages */
#define IRQ_STACK_SIZE 1
@@ -134,7 +135,7 @@
/* Physical and virtual addresses for some global pages */
-vm_paddr_t phys_avail[10];
+vm_paddr_t phys_avail[14];
vm_paddr_t physical_start;
vm_paddr_t physical_end;
vm_offset_t physical_pages;
@@ -208,7 +209,7 @@
{
struct pv_addr kernel_l1pt;
int loop;
- u_int kerneldatasize, symbolsize;
+ u_int kerneldatasize;
u_int l1pagetable;
vm_offset_t freemempos;
vm_offset_t freemem_pt;
@@ -249,8 +250,7 @@
physical_end = (vm_offset_t) &end + SDRAM_START - 0xc0000000;
#define KERNEL_TEXT_BASE (KERNBASE + 0x00200000)
kerneldatasize = (u_int32_t)&end - (u_int32_t)KERNEL_TEXT_BASE;
- symbolsize = 0;
- freemempos = 0x00200000;
+ freemempos = 0x00200000; /* freemempos will grow down from here */
/* Define a macro to simplify memory allocation */
#define valloc_pages(var, np) \
alloc_pages((var).pv_pa, (np)); \
@@ -265,23 +265,29 @@
while (((freemempos - L1_TABLE_SIZE) & (L1_TABLE_SIZE - 1)) != 0)
freemempos -= PAGE_SIZE;
valloc_pages(kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE);
+
+#define NUM_L2_PER_PAGE (PAGE_SIZE / L2_TABLE_SIZE_REAL)
+ /* setup the kernel l2 page tables */
for (loop = 0; loop < NUM_KERNEL_PTS; ++loop) {
- if (!(loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL))) {
- valloc_pages(kernel_pt_table[loop],
- L2_TABLE_SIZE / PAGE_SIZE);
+ if (!(loop % NUM_L2_PER_PAGE)) {
+ /* next page to split up */
+ valloc_pages(kernel_pt_table[loop], 1);
} else {
- kernel_pt_table[loop].pv_pa = freemempos -
- (loop % (PAGE_SIZE / L2_TABLE_SIZE_REAL)) *
+ /* split up last allocated page */
+ kernel_pt_table[loop].pv_pa = kernel_pt_table[(loop /
+ NUM_L2_PER_PAGE) * NUM_L2_PER_PAGE].pv_pa +
+ (loop % NUM_L2_PER_PAGE) *
L2_TABLE_SIZE_REAL;
kernel_pt_table[loop].pv_va =
kernel_pt_table[loop].pv_pa + 0xc0000000;
}
i++;
}
- freemempos -= 2 * PAGE_SIZE;
+ /* we'll add this space 0x100000 -> freemem_pt as available later */
freemem_pt = freemempos;
freemempos = 0x00100000;
+
/*
* Allocate a page for the system page mapped to V0x00000000
* This page will just contain the system vectors and can be
@@ -298,13 +304,6 @@
valloc_pages(msgbufpv, round_page(MSGBUF_SIZE) / PAGE_SIZE);
/*
- * Allocate memory for the l1 and l2 page tables. The scheme to avoid
- * wasting memory by allocating the l1pt on the first 16k memory was
- * taken from NetBSD rpc_machdep.c. NKPT should be greater than 12 for
- * this to work (which is supposed to be the case).
- */
-
- /*
* Now we start construction of the L1 page table
* We start by mapping the L2 page tables into the L1.
* This means that we can replace L1 mappings later on if necessary
@@ -314,31 +313,35 @@
/* Map the L2 pages tables in the L1 page table */
pmap_link_l2pt(l1pagetable, ARM_VECTORS_HIGH & ~(0x00100000 - 1),
&kernel_pt_table[KERNEL_PT_SYS]);
- for (i = 0; i < KERNEL_PT_KERNEL_NUM; i++) {
- pmap_link_l2pt(l1pagetable, KERNBASE + i * 0x00100000,
- &kernel_pt_table[KERNEL_PT_KERNEL + i]);
- }
- for (loop = 0; loop < KERNEL_PT_VMDATA_NUM; ++loop)
- pmap_link_l2pt(l1pagetable, KERNBASE + (i + loop) * 0x00100000,
- &kernel_pt_table[KERNEL_PT_VMDATA + loop]);
- pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START,
- freemempos - 0x00000000 + 0x1000,
+ /* map kernel stacks */
+ pmap_map_chunk(l1pagetable, KERNBASE, SDRAM_START, 0x100000,
VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
+ /* map the l1 page table, and the l2's so far */
pmap_map_chunk(l1pagetable, KERNBASE + 0x100000, SDRAM_START + 0x100000,
0x100000, VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE);
/* map the kernel into the page table */
pmap_map_chunk(l1pagetable, KERNBASE + 0x200000, SDRAM_START + 0x200000,
(((uint32_t)(&end) - KERNBASE - 0x200000) + L1_S_SIZE) &
~(L1_S_SIZE - 1), VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE);
- freemem_after = ((int)&end + PAGE_SIZE) & ~(PAGE_SIZE - 1);
+ freemem_after = round_page((int)&end + 1);
afterkern = round_page(((vm_offset_t)&end + L1_S_SIZE) & ~(L1_S_SIZE
- 1));
+ for (i = 0; i < KERNEL_PT_AFKERNEL_NUM; i++) {
+ pmap_link_l2pt(l1pagetable, afterkern + i * 0x00100000,
+ &kernel_pt_table[KERNEL_PT_AFKERNEL + i]);
+ }
+
/* Map the stack pages */
#define alloc_afterkern(va, pa, size) \
va = freemem_after; \
pa = freemem_after - 0xc0000000;\
freemem_after += size;
+
+ /*
+ * XXX - if we can fit it after the kernel, we end up dropping the
+ * old mappings on the floor, pick them up, k?
+ */
if (freemem_after + KSTACK_PAGES * PAGE_SIZE < afterkern) {
alloc_afterkern(kernelstack.pv_va, kernelstack.pv_pa,
KSTACK_PAGES * PAGE_SIZE);
@@ -448,8 +451,7 @@
pmap_curmaxkvaddr = afterkern;
- pmap_bootstrap(pmap_curmaxkvaddr,
- 0xd0000000, &kernel_l1pt);
+ pmap_bootstrap(pmap_curmaxkvaddr, 0xd0000000, &kernel_l1pt);
msgbufp = (void*)msgbufpv.pv_va;
msgbufinit(msgbufp, MSGBUF_SIZE);
mutex_init();
@@ -457,16 +459,18 @@
freemempos &= ~(PAGE_SIZE - 1);
phys_avail[0] = SDRAM_START;
phys_avail[1] = freemempos;
- phys_avail[0] = round_page(virtual_avail - KERNBASE + SDRAM_START);
- phys_avail[1] = trunc_page(0x00800000 - 1);
- phys_avail[2] = 0x01000000;
- phys_avail[3] = trunc_page(0x01800000 - 1);
- phys_avail[4] = 0x04000000;
- phys_avail[5] = trunc_page(0x04800000 - 1);
- phys_avail[6] = 0x05000000;
- phys_avail[7] = trunc_page(0x05800000 - 1);
- phys_avail[8] = 0;
- phys_avail[9] = 0;
+ phys_avail[2] = 0x0100000;
+ phys_avail[3] = freemem_pt;
+ phys_avail[4] = round_page(virtual_avail - KERNBASE + SDRAM_START);
+ phys_avail[5] = trunc_page(0x00800000 - 1);
+ phys_avail[6] = 0x01000000;
+ phys_avail[7] = trunc_page(0x01800000 - 1);
+ phys_avail[8] = 0x04000000;
+ phys_avail[9] = trunc_page(0x04800000 - 1);
+ phys_avail[10] = 0x05000000;
+ phys_avail[11] = trunc_page(0x05800000 - 1);
+ phys_avail[12] = 0;
+ phys_avail[13] = 0;
/* Do basic tuning, hz etc */
init_param1();
More information about the p4-projects
mailing list