PERFORCE change 131754 for review
Kip Macy
kmacy at FreeBSD.org
Wed Dec 26 22:42:28 PST 2007
http://perforce.freebsd.org/chv.cgi?CH=131754
Change 131754 by kmacy at pandemonium:kmacy:xen31 on 2007/12/27 06:41:40
Progress: moved forward from crashing in HYPERVISOR_set_gdt (called from init386)
to crashing in pmap_copy_ma_range (called from pmap_pinit) while creating init.
Affected files ...
.. //depot/projects/xen31/sys/i386/i386/machdep.c#7 edit
.. //depot/projects/xen31/sys/i386/i386/support.s#2 edit
.. //depot/projects/xen31/sys/i386/include/cpufunc.h#3 edit
.. //depot/projects/xen31/sys/i386/include/segments.h#3 edit
.. //depot/projects/xen31/sys/i386/include/xen/xenvar.h#4 edit
.. //depot/projects/xen31/sys/i386/xen/pmap.c#6 edit
.. //depot/projects/xen31/sys/i386/xen/xen_machdep.c#6 edit
Differences ...
==== //depot/projects/xen31/sys/i386/i386/machdep.c#7 (text+ko) ====
@@ -1451,6 +1451,7 @@
0, 0,
1, /* default 32 vs 16 bit size */
1 /* limit granularity (byte/page units)*/ },
+#ifndef XEN
/* GPROC0_SEL 9 Proc 0 Tss Descriptor */
{
0x0, /* segment base address */
@@ -1461,7 +1462,6 @@
0, 0,
0, /* unused - default 32 vs 16 bit size */
0 /* limit granularity (byte/page units)*/ },
-#ifndef XEN
/* GLDT_SEL 10 LDT Descriptor */
{ (int) ldt, /* segment base address */
sizeof(ldt)-1, /* length - all address space */
@@ -2005,6 +2005,7 @@
getenv_quad("dcons.size", &dcons_size) == 0)
dcons_addr = 0;
+#ifndef XEN
/*
* physmap is in bytes, so when converting to page boundaries,
* round up the start address and round down the end address.
@@ -2122,7 +2123,10 @@
}
*pte = 0;
invltlb();
-
+#else
+ phys_avail[1] = xen_start_info->nr_pages*PAGE_SIZE;
+#endif /* XEN */
+
/*
* XXX
* The last chunk must contain at least one page plus the message
@@ -2186,34 +2190,26 @@
init_param1();
/*
- * Make gdt memory segments. All segments cover the full 4GB
- * of address space and permissions are enforced at page level.
- */
-
- /*
- * XEN occupies the upper 64MB of virtual address space
+ * XEN occupies a portion of the upper virtual address space
* At its base it manages an array mapping machine page frames
* to physical page frames - hence we need to be able to
* access 4GB - (64MB - 4MB + 64k)
*/
- gdt_segs[GCODE_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
- gdt_segs[GDATA_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
- gdt_segs[GUCODE_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
- gdt_segs[GUDATA_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
- gdt_segs[GUFS_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
- gdt_segs[GUGS_SEL].ssd_limit = atop(0 - ((1 << 26) -
- (1 << 22) + (1 << 16)));
+ gdt_segs[GPRIV_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GUFS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GUGS_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GUCODE_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GUDATA_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
+ gdt_segs[GBIOSLOWMEM_SEL].ssd_limit = atop(HYPERVISOR_VIRT_START + 256*PAGE_SIZE);
pc = &__pcpu[0];
- gdt_segs[GPRIV_SEL].ssd_limit = atop(0 - 1);
gdt_segs[GPRIV_SEL].ssd_base = (int) pc;
gdt_segs[GPROC0_SEL].ssd_base = (int) &pc->pc_common_tss;
+ PT_SET_MA(gdt, xpmap_ptom(VTOP(gdt)) | PG_V | PG_RW);
+ bzero(gdt, PAGE_SIZE);
for (x = 0; x < NGDT; x++)
ssdtosd(&gdt_segs[x], &gdt[x].sd);
@@ -2224,8 +2220,8 @@
printk("PTmap=%p\n", PTmap);
printk("addr=%p\n", *vtopte((unsigned long)gdt) & ~PG_RW);
- PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~PG_RW);
gdtmachpfn = vtomach(gdt) >> PAGE_SHIFT;
+ PT_SET_MA(gdt, *vtopte((unsigned long)gdt) & ~(PG_RW|PG_M|PG_A));
PANIC_IF(HYPERVISOR_set_gdt(&gdtmachpfn, 512) != 0);
lgdt(&r_gdt /* unused */);
gdt_set = 1;
@@ -2255,6 +2251,8 @@
mtx_init(&icu_lock, "icu", NULL, MTX_SPIN | MTX_NOWITNESS | MTX_NOPROFILE);
/* make ldt memory segments */
+ PT_SET_MA(ldt, xpmap_ptom(VTOP(ldt)) | PG_V | PG_RW);
+ bzero(ldt, PAGE_SIZE);
ldt_segs[LUCODE_SEL].ssd_limit = atop(0 - 1);
ldt_segs[LUDATA_SEL].ssd_limit = atop(0 - 1);
for (x = 0; x < sizeof ldt_segs / sizeof ldt_segs[0]; x++)
==== //depot/projects/xen31/sys/i386/i386/support.s#2 (text+ko) ====
@@ -1459,10 +1459,11 @@
*/
/* void lgdt(struct region_descriptor *rdp); */
ENTRY(lgdt)
+#ifndef XEN
/* reload the descriptor table */
movl 4(%esp),%eax
lgdt (%eax)
-
+#endif
/* flush the prefetch q */
jmp 1f
nop
==== //depot/projects/xen31/sys/i386/include/cpufunc.h#3 (text+ko) ====
@@ -93,7 +93,11 @@
static __inline void
disable_intr(void)
{
+#ifdef XEN
+ __cli();
+#else
__asm __volatile("cli" : : : "memory");
+#endif
}
static __inline void
@@ -115,7 +119,11 @@
static __inline void
enable_intr(void)
{
+#ifdef XEN
+ __sti();
+#else
__asm __volatile("sti");
+#endif
}
#ifdef _KERNEL
@@ -445,8 +453,11 @@
static __inline void
invltlb(void)
{
-
+#ifdef XEN
+ xen_tlb_flush();
+#else
load_cr3(rcr3());
+#endif
}
/*
@@ -456,8 +467,11 @@
static __inline void
invlpg(u_int addr)
{
-
+#ifdef XEN
+ xen_invlpg(addr);
+#else
__asm __volatile("invlpg %0" : : "m" (*(char *)addr) : "memory");
+#endif
}
static __inline u_int
@@ -662,16 +676,24 @@
intr_disable(void)
{
register_t eflags;
-
+#ifdef XEN
+ __save_and_cli(eflags);
+#else
eflags = read_eflags();
disable_intr();
+#endif
return (eflags);
}
static __inline void
intr_restore(register_t eflags)
{
+
+#ifdef XEN
+ __restore_flags(eflags);
+#else
write_eflags(eflags);
+#endif
}
#else /* !(__GNUCLIKE_ASM && __CC_SUPPORTS___INLINE) */
==== //depot/projects/xen31/sys/i386/include/segments.h#3 (text+ko) ====
@@ -47,7 +47,11 @@
*/
#define ISPL(s) ((s)&3) /* what is the priority level of a selector */
+#ifdef XEN
+#define SEL_KPL 1 /* kernel priority level */
+#else
#define SEL_KPL 0 /* kernel priority level */
+#endif
#define SEL_UPL 3 /* user priority level */
#define ISLDT(s) ((s)&SEL_LDT) /* is it local or global */
#define SEL_LDT 4 /* local descriptor table */
@@ -222,8 +226,11 @@
#define GBIOSARGS_SEL 17 /* BIOS interface (Arguments) */
#define GNDIS_SEL 18 /* For the NDIS layer */
+#ifdef XEN
+#define NGDT 9
+#else
#define NGDT 19
-
+#endif
/*
* Entries in the Local Descriptor Table (LDT)
*/
==== //depot/projects/xen31/sys/i386/include/xen/xenvar.h#4 (text+ko) ====
@@ -22,7 +22,7 @@
#define TRACE_DEBUG(argflags, _f, _a...)
#endif
-extern vm_paddr_t *xen_machine_phys;
+extern xen_pfn_t *xen_machine_phys;
/* Xen starts physical pages after the 4MB ISA hole -
* FreeBSD doesn't
*/
==== //depot/projects/xen31/sys/i386/xen/pmap.c#6 (text+ko) ====
@@ -594,11 +594,6 @@
pt_entry_t *pte;
vm_offset_t va;
- panic("IMPLEMENT XXX");
-
-
-
-
va = *head;
if (va == 0)
return (va); /* Out of memory */
@@ -606,19 +601,28 @@
*head = *pte;
if (*head & PG_V)
panic("pmap_ptelist_alloc: va with PG_V set!");
+#ifdef XEN
+ PT_SET_MA(va, (vm_paddr_t)0);
+#else
*pte = 0;
+#endif
return (va);
}
static void
pmap_ptelist_free(vm_offset_t *head, vm_offset_t va)
{
+#ifndef XEN
pt_entry_t *pte;
if (va & PG_V)
panic("pmap_ptelist_free: freeing va with PG_V set!");
+
pte = vtopte(va);
*pte = *head; /* virtual! PG_V is 0 though */
+#else
+ PT_SET_MA(va, (vm_paddr_t)*head);
+#endif
*head = va;
}
@@ -2524,7 +2528,7 @@
#if 0 && defined(PMAP_DIAGNOSTIC)
else {
pd_entry_t *pdeaddr = pmap_pde(pmap, va);
- origpte = PT_GET(pdeaddr);
+ origpte = *pdeaddr;
if ((origpte & PG_V) == 0) {
panic("pmap_enter: invalid kernel page table page, pdir=%p, pde=%p, va=%p\n",
pmap->pm_pdir[PTDPTDI], origpte, va);
@@ -2533,7 +2537,7 @@
#endif
pde = pmap_pde(pmap, va);
- if ((PT_GET(pde) & PG_PS) != 0)
+ if ((*pde & PG_PS) != 0)
panic("pmap_enter: attempted pmap_enter on 4MB page");
pte = pmap_pte_quick(pmap, va);
@@ -3103,9 +3107,11 @@
if (*sysmaps->CMAP2)
panic("pmap_zero_page: CMAP2 busy");
sched_pin();
+#ifdef XEN
+ PT_SET_VA(sysmaps->CMAP2, PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M, TRUE);
+#else
*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M;
- PT_SET_VA(sysmaps->CMAP2, PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M, TRUE);
-
+#endif
invlcaddr(sysmaps->CADDR2);
pagezero(sysmaps->CADDR2);
PT_CLEAR_VA(sysmaps->CMAP2, TRUE);
==== //depot/projects/xen31/sys/i386/xen/xen_machdep.c#6 (text+ko) ====
@@ -75,7 +75,7 @@
int xendebug_flags;
start_info_t *xen_start_info;
shared_info_t *HYPERVISOR_shared_info;
-vm_paddr_t *xen_machine_phys = ((vm_paddr_t *)VADDR(1008, 0));
+xen_pfn_t *xen_machine_phys = machine_to_phys_mapping;
xen_pfn_t *xen_phys_machine;
int preemptable, init_first;
extern unsigned int avail_space;
@@ -646,8 +646,7 @@
#ifdef PAE
-static vm_paddr_t *ptdir_shadow;
-static vm_paddr_t *pdir_shadow[4];
+static vm_paddr_t *pdir_shadow;
#else
static vm_paddr_t *pdir_shadow;
#endif
@@ -690,7 +689,7 @@
void
initvalues(start_info_t *startinfo)
{
- int i, l3_pages, l2_pages, l1_pages;
+ int i, l3_pages, l2_pages, l1_pages, offset;
vm_offset_t cur_space;
physdev_op_t op;
vm_paddr_t KPTphys, IdlePTDma;
@@ -698,8 +697,8 @@
vm_offset_t KPTphysoff, tmpva;
vm_paddr_t shinfo;
#ifdef PAE
- vm_paddr_t IdlePDPTma, ptdir_shadow_ma, IdlePDPTnewma, IdlePTDnewma;
- vm_paddr_t pdir_shadow_ma[4];
+ vm_paddr_t IdlePDPTma, IdlePDPTnewma;
+ vm_paddr_t pdir_shadow_ma[4], IdlePTDnewma[4];
pd_entry_t *IdlePDPTnew, *IdlePTDnew;
#else
vm_paddr_t pdir_shadow_ma;
@@ -757,21 +756,11 @@
xen_start_info->nr_pages, xen_start_info->shared_info,
xen_start_info->flags, xen_start_info->pt_base,
xen_start_info->mod_start, xen_start_info->mod_len);
-#if 0
- XENPRINTF("c0100000: %08x\n",
- xpmap_get_bootpte(0xc0100000));
-#endif
/* Map proc0's KSTACK */
proc0kstack = cur_space; cur_space += (KSTACK_PAGES * PAGE_SIZE);
printk("proc0kstack=%u\n", proc0kstack);
- /* allocate page for gdt */
- gdt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
-
- /* allocate page for ldt */
- ldt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
-
/* vm86/bios stack */
cur_space += PAGE_SIZE;
@@ -780,13 +769,12 @@
cur_space += (PAGE_SIZE * 3);
#ifdef PAE
- for (i = 0; i < 4; i++) {
+ pdir_shadow = (vm_paddr_t *)cur_space; cur_space += 4*PAGE_SIZE;
+ bzero(pdir_shadow, 4*PAGE_SIZE);
/* initialize page directory shadow page */
- pdir_shadow[i] = (vm_paddr_t *)cur_space;
- cur_space += PAGE_SIZE;
- bzero(pdir_shadow[i], PAGE_SIZE);
- pdir_shadow_ma[i] = xpmap_ptom((vm_paddr_t)VTOP(pdir_shadow[i]));
- }
+ for (i = 0; i < 4; i++)
+ pdir_shadow_ma[i] = xpmap_ptom((vm_paddr_t)
+ VTOP((uint8_t *)pdir_shadow + i*PAGE_SIZE));
#else
/* initialize page directory shadow page */
pdir_shadow = (vm_paddr_t *)cur_space; cur_space += PAGE_SIZE;
@@ -799,59 +787,32 @@
bzero(IdlePDPTnew, PAGE_SIZE);
IdlePDPTnewma = xpmap_ptom(VTOP(IdlePDPTnew));
- IdlePTDnew = (pd_entry_t *)cur_space; cur_space += PAGE_SIZE;
- bzero(IdlePTDnew, PAGE_SIZE);
- IdlePTDnewma = xpmap_ptom(VTOP(IdlePTDnew));
- memcpy(IdlePTDnew, IdlePTD, PAGE_SIZE/2);
-
+ IdlePTDnew = (pd_entry_t *)cur_space; cur_space += 4*PAGE_SIZE;
+ bzero(IdlePTDnew, 4*PAGE_SIZE);
+ for (i = 0; i < 4; i++)
+ IdlePTDnewma[i] = xpmap_ptom(
+ VTOP((uint8_t *)IdlePTDnew + i*PAGE_SIZE));
- /* initialize page directory page table shadow page */
- ptdir_shadow = (vm_paddr_t *)cur_space; cur_space += PAGE_SIZE;
- bzero(ptdir_shadow, PAGE_SIZE);
- ptdir_shadow_ma = xpmap_ptom(VTOP(ptdir_shadow));
-
-
/*
* L3
*/
- IdlePDPTnew[2] = ptdir_shadow_ma | PG_V;
- IdlePDPTnew[3] = IdlePTDnewma | PG_V;
+ for (i = 0; i < 4; i++)
+ IdlePDPTnew[i] = IdlePTDnewma[i] | PG_V;
/*
* L2
*/
-
for (i = 0; i < 4; i++)
- ptdir_shadow[508 + i] = pdir_shadow_ma[i] | PG_V;
- /*
- * L1 - can't copy Xen's mappings
- */
- for (i = 0; i < 256; i++)
- pdir_shadow[3][i] = IdlePTDnew[i] & ~(PG_RW|PG_A);
+ IdlePTDnew[1532 + i] = pdir_shadow_ma[i] | PG_V;
/*
* Map IdlePTD at PTD
*/
- pdir_shadow[2][511] = IdlePTDnewma | PG_V;
+ for (i = 0; i < 4; i++)
+ pdir_shadow[1532 + i] = IdlePTDnewma[i] | PG_V;
- for (i = 0; i < 4; i++)
- PT_SET_MA(pdir_shadow[i], pdir_shadow_ma[i] | PG_V);
- PT_SET_MA(ptdir_shadow, ptdir_shadow_ma | PG_V);
PT_SET_MA(IdlePDPTnew, IdlePDPTnewma | PG_V);
- PT_SET_MA(IdlePTDnew, IdlePTDnewma | PG_V);
-#if 0
- xen_pgd_pin(ptdir_shadow_ma);
- xen_pgd_pin(IdlePTDnewma);
-#endif
- xen_load_cr3(VTOP(IdlePDPTnew));
- IdlePTD = IdlePTDnew;
- IdlePTDma = IdlePTDnewma;
- IdlePDPT = IdlePDPTnew;
- IdlePDPTma = IdlePDPTnewma;
+ xen_pt_unpin(IdlePDPTma);
#endif
-#if 0
- /* setup shadow mapping first so vtomach will work */
- xen_pt_pin(pdir_shadow_ma);
-#endif
#ifndef PAE
xen_queue_pt_update(IdlePTDma + PTDPTDI*sizeof(vm_paddr_t),
pdir_shadow_ma | PG_KERNEL);
@@ -862,33 +823,47 @@
KPTphys | PG_V | PG_A);
xen_flush_queue();
#endif
- /* unmap remaining pages from initial 2/4MB chunk */
- for (tmpva = cur_space; (tmpva & PDRMASK) != 0; tmpva += PAGE_SIZE)
+ /* unmap remaining pages from initial 4MB chunk */
+ for (tmpva = cur_space; (tmpva & ((1<<22)-1)) != 0; tmpva += PAGE_SIZE)
PT_SET_MA(tmpva, (vm_paddr_t)0);
-
+#ifdef PAE
+ offset = 0;
+#else
+ offset = KPTDI;
+#endif
/* allocate remainder of NKPT pages */
- printk("#1\n");
- for (i = 0; i < NKPT-l1_pages; i++, cur_space += PAGE_SIZE) {
- /* KERNBASE left unmapped (+ 1) KERNLOAD already mapped (+1) == + 2 */
- printk("#2: i=%i,offs=%u->%x\n", i, KPTDI + i + 1,
+ for (i = l1_pages; i < NKPT; i++, cur_space += PAGE_SIZE)
+ xen_queue_pt_update((vm_paddr_t)(IdlePTDma + (offset + i)*sizeof(vm_paddr_t)),
xpmap_ptom(VTOP(cur_space)) | PG_KERNEL);
-/*
- xen_queue_pt_update((vm_paddr_t)(IdlePTDma + KPTDI + i + 1),
- xpmap_ptom(VTOP(cur_space)) | PG_KERNEL);*/
+
PT_UPDATES_FLUSH();
-#ifdef PAE
- xen_queue_pt_update(pdir_shadow_ma[3] + (KPTDI + i + 1)*sizeof(vm_paddr_t),
- xpmap_ptom(VTOP(cur_space)) | PG_V | PG_A);
-#else
- xen_queue_pt_update(pdir_shadow_ma + (KPTDI + i + 1)*sizeof(vm_paddr_t),
- xpmap_ptom(VTOP(cur_space)) | PG_V | PG_A);
-#endif
- PT_UPDATES_FLUSH();
+ /*
+ * L1 - can't copy Xen's mappings
+ */
+ for (i = 0; i < 256; i++)
+ pdir_shadow[1536 + i] = IdlePTD[i] & ~(PG_RW|PG_A|PG_M);
+ memcpy((uint8_t *)IdlePTDnew + 3*PAGE_SIZE, IdlePTD, PAGE_SIZE/2);
+ printk("do remapping\n");
+ for (i = 0; i < 4; i++) {
+ PT_SET_MA((uint8_t *)IdlePTDnew + i*PAGE_SIZE,
+ IdlePTDnewma[i] | PG_V);
+ PT_SET_MA((uint8_t *)pdir_shadow + i*PAGE_SIZE,
+ pdir_shadow_ma[i] | PG_V);
}
+
+ xen_load_cr3(VTOP(IdlePDPTnew));
- printk("#3\n");
+ IdlePTD = IdlePTDnew;
+ IdlePDPT = IdlePDPTnew;
+ IdlePDPTma = IdlePDPTnewma;
+
+ /* allocate page for gdt */
+ gdt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
+ /* allocate page for ldt */
+ ldt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
HYPERVISOR_shared_info = (shared_info_t *)cur_space;
+ cur_space += PAGE_SIZE;
/*
* shared_info is an unsigned long so this will randomly break if
@@ -897,18 +872,18 @@
*/
shinfo = xen_start_info->shared_info;
PT_SET_MA(HYPERVISOR_shared_info, shinfo | PG_KERNEL);
- cur_space += PAGE_SIZE;
-
+
printk("#4\n");
xen_store = (struct ringbuf_head *)cur_space;
+ cur_space += PAGE_SIZE;
+
xen_store_ma = (xen_start_info->store_mfn << PAGE_SHIFT);
PT_SET_MA(xen_store, xen_store_ma | PG_KERNEL);
+ console_page = (char *)cur_space;
cur_space += PAGE_SIZE;
- console_page = (char *)cur_space;
console_page_ma = (xen_start_info->console.domU.mfn << PAGE_SHIFT);
PT_SET_MA(console_page, console_page_ma | PG_KERNEL);
- cur_space += PAGE_SIZE;
printk("#5\n");
HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = (unsigned long)xen_phys_machine;
@@ -930,7 +905,7 @@
PANIC_IF(HYPERVISOR_physdev_op(&op));
printk("#6\n");
-#if 1
+#if 0
/* add page table for KERNBASE */
xen_queue_pt_update(IdlePTDma + KPTDI*sizeof(vm_paddr_t),
xpmap_ptom(VTOP(cur_space) | PG_KERNEL));
@@ -957,8 +932,9 @@
#endif
printk("#7\n");
physfree = VTOP(cur_space);
- init_first = (cur_space >> PAGE_SHIFT);
-
+ init_first = physfree >> PAGE_SHIFT;
+ IdlePTD = (pd_entry_t *)VTOP(IdlePTD);
+ IdlePDPT = (pd_entry_t *)VTOP(IdlePDPT);
printk("#8, proc0kstack=%u\n", proc0kstack);
}
More information about the p4-projects
mailing list