svn commit: r184040 - in head/sys/i386: include xen

Kip Macy kmacy at FreeBSD.org
Sun Oct 19 01:27:41 UTC 2008


Author: kmacy
Date: Sun Oct 19 01:27:40 2008
New Revision: 184040
URL: http://svn.freebsd.org/changeset/base/184040

Log:
  - move gdt, ldt allocation to before KPT allocation
  - fix bugs where we would:
      - try to map the hypervisor's address space
      - accidentally kick out an existing kernel mapping for some domain-creation memory allocation sizes
      - accidentally skip a 2MB kernel mapping for some domain-creation memory allocation sizes
  - don't rely on trapping into Xen for rcr2(); read %cr2 from the per-vcpu shared info instead (sketched below)
  - whitespace cleanups
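
The rcr2 item deserves a short note: reading %cr2 directly is a
privileged operation, so a Xen guest would trap into the hypervisor on
every access.  Xen instead mirrors the faulting address into the
per-vcpu slot of the shared info page, where the guest can fetch it
with an ordinary load.  Below is a minimal, standalone sketch of that
idea; the structure names and the MAX_VIRT_CPUS_SKETCH constant are
simplified assumptions, not the real xen/interface definitions.

#include <stdio.h>

#define MAX_VIRT_CPUS_SKETCH	4	/* placeholder, not Xen's constant */

struct vcpu_arch_sketch {
	unsigned long cr2;	/* faulting linear address, mirrored by Xen */
};

struct vcpu_info_sketch {
	struct vcpu_arch_sketch arch;
};

struct shared_info_sketch {
	struct vcpu_info_sketch vcpu_info[MAX_VIRT_CPUS_SKETCH];
};

/* Stands in for the shared page the hypervisor maps into the guest. */
static struct shared_info_sketch shared_page;

static unsigned long
xen_rcr2_sketch(int vcpu)
{

	/* A plain memory read; no hypercall, no trap into Xen. */
	return (shared_page.vcpu_info[vcpu].arch.cr2);
}

int
main(void)
{

	/* Pretend the hypervisor recorded a fault address for vcpu 0. */
	shared_page.vcpu_info[0].arch.cr2 = 0xdeadb000UL;
	printf("cr2 for vcpu 0: %#lx\n", xen_rcr2_sketch(0));
	return (0);
}

In the committed code the read is
HYPERVISOR_shared_info->vcpu_info[curcpu].arch.cr2, and rcr2() in
cpufunc.h now short-circuits to xen_rcr2() under #ifdef XEN, as the
diff below shows.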

Modified:
  head/sys/i386/include/cpufunc.h
  head/sys/i386/xen/xen_machdep.c

Modified: head/sys/i386/include/cpufunc.h
==============================================================================
--- head/sys/i386/include/cpufunc.h	Sun Oct 19 01:23:30 2008	(r184039)
+++ head/sys/i386/include/cpufunc.h	Sun Oct 19 01:27:40 2008	(r184040)
@@ -45,6 +45,7 @@
 #ifdef XEN
 extern void xen_cli(void);
 extern void xen_sti(void);
+extern u_int xen_rcr2(void);
 extern void xen_load_cr3(u_int data);
 extern void xen_tlb_flush(void);
 extern void xen_invlpg(u_int addr);
@@ -94,7 +95,7 @@ disable_intr(void)
 #ifdef XEN
 	xen_cli();
 #else	
-    __asm __volatile("cli" : : : "memory");
+	__asm __volatile("cli" : : : "memory");
 #endif
 }
 
@@ -119,7 +120,7 @@ enable_intr(void)
 {
 #ifdef XEN
 	xen_sti();
-#else	
+#else
 	__asm __volatile("sti");
 #endif
 }
@@ -423,6 +424,9 @@ rcr2(void)
 {
 	u_int	data;
 
+#ifdef XEN
+	return (xen_rcr2());
+#endif
 	__asm __volatile("movl %%cr2,%0" : "=r" (data));
 	return (data);
 }

Modified: head/sys/i386/xen/xen_machdep.c
==============================================================================
--- head/sys/i386/xen/xen_machdep.c	Sun Oct 19 01:23:30 2008	(r184039)
+++ head/sys/i386/xen/xen_machdep.c	Sun Oct 19 01:27:40 2008	(r184040)
@@ -202,21 +202,20 @@ static struct mmu_log xpq_queue_log[MAX_
 static int xpq_idx[MAX_VIRT_CPUS];  
 static mmu_update_t xpq_queue[MAX_VIRT_CPUS][XPQUEUE_SIZE];
 
-#define XPQ_QUEUE xpq_queue[vcpu]
-#define XPQ_IDX xpq_idx[vcpu]
-#define SET_VCPU() int vcpu = smp_processor_id()
-
-
-#define XPQ_QUEUE_LOG xpq_queue_log[vcpu]
+#define	XPQ_QUEUE_LOG xpq_queue_log[vcpu]
+#define	XPQ_QUEUE xpq_queue[vcpu]
+#define	XPQ_IDX xpq_idx[vcpu]
+#define	SET_VCPU() int vcpu = smp_processor_id()
 #else
 	
 static mmu_update_t xpq_queue[XPQUEUE_SIZE];
 static struct mmu_log xpq_queue_log[XPQUEUE_SIZE];
 static int xpq_idx = 0;
 
-#define XPQ_QUEUE xpq_queue
-#define XPQ_IDX xpq_idx
-#define SET_VCPU()
+#define	XPQ_QUEUE_LOG xpq_queue_log
+#define	XPQ_QUEUE xpq_queue
+#define	XPQ_IDX xpq_idx
+#define	SET_VCPU()
 #endif /* !SMP */
 
 #define XPQ_IDX_INC atomic_add_int(&XPQ_IDX, 1);
@@ -371,6 +370,13 @@ xen_sti(void)
 	__sti();
 }
 
+u_int
+xen_rcr2(void)
+{
+
+	return (HYPERVISOR_shared_info->vcpu_info[curcpu].arch.cr2);
+}
+
 void
 _xen_machphys_update(vm_paddr_t mfn, vm_paddr_t pfn, char *file, int line)
 {
@@ -810,7 +816,7 @@ void
 initvalues(start_info_t *startinfo)
 { 
 	int l3_pages, l2_pages, l1_pages, offset;
-	vm_offset_t cur_space;
+	vm_offset_t cur_space, cur_space_pt;
 	struct physdev_set_iopl set_iopl;
 	
 	vm_paddr_t KPTphys, IdlePTDma;
@@ -825,15 +831,13 @@ initvalues(start_info_t *startinfo)
 	vm_paddr_t pdir_shadow_ma;
 #endif
 	unsigned long i;
-	int ncpus;
+	int ncpus = MAXCPU;
 
-	nkpt = min(max((startinfo->nr_pages >> NPGPTD_SHIFT), nkpt),
-	    NPGPTD*NPDEPG - KPTDI);
-#ifdef SMP
-	ncpus = MAXCPU;
-#else
-	ncpus = 1;
-#endif	
+	nkpt = min(
+		min(
+			max((startinfo->nr_pages >> NPGPTD_SHIFT), nkpt),
+		    NPGPTD*NPDEPG - KPTDI),
+		    (HYPERVISOR_VIRT_START - KERNBASE) >> PDRSHIFT);
 
 	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);	
 #ifdef notyet
@@ -859,7 +863,24 @@ initvalues(start_info_t *startinfo)
 	bootmem_start = bootmem_current = (char *)cur_space;
 	cur_space += (4 * PAGE_SIZE);
 	bootmem_end = (char *)cur_space;
+
+	/* allocate page for gdt */
+	gdt = (union descriptor *)cur_space;
+	cur_space += PAGE_SIZE*ncpus;
+
+        /* allocate page for ldt */
+	ldt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
+	cur_space += PAGE_SIZE;
 	
+	HYPERVISOR_shared_info = (shared_info_t *)cur_space;
+	cur_space += PAGE_SIZE;
+
+	xen_store = (struct ringbuf_head *)cur_space;
+	cur_space += PAGE_SIZE;
+
+	console_page = (char *)cur_space;
+	cur_space += PAGE_SIZE;
+
 #ifdef ADD_ISA_HOLE
 	shift_phys_machine(xen_phys_machine, xen_start_info->nr_pages);
 #endif
@@ -936,6 +957,16 @@ initvalues(start_info_t *startinfo)
 	 * Unpin the current PDPT
 	 */
 	xen_pt_unpin(IdlePDPTma);
+	
+	for (i = 0; i < 20; i++) {
+		int startidx = ((KERNBASE >> 18) & PAGE_MASK) >> 3;
+
+		if (IdlePTD[startidx + i] == 0) {
+			l1_pages = i;
+			break;
+		}	
+	}
+
 #endif  /* PAE */
 	
 	/* unmap remaining pages from initial 4MB chunk
@@ -947,7 +978,7 @@ initvalues(start_info_t *startinfo)
 	}
 	
 	PT_UPDATES_FLUSH();
-  
+
 	memcpy(((uint8_t *)IdlePTDnew) + ((unsigned int)(KERNBASE >> 18)),
 	    ((uint8_t *)IdlePTD) + ((KERNBASE >> 18) & PAGE_MASK),
 	    l1_pages*sizeof(pt_entry_t));
@@ -960,18 +991,22 @@ initvalues(start_info_t *startinfo)
 	xen_pgdpt_pin(xpmap_ptom(VTOP(IdlePDPTnew)));
 
 	/* allocate remainder of nkpt pages */
-	for (offset = (KERNBASE >> PDRSHIFT), i = l1_pages - 1; i < nkpt;
+	cur_space_pt = cur_space;
+	for (offset = (KERNBASE >> PDRSHIFT), i = l1_pages; i < nkpt;
 	     i++, cur_space += PAGE_SIZE) {
 		pdir = (offset + i) / NPDEPG;
 		curoffset = ((offset + i) % NPDEPG);
-		
+		if (((offset + i) << PDRSHIFT) == VM_MAX_KERNEL_ADDRESS)
+			break;
+
 		/*
 		 * make sure that all the initial page table pages
 		 * have been zeroed
 		 */
-		PT_SET_MA(cur_space, xpmap_ptom(VTOP(cur_space)) | PG_V | PG_RW);
-		bzero((char *)cur_space, PAGE_SIZE);
-		PT_SET_MA(cur_space, (vm_paddr_t)0);
+		PT_SET_MA(cur_space_pt,
+		    xpmap_ptom(VTOP(cur_space)) | PG_V | PG_RW);
+		bzero((char *)cur_space_pt, PAGE_SIZE);
+		PT_SET_MA(cur_space_pt, (vm_paddr_t)0);
 		xen_pt_pin(xpmap_ptom(VTOP(cur_space)));
 		xen_queue_pt_update((vm_paddr_t)(IdlePTDnewma[pdir] +
 			curoffset*sizeof(vm_paddr_t)), 
@@ -994,17 +1029,6 @@ initvalues(start_info_t *startinfo)
 	IdlePDPT = IdlePDPTnew;
 	IdlePDPTma = IdlePDPTnewma;
 	
-	/* allocate page for gdt */
-	gdt = (union descriptor *)cur_space;
-	cur_space += PAGE_SIZE*ncpus;
-
-        /* allocate page for ldt */
-	ldt = (union descriptor *)cur_space; cur_space += PAGE_SIZE;
-	cur_space += PAGE_SIZE;
-	
-	HYPERVISOR_shared_info = (shared_info_t *)cur_space;
-	cur_space += PAGE_SIZE;
-
 	/*
 	 * shared_info is an unsigned long so this will randomly break if
 	 * it is allocated above 4GB - I guess people are used to that
@@ -1015,13 +1039,8 @@ initvalues(start_info_t *startinfo)
 	
 	printk("#4\n");
 
-	xen_store = (struct ringbuf_head *)cur_space;
-	cur_space += PAGE_SIZE;
-	
 	xen_store_ma = (((vm_paddr_t)xen_start_info->store_mfn) << PAGE_SHIFT);
 	PT_SET_MA(xen_store, xen_store_ma | PG_KERNEL);
-	console_page = (char *)cur_space;
-	cur_space += PAGE_SIZE;
 	console_page_ma = (((vm_paddr_t)xen_start_info->console.domU.mfn) << PAGE_SHIFT);
 	PT_SET_MA(console_page, console_page_ma | PG_KERNEL);
 

