svn commit: r262750 - head/sys/amd64/amd64

Jung-uk Kim <jkim@FreeBSD.org>
Tue Mar 4 20:16:01 UTC 2014


Author: jkim
Date: Tue Mar  4 20:16:00 2014
New Revision: 262750
URL: http://svnweb.freebsd.org/changeset/base/262750

Log:
  Revert changes accidentally committed in r262748.

Modified:
  head/sys/amd64/amd64/mpboot.S
  head/sys/amd64/amd64/pmap.c

Modified: head/sys/amd64/amd64/mpboot.S
==============================================================================
--- head/sys/amd64/amd64/mpboot.S	Tue Mar  4 20:09:23 2014	(r262749)
+++ head/sys/amd64/amd64/mpboot.S	Tue Mar  4 20:16:00 2014	(r262750)
@@ -36,7 +36,6 @@
 	.p2align 4,0
 	.globl	mptramp_start
 mptramp_start:
-#ifndef	__clang__
 	.code16
 	/*
 	 * The AP enters here in response to the startup IPI.
@@ -66,43 +65,6 @@ mptramp_start:
 	/* Enable protected mode */
 	movl	$CR0_PE, %eax
 	mov	%eax, %cr0 
-#else
-	/*
-	 * The AP enters here in response to the startup IPI.
-	 * We are in real mode. %cs is the only segment register set.
-	 */
-	cli				/* make sure no interrupts */
-	mov	%cs, %eax		/* copy %cs to %ds.  Remember these */
-	mov	%eax, %ds		/* are offsets rather than selectors */
-	mov	%eax, %ss
-
-	/*
-	 * Find relocation base and patch the gdt descript and ljmp targets
-	 */
-	.byte	0x66
-	xorl	%ebx, %ebx
-	mov	%cs, %ebx
-	.byte	0x66
-	sall	$4, %ebx		/* %ebx is now our relocation base */
-	.byte	0x66, 0x09, 0x1e
-	.word	lgdt_desc-mptramp_start+2
-	.byte	0x66, 0x09, 0x1e
-	.word	jmp_32-mptramp_start+2
-	.byte	0x66, 0x09, 0x1e
-	.word	jmp_64-mptramp_start+1
-
-	/*
-	 * Load the descriptor table pointer.  We'll need it when running
-	 * in 16 bit protected mode.
-	 */
-	.byte	0x0f, 0x01, 0x16
-	.word	lgdt_desc-mptramp_start
-
-	/* Enable protected mode */
-	.byte	0x66
-	movl	$CR0_PE, %eax
-	mov	%eax, %cr0 
-#endif
 
 	/*
 	 * Now execute a far jump to turn on protected mode.  This
@@ -126,7 +88,7 @@ jmp_32:
 	.code32
 protmode:
 	mov	$bootdata-gdt, %eax
-	mov	%eax, %ds
+	mov	%ax, %ds
 
 	/* Turn on the PAE, PSE and PGE bits for when paging is enabled */
 	mov	%cr4, %eax

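For context, the unchanged lines at the end of the hunk above turn on the PAE, PSE and PGE bits in %cr4 before paging is enabled on the AP. Expressed in C with the kernel's cpufunc helpers, the same bit manipulation would look roughly like the sketch below. This is illustrative only: the trampoline itself must stay in assembly, since the AP has no C environment at that point, and example_enable_paging_features() is a hypothetical name, not a function in the tree.

	#include <sys/types.h>

	#include <machine/cpufunc.h>	/* rcr4(), load_cr4() */
	#include <machine/specialreg.h>	/* CR4_PAE, CR4_PSE, CR4_PGE */

	/*
	 * Illustrative sketch: set the PAE, PSE and PGE feature bits in
	 * %cr4, mirroring what mptramp_start does in assembly before it
	 * enables paging.
	 */
	static void
	example_enable_paging_features(void)
	{
		u_long cr4;

		cr4 = rcr4();
		cr4 |= CR4_PAE | CR4_PSE | CR4_PGE;
		load_cr4(cr4);
	}
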
Modified: head/sys/amd64/amd64/pmap.c
==============================================================================
--- head/sys/amd64/amd64/pmap.c	Tue Mar  4 20:09:23 2014	(r262749)
+++ head/sys/amd64/amd64/pmap.c	Tue Mar  4 20:16:00 2014	(r262750)
@@ -146,13 +146,6 @@ __FBSDID("$FreeBSD$");
 #endif
 
 static __inline boolean_t
-pmap_type_guest(pmap_t pmap)
-{
-
-	return ((pmap->pm_type == PT_EPT) || (pmap->pm_type == PT_RVI));
-}
-
-static __inline boolean_t
 pmap_emulate_ad_bits(pmap_t pmap)
 {
 
@@ -166,7 +159,6 @@ pmap_valid_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
-	case PT_RVI:
 		mask = X86_PG_V;
 		break;
 	case PT_EPT:
@@ -189,7 +181,6 @@ pmap_rw_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
-	case PT_RVI:
 		mask = X86_PG_RW;
 		break;
 	case PT_EPT:
@@ -214,7 +205,6 @@ pmap_global_bit(pmap_t pmap)
 	case PT_X86:
 		mask = X86_PG_G;
 		break;
-	case PT_RVI:
 	case PT_EPT:
 		mask = 0;
 		break;
@@ -232,7 +222,6 @@ pmap_accessed_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
-	case PT_RVI:
 		mask = X86_PG_A;
 		break;
 	case PT_EPT:
@@ -255,7 +244,6 @@ pmap_modified_bit(pmap_t pmap)
 
 	switch (pmap->pm_type) {
 	case PT_X86:
-	case PT_RVI:
 		mask = X86_PG_M;
 		break;
 	case PT_EPT:
@@ -1114,9 +1102,6 @@ pmap_swap_pat(pmap_t pmap, pt_entry_t en
 		if ((entry & x86_pat_bits) != 0)
 			entry ^= x86_pat_bits;
 		break;
-	case PT_RVI:
-		/* XXX: PAT support. */
-		break;
 	case PT_EPT:
 		/*
 		 * Nothing to do - the memory attributes are represented
@@ -1160,11 +1145,6 @@ pmap_cache_bits(pmap_t pmap, int mode, b
 			cache_bits |= PG_NC_PWT;
 		break;
 
-	case PT_RVI:
-		/* XXX: PAT support. */
-		cache_bits = 0;
-		break;
-
 	case PT_EPT:
 		cache_bits = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(mode);
 		break;
@@ -1185,10 +1165,6 @@ pmap_cache_mask(pmap_t pmap, boolean_t i
 	case PT_X86:
 		mask = is_pde ? X86_PG_PDE_CACHE : X86_PG_PTE_CACHE;
 		break;
-	case PT_RVI:
-		/* XXX: PAT support. */
-		mask = 0;
-		break;
 	case PT_EPT:
 		mask = EPT_PG_IGNORE_PAT | EPT_PG_MEMORY_TYPE(0x7);
 		break;
@@ -1213,7 +1189,6 @@ pmap_update_pde_store(pmap_t pmap, pd_en
 	switch (pmap->pm_type) {
 	case PT_X86:
 		break;
-	case PT_RVI:
 	case PT_EPT:
 		/*
 		 * XXX
@@ -1249,7 +1224,7 @@ pmap_update_pde_invalidate(pmap_t pmap, 
 {
 	pt_entry_t PG_G;
 
-	if (pmap_type_guest(pmap))
+	if (pmap->pm_type == PT_EPT)
 		return;
 
 	KASSERT(pmap->pm_type == PT_X86,
@@ -1363,7 +1338,7 @@ pmap_invalidate_page(pmap_t pmap, vm_off
 	cpuset_t other_cpus;
 	u_int cpuid;
 
-	if (pmap_type_guest(pmap)) {
+	if (pmap->pm_type == PT_EPT) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1441,7 +1416,7 @@ pmap_invalidate_range(pmap_t pmap, vm_of
 	vm_offset_t addr;
 	u_int cpuid;
 
-	if (pmap_type_guest(pmap)) {
+	if (pmap->pm_type == PT_EPT) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1500,7 +1475,7 @@ pmap_invalidate_all(pmap_t pmap)
 	uint64_t cr3;
 	u_int cpuid;
 
-	if (pmap_type_guest(pmap)) {
+	if (pmap->pm_type == PT_EPT) {
 		pmap_invalidate_ept(pmap);
 		return;
 	}
@@ -1620,7 +1595,7 @@ pmap_update_pde(pmap_t pmap, vm_offset_t
 	cpuid = PCPU_GET(cpuid);
 	other_cpus = all_cpus;
 	CPU_CLR(cpuid, &other_cpus);
-	if (pmap == kernel_pmap || pmap_type_guest(pmap))
+	if (pmap == kernel_pmap || pmap->pm_type == PT_EPT)
 		active = all_cpus;
 	else {
 		active = pmap->pm_active;
@@ -1658,7 +1633,6 @@ pmap_invalidate_page(pmap_t pmap, vm_off
 		if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 			invlpg(va);
 		break;
-	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;
@@ -1678,7 +1652,6 @@ pmap_invalidate_range(pmap_t pmap, vm_of
 			for (addr = sva; addr < eva; addr += PAGE_SIZE)
 				invlpg(addr);
 		break;
-	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;
@@ -1696,7 +1669,6 @@ pmap_invalidate_all(pmap_t pmap)
 		if (pmap == kernel_pmap || !CPU_EMPTY(&pmap->pm_active))
 			invltlb();
 		break;
-	case PT_RVI:
 	case PT_EPT:
 		pmap->pm_eptgen++;
 		break;

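For context on the pmap.c helpers touched above: each pmap type encodes its page-table control bits differently, so pmap.c resolves them through per-type accessors such as pmap_valid_bit(), and callers test entries against the returned mask rather than a hard-coded bit. A simplified, self-contained sketch of that pattern follows; the example_* names, the stripped-down struct pmap, and the single-bit EPT mask are illustrative assumptions, not code from the tree.

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t pt_entry_t;

	/* Simplified stand-ins for the kernel's definitions. */
	enum pmap_type { PT_X86, PT_EPT };
	struct pmap { enum pmap_type pm_type; };

	#define	X86_PG_V	0x0000000000000001ULL	/* x86 PTE: valid/present */
	#define	EPT_PG_READ	0x0000000000000001ULL	/* EPT PTE: readable */

	/* Per-type "valid" mask, mirroring the shape of pmap_valid_bit(). */
	static pt_entry_t
	example_valid_bit(const struct pmap *pmap)
	{

		switch (pmap->pm_type) {
		case PT_X86:
			return (X86_PG_V);
		case PT_EPT:
			return (EPT_PG_READ);
		}
		return (0);
	}

	/*
	 * Caller-side pattern: fetch the mask once, then test entries
	 * against it instead of an architecture-specific constant.
	 */
	static bool
	example_pte_is_valid(const struct pmap *pmap, pt_entry_t pte)
	{
		pt_entry_t PG_V;

		PG_V = example_valid_bit(pmap);
		return ((pte & PG_V) != 0);
	}

The accidentally committed r262748 had extended these switches with a PT_RVI case and added the pmap_type_guest() predicate; this revert removes both, as shown in the diff above.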
