socsvn commit: r272441 - soc2014/op/patches

op at FreeBSD.org
Thu Aug 14 22:01:29 UTC 2014


Author: op
Date: Thu Aug 14 20:33:38 2014
New Revision: 272441
URL: http://svnweb.FreeBSD.org/socsvn/?view=rev&rev=272441

Log:
  Added the final version of the patch freebsd-head+intel-smap+selfpatch-all.diff
  
  Signed-off-by: Oliver Pinter <oliver.pntr at gmail.com>
  
  

Added:
  soc2014/op/patches/
  soc2014/op/patches/freebsd-head+intel-smap+selfpatch-all.diff
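
Note on the selfpatch mechanism used below: the clac()/stac() inlines added
to cpufunc.h assemble a placeholder (KSP_INSTR_NOP3_C) plus a record in the
set_selfpatch_set linker set; the _clac/_stac/_xsave_xsaveopt assembler
macros presumably follow the same pattern with suitable default instructions
(their definitions are not part of this mail).  lf_selfpatch(), called from
hammer_time() right after initializecpu(), then rewrites each placeholder
with the replacement instruction when the CPU advertises the corresponding
feature.  Below is a rough C sketch of one record, inferred from the
.quad/.int layout in the cpufunc.h inline assembly; the struct and field
names are illustrative guesses, the real definitions live in sys/selfpatch.h,
which is not shown here:

    #include <stdint.h>  /* standalone sketch only; the kernel has its own types */

    /* Illustrative layout of one set_selfpatch_set entry. */
    struct ksp_patch_entry {
            uint64_t patchable;        /* .quad 0723b: address of the NOP site */
            uint64_t patch;            /* .quad 0725b: address of the replacement */
            uint32_t patchable_size;   /* .int 0724b-0723b: bytes at the NOP site */
            uint32_t patch_size;       /* .int 0726b-0725b: bytes of the replacement */
            uint32_t feature_selector; /* .int KSP_CPUID_STDEXT */
            uint32_t feature;          /* .int CPUID_STDEXT_SMAP */
            uint64_t reserved;         /* trailing .quad 0 */
    };

The ldscript.amd64 hunk keeps set_selfpatch_set and set_selfpatch_patch_set
in dedicated output sections with __start_/__stop_ symbols, so the records
and the replacement instruction bytes can be located and applied at run time.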

Added: soc2014/op/patches/freebsd-head+intel-smap+selfpatch-all.diff
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ soc2014/op/patches/freebsd-head+intel-smap+selfpatch-all.diff	Thu Aug 14 20:33:38 2014	(r272441)
@@ -0,0 +1,2265 @@
+diff --git a/.gitignore b/.gitignore
+new file mode 100644
+index 0000000..f3b9484
+--- /dev/null
++++ b/.gitignore
+@@ -0,0 +1 @@
++.clang_complete
+diff --git a/sys/amd64/amd64/cpu_switch.S b/sys/amd64/amd64/cpu_switch.S
+index 7c37a41..a50a80a 100644
+--- a/sys/amd64/amd64/cpu_switch.S
++++ b/sys/amd64/amd64/cpu_switch.S
+@@ -35,6 +35,7 @@
+ 
+ #include <machine/asmacros.h>
+ #include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
+ 
+ #include "assym.s"
+ #include "opt_sched.h"
+@@ -121,10 +122,7 @@ done_store_dr:
+ 1:	movq	%rdx,%rcx
+ 	movl	xsave_mask,%eax
+ 	movl	xsave_mask+4,%edx
+-	.globl	ctx_switch_xsave
+-ctx_switch_xsave:
+-	/* This is patched to xsaveopt if supported, see fpuinit_bsp1() */
+-	xsave	(%r8)
++	_xsave_xsaveopt	(%r8)
+ 	movq	%rcx,%rdx
+ 2:	smsw	%ax
+ 	orb	$CR0_TS,%al
+diff --git a/sys/amd64/amd64/exception.S b/sys/amd64/amd64/exception.S
+index 2a908a9..1aed663 100644
+--- a/sys/amd64/amd64/exception.S
++++ b/sys/amd64/amd64/exception.S
+@@ -42,6 +42,7 @@
+ #include <machine/psl.h>
+ #include <machine/trap.h>
+ #include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
+ 
+ #include "assym.s"
+ 
+@@ -198,6 +199,7 @@ alltraps_pushregs_no_rdi:
+ 	movq	%r14,TF_R14(%rsp)
+ 	movq	%r15,TF_R15(%rsp)
+ 	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
++	_clac
+ 	cld
+ 	FAKE_MCOUNT(TF_RIP(%rsp))
+ #ifdef KDTRACE_HOOKS
+@@ -278,6 +280,7 @@ IDTVEC(dblfault)
+ 	movw	%es,TF_ES(%rsp)
+ 	movw	%ds,TF_DS(%rsp)
+ 	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
++	_clac
+ 	cld
+ 	testb	$SEL_RPL_MASK,TF_CS(%rsp) /* Did we come from kernel? */
+ 	jz	1f			/* already running with kernel GS.base */
+@@ -381,6 +384,7 @@ IDTVEC(fast_syscall)
+ 	movq	%r14,TF_R14(%rsp)	/* C preserved */
+ 	movq	%r15,TF_R15(%rsp)	/* C preserved */
+ 	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
++	_clac
+ 	cld
+ 	FAKE_MCOUNT(TF_RIP(%rsp))
+ 	movq	PCPU(CURTHREAD),%rdi
+@@ -452,6 +456,7 @@ IDTVEC(fast_syscall32)
+  */
+ 
+ IDTVEC(nmi)
++	_clac
+ 	subq	$TF_RIP,%rsp
+ 	movl	$(T_NMI),TF_TRAPNO(%rsp)
+ 	movq	$0,TF_ADDR(%rsp)
+@@ -535,6 +540,7 @@ nmi_calltrap:
+ 	movq	%rdx,%rdi	/* destination stack pointer */
+ 
+ 	shrq	$3,%rcx		/* trap frame size in long words */
++	_clac
+ 	cld
+ 	rep
+ 	movsq			/* copy trapframe */
+diff --git a/sys/amd64/amd64/fpu.c b/sys/amd64/amd64/fpu.c
+index 0f4b2b5..a897ef7 100644
+--- a/sys/amd64/amd64/fpu.c
++++ b/sys/amd64/amd64/fpu.c
+@@ -203,16 +203,6 @@ fpuinit_bsp1(void)
+ 		xsave_mask &= ~XFEATURE_AVX512;
+ 	if ((xsave_mask & XFEATURE_MPX) != XFEATURE_MPX)
+ 		xsave_mask &= ~XFEATURE_MPX;
+-
+-	cpuid_count(0xd, 0x1, cp);
+-	if ((cp[0] & CPUID_EXTSTATE_XSAVEOPT) != 0) {
+-		/*
+-		 * Patch the XSAVE instruction in the cpu_switch code
+-		 * to XSAVEOPT.  We assume that XSAVE encoding used
+-		 * REX byte, and set the bit 4 of the r/m byte.
+-		 */
+-		ctx_switch_xsave[3] |= 0x10;
+-	}
+ }
+ 
+ /*
+diff --git a/sys/amd64/amd64/identcpu.c b/sys/amd64/amd64/identcpu.c
+index 3b66369..226389e 100644
+--- a/sys/amd64/amd64/identcpu.c
++++ b/sys/amd64/amd64/identcpu.c
+@@ -566,13 +566,21 @@ identify_cpu(void)
+ 		 */
+ 		if (cpu_feature2 & CPUID2_HV) {
+ 			cpu_stdext_disable = CPUID_STDEXT_FSGSBASE |
+-			    CPUID_STDEXT_SMEP;
++			    CPUID_STDEXT_SMEP | CPUID_STDEXT_SMAP;
+ 		} else
+ 			cpu_stdext_disable = 0;
+ 		TUNABLE_INT_FETCH("hw.cpu_stdext_disable", &cpu_stdext_disable);
+ 		cpu_stdext_feature &= ~cpu_stdext_disable;
+ 	}
+ 
++	if (cpu_high >= 13) {
++		if (cpu_feature2 & CPUID2_XSAVE) {
++			cpuid_count(13, 1, regs);
++			cpu_extstate = regs[0];
++		} else
++			cpu_extstate = 0;
++	}
++
+ 	if (cpu_vendor_id == CPU_VENDOR_INTEL ||
+ 	    cpu_vendor_id == CPU_VENDOR_AMD ||
+ 	    cpu_vendor_id == CPU_VENDOR_CENTAUR) {
+diff --git a/sys/amd64/amd64/initcpu.c b/sys/amd64/amd64/initcpu.c
+index 34a362d..f5976eb 100644
+--- a/sys/amd64/amd64/initcpu.c
++++ b/sys/amd64/amd64/initcpu.c
+@@ -78,6 +78,7 @@ u_int	cpu_max_ext_state_size;
+ u_int	cpu_mon_mwait_flags;	/* MONITOR/MWAIT flags (CPUID.05H.ECX) */
+ u_int	cpu_mon_min_size;	/* MONITOR minimum range size, bytes */
+ u_int	cpu_mon_max_size;	/* MONITOR minimum range size, bytes */
++u_int	cpu_extstate;		/* Extended State features */
+ 
+ SYSCTL_UINT(_hw, OID_AUTO, via_feature_rng, CTLFLAG_RD,
+ 	&via_feature_rng, 0, "VIA RNG feature available in CPU");
+@@ -168,13 +169,17 @@ initializecpu(void)
+ 		cr4 |= CR4_FSGSBASE;
+ 
+ 	/*
+-	 * Postpone enabling the SMEP on the boot CPU until the page
+-	 * tables are switched from the boot loader identity mapping
++	 * Postpone enabling the SMEP and SMAP on the boot CPU until the
++	 * page tables are switched from the boot loader identity mapping
+ 	 * to the kernel tables.  The boot loader enables the U bit in
+ 	 * its tables.
+ 	 */
+ 	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMEP))
+ 		cr4 |= CR4_SMEP;
++#ifdef INTEL_SMAP_SUPPORT
++	if (!IS_BSP() && (cpu_stdext_feature & CPUID_STDEXT_SMAP))
++		cr4 |= CR4_SMAP;
++#endif
+ 	load_cr4(cr4);
+ 	if ((amd_feature & AMDID_NX) != 0) {
+ 		msr = rdmsr(MSR_EFER) | EFER_NXE;
+diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c
+index f02045d..237cc7f 100644
+--- a/sys/amd64/amd64/machdep.c
++++ b/sys/amd64/amd64/machdep.c
+@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
+ #include "opt_perfmon.h"
+ #include "opt_platform.h"
+ #include "opt_sched.h"
++#include "opt_selfpatch.h"
+ 
+ #include <sys/param.h>
+ #include <sys/proc.h>
+@@ -81,6 +82,7 @@ __FBSDID("$FreeBSD$");
+ #include <sys/reboot.h>
+ #include <sys/rwlock.h>
+ #include <sys/sched.h>
++#include <sys/selfpatch.h>
+ #include <sys/signalvar.h>
+ #ifdef SMP
+ #include <sys/smp.h>
+@@ -1998,6 +2000,11 @@ hammer_time(u_int64_t modulep, u_int64_t physfree)
+ 	initializecpu();	/* Initialize CPU registers */
+ 	initializecpucache();
+ 
++#if defined(KSP_FRAMEWORK) || defined(INTEL_SMAP_SUPPORT)
++	/* self-patch the kernel text where needed */
++	lf_selfpatch(linker_kernel_file, 0);
++#endif
++
+ 	/* doublefault stack space, runs on ist1 */
+ 	common_tss[0].tss_ist1 = (long)&dblfault_stack[sizeof(dblfault_stack)];
+ 
+diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
+index 6adc86d..508dcdd 100644
+--- a/sys/amd64/amd64/pmap.c
++++ b/sys/amd64/amd64/pmap.c
+@@ -100,6 +100,7 @@ __FBSDID("$FreeBSD$");
+  *	and to when physical maps must be made correct.
+  */
+ 
++#include "opt_cpu.h"
+ #include "opt_pmap.h"
+ #include "opt_vm.h"
+ 
+@@ -429,7 +430,6 @@ static vm_page_t pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va,
+     vm_page_t m, vm_prot_t prot, vm_page_t mpte, struct rwlock **lockp);
+ static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
+ static int pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte);
+-static void pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
+ static vm_page_t pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va);
+ static void pmap_pde_attr(pd_entry_t *pde, int cache_bits, int mask);
+ static void pmap_promote_pde(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
+@@ -836,6 +836,15 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
+ 	if (cpu_stdext_feature & CPUID_STDEXT_SMEP)
+ 		load_cr4(rcr4() | CR4_SMEP);
+ 
++#ifdef INTEL_SMAP_SUPPORT
++	if (cpu_stdext_feature & CPUID_STDEXT_SMAP) {
++		printf("Intel SMAP: enabled\n");
++		load_cr4(rcr4() | CR4_SMAP);
++	} else {
++		printf("Intel SMAP: not supported or disabled\n");
++	}
++#endif
++
+ 	/*
+ 	 * Initialize the kernel pmap (which is statically allocated).
+ 	 */
+@@ -1915,7 +1924,7 @@ pmap_kenter(vm_offset_t va, vm_paddr_t pa)
+ 	pte_store(pte, pa | X86_PG_RW | X86_PG_V | X86_PG_G);
+ }
+ 
+-static __inline void
++__inline void
+ pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode)
+ {
+ 	pt_entry_t *pte;
+diff --git a/sys/amd64/amd64/support.S b/sys/amd64/amd64/support.S
+index 77dbf63..ed92e36 100644
+--- a/sys/amd64/amd64/support.S
++++ b/sys/amd64/amd64/support.S
+@@ -35,6 +35,8 @@
+ #include <machine/asmacros.h>
+ #include <machine/intr_machdep.h>
+ #include <machine/pmap.h>
++#include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
+ 
+ #include "assym.s"
+ 
+@@ -59,7 +61,7 @@ ENTRY(bzero)
+ 	stosb
+ 	ret
+ END(bzero)
+-	
++
+ /* Address: %rdi */
+ ENTRY(pagezero)
+ 	movq	$-PAGE_SIZE,%rdx
+@@ -137,7 +139,7 @@ ENTRY(bcopy)
+ 	cld
+ 	ret
+ END(bcopy)
+-	
++
+ /*
+  * Note: memcpy does not support overlapping copies
+  */
+@@ -181,10 +183,10 @@ ENTRY(pagecopy)
+ 	ret
+ END(pagecopy)
+ 
+-/* fillw(pat, base, cnt) */  
++/* fillw(pat, base, cnt) */
+ /*       %rdi,%rsi, %rdx */
+ ENTRY(fillw)
+-	movq	%rdi,%rax   
++	movq	%rdi,%rax
+ 	movq	%rsi,%rdi
+ 	movq	%rdx,%rcx
+ 	cld
+@@ -203,6 +205,9 @@ END(fillw)
+  * These routines set curpcb->pcb_onfault for the time they execute. When a
+  * protection violation occurs inside the functions, the trap handler
+  * returns to *curpcb->pcb_onfault instead of the function.
++ *
++ * Warning: when Intel SMAP is enabled on the CPU, the EFLAGS.AC bit is
++ * cleared before control reaches the fault handler.
+  */
+ 
+ /*
+@@ -244,12 +249,16 @@ ENTRY(copyout)
+ 
+ 	shrq	$3,%rcx
+ 	cld
++	_stac					/* open user-space */
+ 	rep
+ 	movsq
++	_clac					/* close user-space */
+ 	movb	%dl,%cl
+ 	andb	$7,%cl
++	_stac					/* open user-space */
+ 	rep
+ 	movsb
++	_clac					/* close user-space */
+ 
+ done_copyout:
+ 	xorl	%eax,%eax
+@@ -259,6 +268,9 @@ done_copyout:
+ 
+ 	ALIGN_TEXT
+ copyout_fault:
++	/*
++	 * WARNING: on a fault, EFLAGS.AC is cleared when Intel SMAP is available
++	 */
+ 	movq	PCPU(CURPCB),%rdx
+ 	movq	$0,PCB_ONFAULT(%rdx)
+ 	movq	$EFAULT,%rax
+@@ -290,12 +302,16 @@ ENTRY(copyin)
+ 	movb	%cl,%al
+ 	shrq	$3,%rcx				/* copy longword-wise */
+ 	cld
++	_stac					/* open user-space */
+ 	rep
+ 	movsq
++	_clac					/* close user-space */
+ 	movb	%al,%cl
+ 	andb	$7,%cl				/* copy remaining bytes */
++	_stac					/* open user-space */
+ 	rep
+ 	movsb
++	_clac					/* close user-space */
+ 
+ done_copyin:
+ 	xorl	%eax,%eax
+@@ -305,6 +321,9 @@ done_copyin:
+ 
+ 	ALIGN_TEXT
+ copyin_fault:
++	/*
++	 * WARNING: on a fault, EFLAGS.AC is cleared when Intel SMAP is available
++	 */
+ 	movq	PCPU(CURPCB),%rdx
+ 	movq	$0,PCB_ONFAULT(%rdx)
+ 	movq	$EFAULT,%rax
+@@ -324,10 +343,12 @@ ENTRY(casuword32)
+ 	ja	fusufault
+ 
+ 	movl	%esi,%eax			/* old */
++	_stac					/* open user-space */
+ #ifdef SMP
+ 	lock
+ #endif
+ 	cmpxchgl %edx,(%rdi)			/* new = %edx */
++	_clac					/* close user-space */
+ 
+ 	/*
+ 	 * The old value is in %eax.  If the store succeeded it will be the
+@@ -353,10 +374,12 @@ ENTRY(casuword)
+ 	ja	fusufault
+ 
+ 	movq	%rsi,%rax			/* old */
++	_stac					/* open user-space */
+ #ifdef SMP
+ 	lock
+ #endif
+ 	cmpxchgq %rdx,(%rdi)			/* new = %rdx */
++	_clac					/* close user-space */
+ 
+ 	/*
+ 	 * The old value is in %eax.  If the store succeeded it will be the
+@@ -385,10 +408,12 @@ ENTRY(fuword)
+ 	cmpq	%rax,%rdi			/* verify address is valid */
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movq	(%rdi),%rax
++	_clac					/* close user-space */
+ 	movq	$0,PCB_ONFAULT(%rcx)
+ 	ret
+-END(fuword64)	
++END(fuword64)
+ END(fuword)
+ 
+ ENTRY(fuword32)
+@@ -399,7 +424,9 @@ ENTRY(fuword32)
+ 	cmpq	%rax,%rdi			/* verify address is valid */
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movl	(%rdi),%eax
++	_clac					/* close user-space */
+ 	movq	$0,PCB_ONFAULT(%rcx)
+ 	ret
+ END(fuword32)
+@@ -426,7 +453,9 @@ ENTRY(fuword16)
+ 	cmpq	%rax,%rdi
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movzwl	(%rdi),%eax
++	_clac					/* close user-space */
+ 	movq	$0,PCB_ONFAULT(%rcx)
+ 	ret
+ END(fuword16)
+@@ -439,13 +468,18 @@ ENTRY(fubyte)
+ 	cmpq	%rax,%rdi
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movzbl	(%rdi),%eax
++	_clac					/* close user-space */
+ 	movq	$0,PCB_ONFAULT(%rcx)
+ 	ret
+ END(fubyte)
+ 
+ 	ALIGN_TEXT
+ fusufault:
++	/*
++	 * WARNING: on a fault, EFLAGS.AC is cleared when Intel SMAP is available
++	 */
+ 	movq	PCPU(CURPCB),%rcx
+ 	xorl	%eax,%eax
+ 	movq	%rax,PCB_ONFAULT(%rcx)
+@@ -466,7 +500,9 @@ ENTRY(suword)
+ 	cmpq	%rax,%rdi			/* verify address validity */
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movq	%rsi,(%rdi)
++	_clac					/* close user-space */
+ 	xorl	%eax,%eax
+ 	movq	PCPU(CURPCB),%rcx
+ 	movq	%rax,PCB_ONFAULT(%rcx)
+@@ -482,7 +518,9 @@ ENTRY(suword32)
+ 	cmpq	%rax,%rdi			/* verify address validity */
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movl	%esi,(%rdi)
++	_clac					/* close user-space */
+ 	xorl	%eax,%eax
+ 	movq	PCPU(CURPCB),%rcx
+ 	movq	%rax,PCB_ONFAULT(%rcx)
+@@ -497,7 +535,9 @@ ENTRY(suword16)
+ 	cmpq	%rax,%rdi			/* verify address validity */
+ 	ja	fusufault
+ 
++	_stac					/* open user-space */
+ 	movw	%si,(%rdi)
++	_clac					/* close user-space */
+ 	xorl	%eax,%eax
+ 	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
+ 	movq	%rax,PCB_ONFAULT(%rcx)
+@@ -513,7 +553,9 @@ ENTRY(subyte)
+ 	ja	fusufault
+ 
+ 	movl	%esi,%eax
++	_stac					/* open user-space */
+ 	movb	%al,(%rdi)
++	_clac					/* close user-space */
+ 	xorl	%eax,%eax
+ 	movq	PCPU(CURPCB),%rcx		/* restore trashed register */
+ 	movq	%rax,PCB_ONFAULT(%rcx)
+@@ -555,7 +597,9 @@ ENTRY(copyinstr)
+ 	decq	%rdx
+ 	jz	3f
+ 
++	_stac					/* open user-space */
+ 	lodsb
++	_clac					/* close user-space */
+ 	stosb
+ 	orb	%al,%al
+ 	jnz	2b
+@@ -584,7 +628,9 @@ cpystrflt_x:
+ 	testq	%r9,%r9
+ 	jz	1f
+ 	subq	%rdx,%r8
++	_stac					/* open user-space */
+ 	movq	%r8,(%r9)
++	_clac					/* close user-space */
+ 1:
+ 	ret
+ END(copyinstr)
+diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c
+index f6459f4..de7fe00 100644
+--- a/sys/amd64/amd64/trap.c
++++ b/sys/amd64/amd64/trap.c
+@@ -104,6 +104,7 @@ void dblfault_handler(struct trapframe *frame);
+ 
+ static int trap_pfault(struct trapframe *, int);
+ static void trap_fatal(struct trapframe *, vm_offset_t);
++static bool smap_access_violation(struct trapframe *, int usermode);
+ 
+ #define MAX_TRAP_MSG		32
+ static char *trap_msg[] = {
+@@ -697,6 +698,20 @@ trap_pfault(frame, usermode)
+ 
+ 		map = &vm->vm_map;
+ 
++#ifdef INTEL_SMAP_SUPPORT
++		/*
++		 * "If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
++		 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
++		 *  (these are implicit supervisor accesses) regardless of the
++		 *  value of EFLAGS.AC." - Intel Ref. # 319433-014 9.3.2
++		 */
++		if (__predict_false(smap_access_violation(frame, usermode))) {
++			printf("\nSupervisor Mode Access Prevention\n");
++			trap_fatal(frame, eva);
++			return(-1);
++		}
++#endif
++
+ 		/*
+ 		 * When accessing a usermode address, kernel must be
+ 		 * ready to accept the page fault, and provide a
+@@ -868,6 +883,33 @@ trap_fatal(frame, eva)
+ 		panic("unknown/reserved trap");
+ }
+ 
++#ifdef INTEL_SMAP_SUPPORT
++/*
++ * Supervisor Mode Access Prevention violation
++ *
++ * "If CPL < 3, SMAP protections are disabled if EFLAGS.AC = 1.
++ * If CPL = 3, SMAP applies to all supervisor-mode data accesses
++ *  (these are implicit supervisor accesses) regardless of the
++ *  value of EFLAGS.AC." - Intel Ref. # 319433-014 9.3.2
++ */
++static bool
++smap_access_violation(struct trapframe *frame, int usermode)
++{
++	/* SMAP disabled */
++	if ((cpu_stdext_feature & CPUID_STDEXT_SMAP) == 0)
++		return (false);
++
++	/* CPL == 3 or EFLAGS.AC == 1 */
++	if (usermode || (frame->tf_rflags & PSL_AC) != 0)
++		return (false);
++
++	/*
++	 * CPL < 3 and EFLAGS.AC == 0
++	 */
++	return (true);
++}
++#endif
++
+ /*
+  * Double fault handler. Called when a fault occurs while writing
+  * a frame for a trap/exception onto the stack. This usually occurs
+diff --git a/sys/amd64/conf/GENERIC b/sys/amd64/conf/GENERIC
+index 4269889..3a0ccf9 100644
+--- a/sys/amd64/conf/GENERIC
++++ b/sys/amd64/conf/GENERIC
+@@ -26,6 +26,7 @@ makeoptions	WITH_CTF=1		# Run ctfconvert(1) for DTrace support
+ 
+ options 	SCHED_ULE		# ULE scheduler
+ options 	PREEMPTION		# Enable kernel thread preemption
++options 	KSP_FRAMEWORK		# Kernel run-time selfpatches and updates
+ options 	INET			# InterNETworking
+ options 	INET6			# IPv6 communications protocols
+ options 	TCP_OFFLOAD		# TCP offload
+@@ -88,6 +89,9 @@ options 	MALLOC_DEBUG_MAXZONES=8	# Separate malloc(9) zones
+ # Make an SMP-capable kernel by default
+ options 	SMP			# Symmetric MultiProcessor Kernel
+ 
++# Intel Supervisor Mode Access Prevention
++options 	INTEL_SMAP_SUPPORT
++
+ # CPU frequency control
+ device		cpufreq
+ 
+diff --git a/sys/amd64/ia32/ia32_exception.S b/sys/amd64/ia32/ia32_exception.S
+index fe1a676..8ba7c88 100644
+--- a/sys/amd64/ia32/ia32_exception.S
++++ b/sys/amd64/ia32/ia32_exception.S
+@@ -27,6 +27,8 @@
+  */
+ 
+ #include <machine/asmacros.h>
++#include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
+ 
+ #include "assym.s"
+ 
+@@ -67,6 +69,7 @@ IDTVEC(int0x80_syscall)
+ 	movq	%r14,TF_R14(%rsp)
+ 	movq	%r15,TF_R15(%rsp)
+ 	movl	$TF_HASSEGS,TF_FLAGS(%rsp)
++	_clac
+ 	cld
+ 	FAKE_MCOUNT(TF_RIP(%rsp))
+ 	movq	%rsp, %rdi
+diff --git a/sys/amd64/include/asmacros.h b/sys/amd64/include/asmacros.h
+index ce8dce4..d08cab9 100644
+--- a/sys/amd64/include/asmacros.h
++++ b/sys/amd64/include/asmacros.h
+@@ -33,6 +33,8 @@
+ #define _MACHINE_ASMACROS_H_
+ 
+ #include <sys/cdefs.h>
++#include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
+ 
+ /* XXX too much duplication in various asm*.h's. */
+ 
+@@ -167,6 +169,7 @@
+ 	movw	%es,TF_ES(%rsp) ;					\
+ 	movw	%ds,TF_DS(%rsp) ;					\
+ 	movl	$TF_HASSEGS,TF_FLAGS(%rsp) ;				\
++	_clac;								\
+ 	cld
+ 
+ #define POP_FRAME							\
+diff --git a/sys/amd64/include/cpufunc.h b/sys/amd64/include/cpufunc.h
+index 7464739..acec833 100644
+--- a/sys/amd64/include/cpufunc.h
++++ b/sys/amd64/include/cpufunc.h
+@@ -43,6 +43,9 @@
+ #error this file needs sys/cdefs.h as a prerequisite
+ #endif
+ 
++#include <machine/specialreg.h>
++#include <machine/selfpatch-asmacros.h>
++
+ struct region_descriptor;
+ 
+ #define readb(va)	(*(volatile uint8_t *) (va))
+@@ -587,6 +590,59 @@ cpu_mwait(u_long extensions, u_int hints)
+ 	__asm __volatile("mwait" : : "a" (hints), "c" (extensions));
+ }
+ 
++/*
++ * Intel SMAP related functions.
++ */
++static __inline void
++clac(void)
++{
++
++	__asm __volatile(
++	"0723:                                                           "
++        "        " KSP_INSTR_NOP3_C ";		                         "
++        "0724:                                                           "
++        "        .pushsection set_selfpatch_patch_set, \"ax\" ;          "
++        "0725:                                                           "
++        "        clac ;                                                  "
++        "0726:                                                           "
++        "        .popsection                                             "
++        "        .pushsection set_selfpatch_set, \"a\" ;                 "
++        "                .quad   0723b ;                                 "
++        "                .quad   0725b ;                                 "
++        "                .int    0724b-0723b ;                           "
++        "                .int    0726b-0725b ;                           "
++        "                .int    " __XSTRING(KSP_CPUID_STDEXT) " ;       "
++        "                .int    " __XSTRING(CPUID_STDEXT_SMAP) " ;      "
++        "                .quad   0 ;                                     "
++        "        .popsection ;						 "
++			: : : "memory");
++}
++
++static __inline void
++stac(void)
++{
++
++	__asm __volatile(
++	"0723:                                                           "
++        "        " KSP_INSTR_NOP3_C ";		                         "
++        "0724:                                                           "
++        "        .pushsection set_selfpatch_patch_set, \"ax\" ;          "
++        "0725:                                                           "
++        "        stac ;                                                  "
++        "0726:                                                           "
++        "        .popsection                                             "
++        "        .pushsection set_selfpatch_set, \"a\" ;                 "
++        "                .quad   0723b ;                                 "
++        "                .quad   0725b ;                                 "
++        "                .int    0724b-0723b ;                           "
++        "                .int    0726b-0725b ;                           "
++        "                .int    " __XSTRING(KSP_CPUID_STDEXT) " ;       "
++        "                .int    " __XSTRING(CPUID_STDEXT_SMAP) " ;      "
++        "                .quad   0 ;                                     "
++        "        .popsection ;						 "
++			: : : "memory");
++}
++
+ #ifdef _KERNEL
+ /* This is defined in <machine/specialreg.h> but is too painful to get to */
+ #ifndef	MSR_FSBASE
+diff --git a/sys/amd64/include/md_var.h b/sys/amd64/include/md_var.h
+index 5ddfbbd..643623b 100644
+--- a/sys/amd64/include/md_var.h
++++ b/sys/amd64/include/md_var.h
+@@ -61,7 +61,7 @@ extern	u_int	cpu_vendor_id;
+ extern	u_int	cpu_mon_mwait_flags;
+ extern	u_int	cpu_mon_min_size;
+ extern	u_int	cpu_mon_max_size;
+-extern	char	ctx_switch_xsave[];
++extern	u_int	cpu_extstate;
+ extern	char	kstack[];
+ extern	char	sigcode[];
+ extern	int	szsigcode;
+diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
+index e83e07e..c6f3a89 100644
+--- a/sys/amd64/include/pmap.h
++++ b/sys/amd64/include/pmap.h
+@@ -380,6 +380,7 @@ int	pmap_change_attr(vm_offset_t, vm_size_t, int);
+ void	pmap_demote_DMAP(vm_paddr_t base, vm_size_t len, boolean_t invalidate);
+ void	pmap_init_pat(void);
+ void	pmap_kenter(vm_offset_t va, vm_paddr_t pa);
++void	pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int mode);
+ void	*pmap_kenter_temporary(vm_paddr_t pa, int i);
+ vm_paddr_t pmap_kextract(vm_offset_t);
+ void	pmap_kremove(vm_offset_t);
+diff --git a/sys/amd64/include/selfpatch-asmacros.h b/sys/amd64/include/selfpatch-asmacros.h
+new file mode 100644
+index 0000000..fb723de
+--- /dev/null
++++ b/sys/amd64/include/selfpatch-asmacros.h
+@@ -0,0 +1,34 @@
++/*-
++ * Copyright (c) 2014, by Oliver Pinter <oliver.pntr at gmail.com>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef __AMD64_SELFPATH_ASMACROS_H__
++#define __AMD64_SELFPATH_ASMACROS_H__
++
++#include <x86/selfpatch-asmacros.h>
++
++#endif /* __AMD64_SELFPATH_ASMACROS_H__ */
+diff --git a/sys/amd64/include/selfpatch-machdep.h b/sys/amd64/include/selfpatch-machdep.h
+new file mode 100644
+index 0000000..caa22f7
+--- /dev/null
++++ b/sys/amd64/include/selfpatch-machdep.h
+@@ -0,0 +1,34 @@
++/*-
++ * Copyright (c) 2014, by Oliver Pinter <oliver.pntr at gmail.com>
++ * All rights reserved.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ *    notice, this list of conditions and the following disclaimer.
++ * 2. Redistributions in binary form must reproduce the above copyright
++ *    notice, this list of conditions and the following disclaimer in the
++ *    documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
++ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
++ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
++ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
++ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
++ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
++ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
++ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
++ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
++ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
++ * SUCH DAMAGE.
++ *
++ * $FreeBSD$
++ */
++
++#ifndef __AMD64_SELFPATH_MACHDEP_H__
++#define __AMD64_SELFPATH_MACHDEP_H__
++
++#include <x86/selfpatch-machdep.h>
++
++#endif /* __AMD64_SELFPATH_MACHDEP_H__ */
+diff --git a/sys/conf/NOTES b/sys/conf/NOTES
+index 6959425..dd49c17 100644
+--- a/sys/conf/NOTES
++++ b/sys/conf/NOTES
+@@ -2986,3 +2986,7 @@ options 	RANDOM_RWFILE	# Read and write entropy cache
+ 
+ # Module to enable execution of application via emulators like QEMU
+ options         IMAGACT_BINMISC
++
++# Kernel selfpatch framework
++options 	KSP_FRAMEWORK
++options 	KSP_DEBUG
+diff --git a/sys/conf/files.amd64 b/sys/conf/files.amd64
+index b63044d..d4f4803 100644
+--- a/sys/conf/files.amd64
++++ b/sys/conf/files.amd64
+@@ -439,6 +439,7 @@ dev/virtio/random/virtio_random.c	optional	virtio_random
+ isa/syscons_isa.c		optional	sc
+ isa/vga_isa.c			optional	vga
+ kern/kern_clocksource.c		standard
++kern/kern_selfpatch.c		optional	ksp_framework | intel_smap_support
+ kern/link_elf_obj.c		standard
+ #
+ # IA32 binary support
+@@ -553,6 +554,7 @@ x86/x86/msi.c			optional	pci
+ x86/x86/nexus.c			standard
+ x86/x86/tsc.c			standard
+ x86/x86/delay.c			standard
++x86/x86/selfpatch_machdep.c	optional	ksp_framework | intel_smap_support
+ x86/xen/hvm.c			optional	xenhvm
+ x86/xen/xen_intr.c		optional	xen | xenhvm
+ x86/xen/pv.c			optional	xenhvm
+diff --git a/sys/conf/ldscript.amd64 b/sys/conf/ldscript.amd64
+index 9210a73..5a97c52 100644
+--- a/sys/conf/ldscript.amd64
++++ b/sys/conf/ldscript.amd64
+@@ -30,6 +30,10 @@ SECTIONS
+   .rela.data      : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
+   .rel.tdata	  : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
+   .rela.tdata	  : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
++  .rel.set_selfpatch_set		: { *(.rel.set_selfpatch_set) }
++  .rela.set_selfpatch_set		: { *(.rela.set_selfpatch_set) }
++  .rel.set_selfpatch_patch_set		: { *(.rel.set_selfpatch_patch_set) }
++  .rela.set_selfpatch_patch_set	: { *(.rela.set_selfpatch_patch_set) }
+   .rel.tbss	  : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
+   .rela.tbss	  : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
+   .rel.ctors      : { *(.rel.ctors) }
+@@ -140,6 +144,18 @@ SECTIONS
+   .got            : { *(.got) }
+   . = DATA_SEGMENT_RELRO_END (24, .);
+   .got.plt        : { *(.got.plt) }
++  set_selfpatch_set   :
++  {
++    PROVIDE ( __start_set_selfpatch_set = . );
++    KEEP (*(set_selfpatch_set));
++    PROVIDE ( __stop_set_selfpatch_set = . );
++  }
++  set_selfpatch_patch_set     :
++  {
++    PROVIDE ( __start_set_selfpatch_patch_set = . );
++    KEEP (*(set_selfpatch_patch_set));
++    PROVIDE ( __stop_set_selfpatch_patch_set = . );
++  }
+   .data           :
+   {
+     *(.data .data.* .gnu.linkonce.d.*)
+diff --git a/sys/conf/options b/sys/conf/options
+index 6ed7ce9..1e9f21d 100644
+--- a/sys/conf/options
++++ b/sys/conf/options
+@@ -927,3 +927,7 @@ RANDOM_YARROW	opt_random.h
+ RANDOM_FORTUNA	opt_random.h
+ RANDOM_DEBUG	opt_random.h
+ RANDOM_RWFILE	opt_random.h
++
++# kernel selfpatch
++KSP_FRAMEWORK	opt_selfpatch.h
++KSP_DEBUG	opt_selfpatch.h
+diff --git a/sys/conf/options.amd64 b/sys/conf/options.amd64
+index f1d4b4a..cebe69c 100644
+--- a/sys/conf/options.amd64
++++ b/sys/conf/options.amd64
+@@ -65,3 +65,6 @@ XENHVM			opt_global.h
+ 
+ # options for the Intel C600 SAS driver (isci)
+ ISCI_LOGGING	opt_isci.h
++
++# Intel Supervisor Mode Access Prevention
++INTEL_SMAP_SUPPORT	opt_cpu.h
+diff --git a/sys/i386/i386/db_disasm.c b/sys/i386/i386/db_disasm.c
+index 719c9f7..b0176a9 100644
+--- a/sys/i386/i386/db_disasm.c
++++ b/sys/i386/i386/db_disasm.c
+@@ -1266,13 +1266,23 @@ db_disasm(loc, altfmt)
+ 		case 0xc8:
+ 			i_name = "monitor";
+ 			i_size = NONE;
+-			i_mode = 0;			
++			i_mode = 0;
+ 			break;
+ 		case 0xc9:
+ 			i_name = "mwait";
+ 			i_size = NONE;
+ 			i_mode = 0;
+ 			break;
++		case 0xca:
++			i_name = "clac";
++			i_size = NONE;
++			i_mode = 0;
++			break;
++		case 0xcb:
++			i_name = "stac";
++			i_size = NONE;
++			i_mode = 0;
++			break;
+ 		}
+ 	}
+ 	if (ip->i_extra == db_Grp15 && f_mod(regmodrm) == 3) {
+diff --git a/sys/i386/i386/exception.s b/sys/i386/i386/exception.s
+index 39a8828..ff0c25e 100644
+--- a/sys/i386/i386/exception.s
++++ b/sys/i386/i386/exception.s
+@@ -162,6 +162,7 @@ alltraps:
+ 	pushl	%fs
+ alltraps_with_regs_pushed:
+ 	SET_KERNEL_SREGS
++	clac
+ 	cld
+ 	FAKE_MCOUNT(TF_EIP(%esp))
+ calltrap:
+diff --git a/sys/i386/i386/identcpu.c b/sys/i386/i386/identcpu.c
+index 9a09adb..4cea372 100644
+--- a/sys/i386/i386/identcpu.c
++++ b/sys/i386/i386/identcpu.c
+@@ -860,6 +860,47 @@ printcpuinfo(void)
+ 				);
+ 			}
+ 
++			if (cpu_stdext_feature != 0) {
++				printf("\n  Structured Extended Features=0x%b",
++				    cpu_stdext_feature,
++				       "\020"
++				       /* RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */
++				       "\001FSGSBASE"
++				       "\002TSCADJ"
++				       /* Bit Manipulation Instructions */
++				       "\004BMI1"
++				       /* Hardware Lock Elision */
++				       "\005HLE"
++				       /* Advanced Vector Instructions 2 */
++				       "\006AVX2"
++				       /* Supervisor Mode Execution Prot. */
++				       "\010SMEP"
++				       /* Bit Manipulation Instructions */
++				       "\011BMI2"
++				       "\012ERMS"
++				       /* Invalidate Processor Context ID */
++				       "\013INVPCID"
++				       /* Restricted Transactional Memory */
++				       "\014RTM"
++				       /* Intel Memory Protection Extensions */
++				       "\017MPX"
++				       /* AVX512 Foundation */
++				       "\021AVX512F"
++				       /* Enhanced NRBG */
++				       "\023RDSEED"
++				       /* ADCX + ADOX */
++				       "\024ADX"
++				       /* Supervisor Mode Access Prevention */
++				       "\025SMAP"
++				       "\030CLFLUSHOPT"
++				       "\032PROCTRACE"
++				       "\033AVX512PF"
++				       "\034AVX512ER"
++				       "\035AVX512CD"
++				       "\036SHA"
++				       );
++			}
++
+ 			if (via_feature_rng != 0 || via_feature_xcrypt != 0)
+ 				print_via_padlock_info();
+ 
+@@ -1099,7 +1140,7 @@ finishidentcpu(void)
+ {
+ 	int	isblue = 0;
+ 	u_char	ccr3;
+-	u_int	regs[4];
++	u_int	regs[4], cpu_stdext_disable;
+ 
+ 	cpu_vendor_id = find_cpu_vendor_id();
+ 
+@@ -1128,6 +1169,25 @@ finishidentcpu(void)
+ 		cpu_mon_max_size = regs[1] &  CPUID5_MON_MAX_SIZE;
+ 	}
+ 
++	if (cpu_high >= 7) {
++		cpuid_count(7, 0, regs);
++		cpu_stdext_feature = regs[1];
++
++		/*

*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***

