svn commit: r354193 - head/sys/arm64/arm64
Andrew Turner
andrew at FreeBSD.org
Wed Oct 30 17:32:36 UTC 2019
Author: andrew
Date: Wed Oct 30 17:32:35 2019
New Revision: 354193
URL: https://svnweb.freebsd.org/changeset/base/354193
Log:
Set the userspace execute never bit on kernel mappings.
Arm64 allows us to create execute-only mappings. To make sure userspace
is unable to accidentally execute kernel code, set the user execute-never
(UXN) bit in the kernel page tables.
MFC after: 1 week
Sponsored by: DARPA, AFRL
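For context, the execute-never attributes used throughout the diff are
single bits in the Armv8-A stage 1 descriptor format. A minimal sketch of
how they are defined (the authoritative definitions live in
sys/arm64/include/pte.h; the bit positions follow the architecture):

  /* Armv8-A stage 1 execute-never attribute bits (sketch). */
  #define ATTR_UXN  (1UL << 54)            /* Unprivileged (EL0) eXecute Never */
  #define ATTR_PXN  (1UL << 53)            /* Privileged (EL1) eXecute Never */
  #define ATTR_XN   (ATTR_PXN | ATTR_UXN)  /* not executable at any EL */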
Modified:
head/sys/arm64/arm64/locore.S
head/sys/arm64/arm64/pmap.c
Modified: head/sys/arm64/arm64/locore.S
==============================================================================
--- head/sys/arm64/arm64/locore.S Wed Oct 30 17:18:11 2019 (r354192)
+++ head/sys/arm64/arm64/locore.S Wed Oct 30 17:32:35 2019 (r354193)
@@ -556,6 +556,7 @@ build_l2_block_pagetable:
lsl x12, x7, #2
orr x12, x12, #L2_BLOCK
orr x12, x12, #(ATTR_AF)
+ orr x12, x12, #(ATTR_UXN)
#ifdef SMP
orr x12, x12, ATTR_SH(ATTR_SH_IS)
#endif
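In C terms, the assembly above ORs the new attribute into each 2MB L2
block descriptor as it is built; roughly (a paraphrase, where addr_bits
is a hypothetical name for the address field the surrounding code has
already shifted into place):

  /* Sketch of the L2 block descriptor assembled above. */
  pt_entry_t desc = addr_bits | L2_BLOCK | ATTR_AF | ATTR_UXN;
  #ifdef SMP
  desc |= ATTR_SH(ATTR_SH_IS);  /* Inner Shareable, for SMP coherency */
  #endif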
Modified: head/sys/arm64/arm64/pmap.c
==============================================================================
--- head/sys/arm64/arm64/pmap.c Wed Oct 30 17:18:11 2019 (r354192)
+++ head/sys/arm64/arm64/pmap.c Wed Oct 30 17:32:35 2019 (r354193)
@@ -761,7 +761,7 @@ pmap_bootstrap_l3(vm_offset_t l1pt, vm_offset_t va, vm
pa = pmap_early_vtophys(l1pt, l3pt);
pmap_store(&l2[l2_slot],
- (pa & ~Ln_TABLE_MASK) | L2_TABLE);
+ (pa & ~Ln_TABLE_MASK) | ATTR_UXN | L2_TABLE);
l3pt += PAGE_SIZE;
}
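The bootstrap path gets the same treatment: the L2 entries that link in
the preallocated L3 page tables now carry the bit as well. Schematically
(a commented restatement of the store above, where pa is the physical
address of the L3 table just computed):

  /* Link an L3 table into the L2 slot and tag the entry. */
  pmap_store(&l2[l2_slot],
      (pa & ~Ln_TABLE_MASK)   /* page-aligned PA of the L3 table */
      | ATTR_UXN              /* new: user execute-never */
      | L2_TABLE);            /* table descriptor type */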
@@ -1174,6 +1174,8 @@ pmap_kenter(vm_offset_t sva, vm_size_t size, vm_paddr_
attr = ATTR_DEFAULT | ATTR_IDX(mode) | L3_PAGE;
if (mode == DEVICE_MEMORY)
attr |= ATTR_XN;
+ else
+ attr |= ATTR_UXN;
va = sva;
while (size != 0) {
@@ -1289,7 +1291,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int count)
m = ma[i];
pa = VM_PAGE_TO_PHYS(m) | ATTR_DEFAULT | ATTR_AP(ATTR_AP_RW) |
- ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
+ ATTR_UXN | ATTR_IDX(m->md.pv_memattr) | L3_PAGE;
if (m->md.pv_memattr == DEVICE_MEMORY)
pa |= ATTR_XN;
pte = pmap_l2_to_l3(pde, va);
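pmap_kenter() and pmap_qenter() above share one rule: device memory is
never executable at any exception level (ATTR_XN sets both PXN and UXN),
while ordinary kernel memory is now executable at EL1 only. Condensed
into a hypothetical helper (not code from the tree):

  /* Execute-never attribute selection for kernel mappings, condensed. */
  static pt_entry_t
  kernel_xn_attrs(int mode)
  {
          if (mode == DEVICE_MEMORY)
                  return (ATTR_XN);   /* no execution at EL0 or EL1 */
          return (ATTR_UXN);          /* EL1 may execute, EL0 may not */
  }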
@@ -3194,6 +3196,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, v
new_l3 |= ATTR_SW_WIRED;
if (va < VM_MAXUSER_ADDRESS)
new_l3 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+ else
+ new_l3 |= ATTR_UXN;
if ((m->oflags & VPO_UNMANAGED) == 0) {
new_l3 |= ATTR_SW_MANAGED;
if ((prot & VM_PROT_WRITE) != 0) {
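pmap_enter(), and pmap_enter_2mpage() and pmap_enter_quick_locked() in
the hunks that follow, all key the execute-never choice off the virtual
address: user mappings get ATTR_PXN so the kernel can never execute user
memory, and kernel mappings get ATTR_UXN so userspace can never execute
kernel memory. In isolation (hypothetical helper name):

  /* The rule added by the three pmap_enter* hunks, in one place. */
  static pt_entry_t
  va_xn_attrs(vm_offset_t va)
  {
          if (va < VM_MAXUSER_ADDRESS)
                  return (ATTR_AP(ATTR_AP_USER) | ATTR_PXN);  /* user */
          return (ATTR_UXN);                                  /* kernel */
  }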
@@ -3456,6 +3460,8 @@ pmap_enter_2mpage(pmap_t pmap, vm_offset_t va, vm_page
new_l2 |= ATTR_XN;
if (va < VM_MAXUSER_ADDRESS)
new_l2 |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+ else
+ new_l2 |= ATTR_UXN;
return (pmap_enter_l2(pmap, va, new_l2, PMAP_ENTER_NOSLEEP |
PMAP_ENTER_NOREPLACE | PMAP_ENTER_NORECLAIM, NULL, lockp) ==
KERN_SUCCESS);
@@ -3754,6 +3760,8 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, v
l3_val |= ATTR_XN;
if (va < VM_MAXUSER_ADDRESS)
l3_val |= ATTR_AP(ATTR_AP_USER) | ATTR_PXN;
+ else
+ l3_val |= ATTR_UXN;
/*
* Now validate mapping with RO protection
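As a quick sanity check (not part of the commit), the new bit can be
spotted in any raw descriptor value, for example one printed from ddb:

  #include <stdbool.h>
  #include <stdint.h>

  /* Is the Armv8 UXN bit (bit 54) set in a raw page-table descriptor? */
  static bool
  pte_is_uxn(uint64_t pte)
  {
          return ((pte & (UINT64_C(1) << 54)) != 0);
  }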