git: 4b38880259e8 - stable/13 - Clear the accessed bit when copying a managed superpage mapping
Mark Johnston
markj at FreeBSD.org
Wed Sep 1 13:31:11 UTC 2021
The branch stable/13 has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=4b38880259e81043978f4be27ef2289120c1b9d3
commit 4b38880259e81043978f4be27ef2289120c1b9d3
Author: Alan Cox <alc at FreeBSD.org>
AuthorDate: 2021-07-13 07:30:43 +0000
Commit: Mark Johnston <markj at FreeBSD.org>
CommitDate: 2021-09-01 13:29:01 +0000
Clear the accessed bit when copying a managed superpage mapping
pmap_copy() is used to speculatively create mappings, so those mappings
should not have their accessed bit preset.
Reviewed by: kib, markj
(cherry picked from commit 325ff9327459bc7307130675fa19367ff8b02310)
---
sys/amd64/amd64/pmap.c | 17 ++++++++++++++++-
sys/arm64/arm64/pmap.c | 18 ++++++++++++++++--
2 files changed, 32 insertions(+), 3 deletions(-)
diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 1d0b6aa0cce1..b920426e6996 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -7717,6 +7717,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
continue;
if (srcptepaddr & PG_PS) {
+ /*
+ * We can only virtual copy whole superpages.
+ */
if ((addr & PDRMASK) != 0 || addr + NBPDR > end_addr)
continue;
pde = pmap_alloc_pde(dst_pmap, addr, &dst_pdpg, NULL);
@@ -7725,7 +7728,19 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (*pde == 0 && ((srcptepaddr & PG_MANAGED) == 0 ||
pmap_pv_insert_pde(dst_pmap, addr, srcptepaddr,
PMAP_ENTER_NORECLAIM, &lock))) {
- *pde = srcptepaddr & ~PG_W;
+ /*
+ * We leave the dirty bit unchanged because
+ * managed read/write superpage mappings are
+ * required to be dirty. However, managed
+ * superpage mappings are not required to
+ * have their accessed bit set, so we clear
+ * it because we don't know if this mapping
+ * will be used.
+ */
+ srcptepaddr &= ~PG_W;
+ if ((srcptepaddr & PG_MANAGED) != 0)
+ srcptepaddr &= ~PG_A;
+ *pde = srcptepaddr;
pmap_resident_count_adj(dst_pmap, NBPDR /
PAGE_SIZE);
counter_u64_add(pmap_pde_mappings, 1);
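
[Editor's note] The rule the amd64 hunk above applies boils down to: always clear the
wired bit, clear the accessed bit only for managed mappings, and leave the dirty bit
alone. The sketch below is a self-contained illustration of that rule only;
speculative_pde() is a hypothetical helper, and the PG_* values are placeholders, not
the definitions from sys/amd64/include/pmap.h.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder bit positions; the real values live in sys/amd64/include/pmap.h. */
#define	PG_A		0x0020	/* accessed: set by hardware on reference */
#define	PG_MANAGED	0x0400	/* software flag: page has PV entries */
#define	PG_W		0x0800	/* software flag: mapping is wired */

/*
 * Mimic what the patched pmap_copy() does to a source superpage PDE before
 * installing it in the destination pmap: the wired bit is always cleared,
 * and for managed mappings the accessed bit is cleared too, because the new
 * mapping is speculative and may never be used.  The dirty bit is untouched.
 */
static uint64_t
speculative_pde(uint64_t srcptepaddr)
{
	srcptepaddr &= ~PG_W;
	if ((srcptepaddr & PG_MANAGED) != 0)
		srcptepaddr &= ~PG_A;
	return (srcptepaddr);
}

int
main(void)
{
	uint64_t managed = PG_MANAGED | PG_A | PG_W | 0x1;
	uint64_t unmanaged = PG_A | PG_W | 0x1;

	/* Managed mapping: both the wired and accessed bits are stripped. */
	assert((speculative_pde(managed) & (PG_W | PG_A)) == 0);
	/* Unmanaged mapping: only the wired bit is stripped. */
	assert((speculative_pde(unmanaged) & PG_W) == 0);
	assert((speculative_pde(unmanaged) & PG_A) != 0);
	printf("masking behaves as in the patch\n");
	return (0);
}
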
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index a8216c6b4d7b..f0419beace37 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -4774,6 +4774,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
if (srcptepaddr == 0)
continue;
if ((srcptepaddr & ATTR_DESCR_MASK) == L2_BLOCK) {
+ /*
+ * We can only virtual copy whole superpages.
+ */
if ((addr & L2_OFFSET) != 0 ||
addr + L2_SIZE > end_addr)
continue;
@@ -4784,8 +4787,19 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, vm_size_t len,
((srcptepaddr & ATTR_SW_MANAGED) == 0 ||
pmap_pv_insert_l2(dst_pmap, addr, srcptepaddr,
PMAP_ENTER_NORECLAIM, &lock))) {
- mask = ATTR_SW_WIRED;
- pmap_store(l2, srcptepaddr & ~mask);
+ /*
+ * We leave the dirty bit unchanged because
+ * managed read/write superpage mappings are
+ * required to be dirty. However, managed
+ * superpage mappings are not required to
+ * have their accessed bit set, so we clear
+ * it because we don't know if this mapping
+ * will be used.
+ */
+ srcptepaddr &= ~ATTR_SW_WIRED;
+ if ((srcptepaddr & ATTR_SW_MANAGED) != 0)
+ srcptepaddr &= ~ATTR_AF;
+ pmap_store(l2, srcptepaddr);
pmap_resident_count_inc(dst_pmap, L2_SIZE /
PAGE_SIZE);
atomic_add_long(&pmap_l2_mappings, 1);
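
[Editor's note] The arm64 hunk applies the same rule with the ATTR_* flags. A compact
stand-alone sketch, again with placeholder bit values and a hypothetical
speculative_l2() helper (the authoritative definitions are in sys/arm64/include/pte.h):

#include <assert.h>
#include <stdint.h>

/* Placeholder attribute bits; see sys/arm64/include/pte.h for the real ones. */
#define	ATTR_AF		(UINT64_C(1) << 10)	/* access flag */
#define	ATTR_SW_MANAGED	(UINT64_C(1) << 56)	/* software: page has PV entries */
#define	ATTR_SW_WIRED	(UINT64_C(1) << 57)	/* software: mapping is wired */

/*
 * Mirror the arm64 hunk: always drop the wired flag, and drop the access
 * flag only for managed mappings, so an unused speculative L2 block mapping
 * is not reported as referenced.
 */
static uint64_t
speculative_l2(uint64_t srcptepaddr)
{
	srcptepaddr &= ~ATTR_SW_WIRED;
	if ((srcptepaddr & ATTR_SW_MANAGED) != 0)
		srcptepaddr &= ~ATTR_AF;
	return (srcptepaddr);
}

int
main(void)
{
	/* A managed, wired, referenced L2 block loses AF and the wired flag. */
	assert((speculative_l2(ATTR_SW_MANAGED | ATTR_SW_WIRED | ATTR_AF) &
	    (ATTR_SW_WIRED | ATTR_AF)) == 0);
	return (0);
}

In both architectures the copied mapping starts with its accessed bit clear, so it
only shows up as referenced once it is actually used.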