svn commit: r354895 - in head/sys: security/mac vm

Doug Moore dougm at FreeBSD.org
Wed Nov 20 16:06:49 UTC 2019


Author: dougm
Date: Wed Nov 20 16:06:48 2019
New Revision: 354895
URL: https://svnweb.freebsd.org/changeset/base/354895

Log:
  When the predecessor or successor of the current map entry has
  already been looked up, keep that already-looked-up value in a
  variable and use it instead of looking the neighboring entry up
  again.
  
  Approved by: alc, markj (earlier version), kib (earlier version)
  Differential Revision: https://reviews.freebsd.org/D22348

Modified:
  head/sys/security/mac/mac_process.c
  head/sys/vm/vm_map.c
  head/sys/vm/vm_map.h

Modified: head/sys/security/mac/mac_process.c
==============================================================================
--- head/sys/security/mac/mac_process.c	Wed Nov 20 14:37:48 2019	(r354894)
+++ head/sys/security/mac/mac_process.c	Wed Nov 20 16:06:48 2019	(r354895)
@@ -252,7 +252,7 @@ static void
 mac_proc_vm_revoke_recurse(struct thread *td, struct ucred *cred,
     struct vm_map *map)
 {
-	vm_map_entry_t vme;
+	vm_map_entry_t prev, vme;
 	int result;
 	vm_prot_t revokeperms;
 	vm_object_t backing_object, object;
@@ -263,8 +263,10 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct u
 	if (!mac_mmap_revocation)
 		return;
 
+	prev = &map->header;
 	vm_map_lock(map);
-	VM_MAP_ENTRY_FOREACH(vme, map) {
+	for (vme = vm_map_entry_first(map); vme != &map->header;
+	    prev = vme, vme = vm_map_entry_succ(prev)) {
 		if (vme->eflags & MAP_ENTRY_IS_SUB_MAP) {
 			mac_proc_vm_revoke_recurse(td, cred,
 			    vme->object.sub_map);
@@ -363,8 +365,7 @@ mac_proc_vm_revoke_recurse(struct thread *td, struct u
 			}
 			pmap_protect(map->pmap, vme->start, vme->end,
 			    vme->protection & ~revokeperms);
-			vm_map_try_merge_entries(map, vm_map_entry_pred(vme),
-			    vme);
+			vm_map_try_merge_entries(map, prev, vme);
 		}
 	}
 	vm_map_unlock(map);

Modified: head/sys/vm/vm_map.c
==============================================================================
--- head/sys/vm/vm_map.c	Wed Nov 20 14:37:48 2019	(r354894)
+++ head/sys/vm/vm_map.c	Wed Nov 20 16:06:48 2019	(r354895)
@@ -978,6 +978,21 @@ vm_map_entry_max_free_right(vm_map_entry_t root, vm_ma
 	    root->right->max_free : right_ancestor->start - root->end);
 }
 
+/*
+ *	vm_map_entry_{pred,succ}:
+ *
+ *	Find the {predecessor, successor} of the entry by taking one step
+ *	in the appropriate direction and backtracking as much as necessary.
+ */
+static inline vm_map_entry_t
+vm_map_entry_pred(vm_map_entry_t entry)
+{
+
+	return (entry->prev);
+}
+
+/* vm_map_entry_succ is defined in vm_map.h. */
+
 #define SPLAY_LEFT_STEP(root, y, rlist, test) do {			\
 	vm_size_t max_free;						\
 									\
@@ -1412,7 +1427,7 @@ int
 vm_map_insert(vm_map_t map, vm_object_t object, vm_ooffset_t offset,
     vm_offset_t start, vm_offset_t end, vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_map_entry_t new_entry, prev_entry;
+	vm_map_entry_t new_entry, next_entry, prev_entry;
 	struct ucred *cred;
 	vm_eflags_t protoeflags;
 	vm_inherit_t inheritance;
@@ -1443,7 +1458,8 @@ vm_map_insert(vm_map_t map, vm_object_t object, vm_oof
 	/*
 	 * Assert that the next entry doesn't overlap the end point.
 	 */
-	if (vm_map_entry_succ(prev_entry)->start < end)
+	next_entry = vm_map_entry_succ(prev_entry);
+	if (next_entry->start < end)
 		return (KERN_NO_SPACE);
 
 	if ((cow & MAP_CREATE_GUARD) != 0 && (object != NULL ||
@@ -1538,8 +1554,7 @@ charged:
 				map->size += end - prev_entry->end;
 			vm_map_entry_resize(map, prev_entry,
 			    end - prev_entry->end);
-			vm_map_try_merge_entries(map, prev_entry,
-			    vm_map_entry_succ(prev_entry));
+			vm_map_try_merge_entries(map, prev_entry, next_entry);
 			return (KERN_SUCCESS);
 		}
 
@@ -1600,7 +1615,7 @@ charged:
 	 * other cases, which are less common.
 	 */
 	vm_map_try_merge_entries(map, prev_entry, new_entry);
-	vm_map_try_merge_entries(map, new_entry, vm_map_entry_succ(new_entry));
+	vm_map_try_merge_entries(map, new_entry, next_entry);
 
 	if ((cow & (MAP_PREFAULT | MAP_PREFAULT_PARTIAL)) != 0) {
 		vm_map_pmap_enter(map, start, prot, object, OFF_TO_IDX(offset),
@@ -2430,7 +2445,7 @@ int
 vm_map_protect(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	       vm_prot_t new_prot, boolean_t set_max)
 {
-	vm_map_entry_t current, entry, in_tran;
+	vm_map_entry_t current, entry, in_tran, prev_entry;
 	vm_object_t obj;
 	struct ucred *cred;
 	vm_prot_t old_prot;
@@ -2555,9 +2570,10 @@ again:
 	 * Otherwise, just simplify entries, since some may have been modified.
 	 * [Note that clipping is not necessary the second time.]
 	 */
-	for (current = entry; current->start < end;
-	     vm_map_try_merge_entries(map, vm_map_entry_pred(current), current),
-	    current = vm_map_entry_succ(current)) {
+	for (prev_entry = vm_map_entry_pred(entry), current = entry;
+	    current->start < end;
+	    vm_map_try_merge_entries(map, prev_entry, current),
+	    prev_entry = current, current = vm_map_entry_succ(current)) {
 		if (rv != KERN_SUCCESS ||
 		    (current->eflags & MAP_ENTRY_GUARD) != 0)
 			continue;
@@ -2595,7 +2611,7 @@ again:
 #undef	MASK
 		}
 	}
-	vm_map_try_merge_entries(map, vm_map_entry_pred(current), current);
+	vm_map_try_merge_entries(map, prev_entry, current);
 	vm_map_unlock(map);
 	return (rv);
 }
@@ -2615,7 +2631,7 @@ vm_map_madvise(
 	vm_offset_t end,
 	int behav)
 {
-	vm_map_entry_t current, entry;
+	vm_map_entry_t current, prev_entry;
 	bool modify_map;
 
 	/*
@@ -2654,11 +2670,13 @@ vm_map_madvise(
 	 */
 	VM_MAP_RANGE_CHECK(map, start, end);
 
-	if (vm_map_lookup_entry(map, start, &entry)) {
+	if (vm_map_lookup_entry(map, start, &current)) {
 		if (modify_map)
-			vm_map_clip_start(map, entry, start);
+			vm_map_clip_start(map, current, start);
+		prev_entry = vm_map_entry_pred(current);
 	} else {
-		entry = vm_map_entry_succ(entry);
+		prev_entry = current;
+		current = vm_map_entry_succ(current);
 	}
 
 	if (modify_map) {
@@ -2668,7 +2686,7 @@ vm_map_madvise(
 		 * We clip the vm_map_entry so that behavioral changes are
 		 * limited to the specified address range.
 		 */
-		for (current = entry; current->start < end;
+		for (; current->start < end; prev_entry = current,
 		    current = vm_map_entry_succ(current)) {
 			if (current->eflags & MAP_ENTRY_IS_SUB_MAP)
 				continue;
@@ -2700,11 +2718,9 @@ vm_map_madvise(
 			default:
 				break;
 			}
-			vm_map_try_merge_entries(map,
-			    vm_map_entry_pred(current), current);
+			vm_map_try_merge_entries(map, prev_entry, current);
 		}
-		vm_map_try_merge_entries(map, vm_map_entry_pred(current),
-		    current);
+		vm_map_try_merge_entries(map, prev_entry, current);
 		vm_map_unlock(map);
 	} else {
 		vm_pindex_t pstart, pend;
@@ -2716,7 +2732,7 @@ vm_map_madvise(
 		 * Since we don't clip the vm_map_entry, we have to clip
 		 * the vm_object pindex and count.
 		 */
-		for (current = entry; current->start < end;
+		for (; current->start < end;
 		    current = vm_map_entry_succ(current)) {
 			vm_offset_t useEnd, useStart;
 
@@ -2804,8 +2820,7 @@ int
 vm_map_inherit(vm_map_t map, vm_offset_t start, vm_offset_t end,
 	       vm_inherit_t new_inheritance)
 {
-	vm_map_entry_t entry;
-	vm_map_entry_t temp_entry;
+	vm_map_entry_t entry, prev_entry;
 
 	switch (new_inheritance) {
 	case VM_INHERIT_NONE:
@@ -2820,20 +2835,21 @@ vm_map_inherit(vm_map_t map, vm_offset_t start, vm_off
 		return (KERN_SUCCESS);
 	vm_map_lock(map);
 	VM_MAP_RANGE_CHECK(map, start, end);
-	if (vm_map_lookup_entry(map, start, &temp_entry)) {
-		entry = temp_entry;
+	if (vm_map_lookup_entry(map, start, &prev_entry)) {
+		entry = prev_entry;
 		vm_map_clip_start(map, entry, start);
+		prev_entry = vm_map_entry_pred(entry);
 	} else
-		entry = vm_map_entry_succ(temp_entry);
-	while (entry->start < end) {
+		entry = vm_map_entry_succ(prev_entry);
+	for (; entry->start < end;
+	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
 		vm_map_clip_end(map, entry, end);
 		if ((entry->eflags & MAP_ENTRY_GUARD) == 0 ||
 		    new_inheritance != VM_INHERIT_ZERO)
 			entry->inheritance = new_inheritance;
-		vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
-		entry = vm_map_entry_succ(entry);
+		vm_map_try_merge_entries(map, prev_entry, entry);
 	}
-	vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+	vm_map_try_merge_entries(map, prev_entry, entry);
 	vm_map_unlock(map);
 	return (KERN_SUCCESS);
 }
@@ -2896,9 +2912,9 @@ int
 vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
     int flags)
 {
-	vm_map_entry_t entry, first_entry;
+	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
 	int rv;
-	bool first_iteration, holes_ok, need_wakeup, user_unwire;
+	bool holes_ok, need_wakeup, user_unwire;
 
 	if (start == end)
 		return (KERN_SUCCESS);
@@ -2914,28 +2930,26 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 			return (KERN_INVALID_ADDRESS);
 		}
 	}
-	first_iteration = true;
-	entry = first_entry;
 	rv = KERN_SUCCESS;
-	while (entry->start < end) {
+	for (entry = first_entry; entry->start < end; entry = next_entry) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 			/*
 			 * We have not yet clipped the entry.
 			 */
-			entry = vm_map_entry_in_transition(map, start, &end,
-			    holes_ok, entry);
-			if (entry == NULL) {
-				if (first_iteration) {
+			next_entry = vm_map_entry_in_transition(map, start,
+			    &end, holes_ok, entry);
+			if (next_entry == NULL) {
+				if (entry == first_entry) {
 					vm_map_unlock(map);
 					return (KERN_INVALID_ADDRESS);
 				}
 				rv = KERN_INVALID_ADDRESS;
 				break;
 			}
-			first_entry = first_iteration ? entry : NULL;
+			first_entry = (entry == first_entry) ?
+			    next_entry : NULL;
 			continue;
 		}
-		first_iteration = false;
 		vm_map_clip_start(map, entry, start);
 		vm_map_clip_end(map, entry, end);
 		/*
@@ -2947,13 +2961,13 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 		    ("owned map entry %p", entry));
 		entry->eflags |= MAP_ENTRY_IN_TRANSITION;
 		entry->wiring_thread = curthread;
+		next_entry = vm_map_entry_succ(entry);
 		/*
 		 * Check the map for holes in the specified region.
 		 * If holes_ok, skip this check.
 		 */
 		if (!holes_ok &&
-		    (entry->end < end &&
-		    vm_map_entry_succ(entry)->start > entry->end)) {
+		    entry->end < end && next_entry->start > entry->end) {
 			end = entry->end;
 			rv = KERN_INVALID_ADDRESS;
 			break;
@@ -2967,16 +2981,19 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 			rv = KERN_INVALID_ARGUMENT;
 			break;
 		}
-		entry = vm_map_entry_succ(entry);
 	}
 	need_wakeup = false;
 	if (first_entry == NULL &&
 	    !vm_map_lookup_entry(map, start, &first_entry)) {
 		KASSERT(holes_ok, ("vm_map_unwire: lookup failed"));
-		first_entry = vm_map_entry_succ(first_entry);
+		prev_entry = first_entry;
+		entry = vm_map_entry_succ(first_entry);
+	} else {
+		prev_entry = vm_map_entry_pred(first_entry);
+		entry = first_entry;
 	}
-	for (entry = first_entry; entry->start < end;
-	    entry = vm_map_entry_succ(entry)) {
+	for (; entry->start < end;
+	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
 		/*
 		 * If holes_ok was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -3012,9 +3029,9 @@ vm_map_unwire(vm_map_t map, vm_offset_t start, vm_offs
 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 			need_wakeup = true;
 		}
-		vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+		vm_map_try_merge_entries(map, prev_entry, entry);
 	}
-	vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+	vm_map_try_merge_entries(map, prev_entry, entry);
 	vm_map_unlock(map);
 	if (need_wakeup)
 		vm_map_wakeup(map);
@@ -3100,12 +3117,12 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset
 int
 vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm_offset_t end, int flags)
 {
-	vm_map_entry_t entry, first_entry, tmp_entry;
+	vm_map_entry_t entry, first_entry, next_entry, prev_entry;
 	vm_offset_t faddr, saved_end, saved_start;
 	u_long npages;
 	u_int last_timestamp;
 	int rv;
-	bool first_iteration, holes_ok, need_wakeup, user_wire;
+	bool holes_ok, need_wakeup, user_wire;
 	vm_prot_t prot;
 
 	VM_MAP_ASSERT_LOCKED(map);
@@ -3124,25 +3141,23 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 		else
 			return (KERN_INVALID_ADDRESS);
 	}
-	first_iteration = true;
-	entry = first_entry;
-	while (entry->start < end) {
+	for (entry = first_entry; entry->start < end; entry = next_entry) {
 		if (entry->eflags & MAP_ENTRY_IN_TRANSITION) {
 			/*
 			 * We have not yet clipped the entry.
 			 */
-			entry = vm_map_entry_in_transition(map, start, &end,
-			    holes_ok, entry);
-			if (entry == NULL) {
-				if (first_iteration)
+			next_entry = vm_map_entry_in_transition(map, start,
+			    &end, holes_ok, entry);
+			if (next_entry == NULL) {
+				if (entry == first_entry)
 					return (KERN_INVALID_ADDRESS);
 				rv = KERN_INVALID_ADDRESS;
 				goto done;
 			}
-			first_entry = first_iteration ? entry : NULL;
+			first_entry = (entry == first_entry) ?
+			    next_entry : NULL;
 			continue;
 		}
-		first_iteration = false;
 		vm_map_clip_start(map, entry, start);
 		vm_map_clip_end(map, entry, end);
 		/*
@@ -3205,15 +3220,13 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 				 * deleted.
 				 */
 				if (!vm_map_lookup_entry(map, saved_start,
-				    &tmp_entry))
+				    &next_entry))
 					KASSERT(false,
 					    ("vm_map_wire: lookup failed"));
-				if (entry == first_entry)
-					first_entry = tmp_entry;
-				else
-					first_entry = NULL;
-				entry = tmp_entry;
-				while (entry->end < saved_end) {
+				first_entry = (entry == first_entry) ?
+				    next_entry : NULL;
+				for (entry = next_entry; entry->end < saved_end;
+				    entry = vm_map_entry_succ(entry)) {
 					/*
 					 * In case of failure, handle entries
 					 * that were not fully wired here;
@@ -3224,7 +3237,6 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 					    faddr < entry->end)
 						vm_map_wire_entry_failure(map,
 						    entry, faddr);
-					entry = vm_map_entry_succ(entry);
 				}
 			}
 			if (rv != KERN_SUCCESS) {
@@ -3242,14 +3254,13 @@ vm_map_wire_locked(vm_map_t map, vm_offset_t start, vm
 		 * Check the map for holes in the specified region.
 		 * If holes_ok was specified, skip this check.
 		 */
+		next_entry = vm_map_entry_succ(entry);
 		if (!holes_ok &&
-		    entry->end < end &&
-		    vm_map_entry_succ(entry)->start > entry->end) {
+		    entry->end < end && next_entry->start > entry->end) {
 			end = entry->end;
 			rv = KERN_INVALID_ADDRESS;
 			goto done;
 		}
-		entry = vm_map_entry_succ(entry);
 	}
 	rv = KERN_SUCCESS;
 done:
@@ -3257,10 +3268,14 @@ done:
 	if (first_entry == NULL &&
 	    !vm_map_lookup_entry(map, start, &first_entry)) {
 		KASSERT(holes_ok, ("vm_map_wire: lookup failed"));
-		first_entry = vm_map_entry_succ(first_entry);
+		prev_entry = first_entry;
+		entry = vm_map_entry_succ(first_entry);
+	} else {
+		prev_entry = vm_map_entry_pred(first_entry);
+		entry = first_entry;
 	}
-	for (entry = first_entry; entry->start < end;
-	    entry = vm_map_entry_succ(entry)) {
+	for (; entry->start < end;
+	    prev_entry = entry, entry = vm_map_entry_succ(entry)) {
 		/*
 		 * If holes_ok was specified, an empty
 		 * space in the unwired region could have been mapped
@@ -3313,9 +3328,9 @@ done:
 			entry->eflags &= ~MAP_ENTRY_NEEDS_WAKEUP;
 			need_wakeup = true;
 		}
-		vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+		vm_map_try_merge_entries(map, prev_entry, entry);
 	}
-	vm_map_try_merge_entries(map, vm_map_entry_pred(entry), entry);
+	vm_map_try_merge_entries(map, prev_entry, entry);
 	if (need_wakeup)
 		vm_map_wakeup(map);
 	return (rv);
@@ -3345,8 +3360,7 @@ vm_map_sync(
 	boolean_t syncio,
 	boolean_t invalidate)
 {
-	vm_map_entry_t current;
-	vm_map_entry_t entry;
+	vm_map_entry_t current, entry, next_entry;
 	vm_size_t size;
 	vm_object_t object;
 	vm_ooffset_t offset;
@@ -3366,13 +3380,14 @@ vm_map_sync(
 	 * Make a first pass to check for user-wired memory and holes.
 	 */
 	for (current = entry; current->start < end;
-	    current = vm_map_entry_succ(current)) {
+	    current = next_entry) {
 		if (invalidate && (current->eflags & MAP_ENTRY_USER_WIRED)) {
 			vm_map_unlock_read(map);
 			return (KERN_INVALID_ARGUMENT);
 		}
+		next_entry = vm_map_entry_succ(current);
 		if (end > current->end &&
-		    current->end != vm_map_entry_succ(current)->start) {
+		    current->end != next_entry->start) {
 			vm_map_unlock_read(map);
 			return (KERN_INVALID_ADDRESS);
 		}
@@ -3914,7 +3929,7 @@ vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_c
 
 	new_map->anon_loc = old_map->anon_loc;
 
-	old_entry = old_map->header.next;
+	old_entry = vm_map_entry_first(old_map);
 
 	while (old_entry != &old_map->header) {
 		if (old_entry->eflags & MAP_ENTRY_IS_SUB_MAP)

Modified: head/sys/vm/vm_map.h
==============================================================================
--- head/sys/vm/vm_map.h	Wed Nov 20 14:37:48 2019	(r354894)
+++ head/sys/vm/vm_map.h	Wed Nov 20 16:06:48 2019	(r354895)
@@ -421,21 +421,21 @@ void vm_map_lookup_done (vm_map_t, vm_map_entry_t);
 boolean_t vm_map_lookup_entry (vm_map_t, vm_offset_t, vm_map_entry_t *);
 
 static inline vm_map_entry_t
-vm_map_entry_succ(vm_map_entry_t entry)
+vm_map_entry_first(vm_map_t map)
 {
 
-	return (entry->next);
+	return (map->header.next);
 }
 
 static inline vm_map_entry_t
-vm_map_entry_pred(vm_map_entry_t entry)
+vm_map_entry_succ(vm_map_entry_t entry)
 {
 
-	return (entry->prev);
+	return (entry->next);
 }
 
-#define VM_MAP_ENTRY_FOREACH(it, map)			\
-	for ((it) = (map)->header.next;		\
+#define VM_MAP_ENTRY_FOREACH(it, map)		\
+	for ((it) = vm_map_entry_first(map);	\
 	    (it) != &(map)->header;		\
 	    (it) = vm_map_entry_succ(it))
 int vm_map_protect (vm_map_t, vm_offset_t, vm_offset_t, vm_prot_t, boolean_t);


More information about the svn-src-head mailing list