PERFORCE change 181481 for review

Edward Tomasz Napierala trasz at FreeBSD.org
Wed Jul 28 08:21:44 UTC 2010


http://p4web.freebsd.org/@@181481?ac=10

Change 181481 by trasz at trasz_victim on 2010/07/27 20:03:12

	Enforce RLIMIT_MEMLOCK.  Doesn't support MCL_FUTURE yet.  This will
	be redone after the resource limit gets moved into vm_map_wire().

Affected files ...

.. //depot/projects/soc2009/trasz_limits/TODO#19 edit
.. //depot/projects/soc2009/trasz_limits/sys/kern/kern_container.c#22 edit
.. //depot/projects/soc2009/trasz_limits/sys/vm/vm_glue.c#7 edit
.. //depot/projects/soc2009/trasz_limits/sys/vm/vm_mmap.c#12 edit

Differences ...

==== //depot/projects/soc2009/trasz_limits/TODO#19 (text+ko) ====

@@ -6,6 +6,7 @@
  - number of processes (RUSAGE_NPROC)
  - virtual memory usage (address space limit) (RUSAGE_VMEM), in megabytes
  - maximum core size (RUSAGE_CORE), in megabytes (core, as in ELF program state dump)
+ - locked memory usage (RUSAGE_MEMLOCK), in megabytes
 
 Limits to do:
 
@@ -13,7 +14,6 @@
 
  - stack size (RUSAGE_STACK), in megabytes,
  - resident set size (physical memory usage) (RUSAGE_RSS), in megabytes
- - locked memory usage (RUSAGE_MEMLOCK), in megabytes
  - swap usage (RUSAGE_SWAP), in megabytes
  - number of file descriptors (RUSAGE_NOFILE)
  - amount of memory consumed by socket buffers (RUSAGE_SBSIZE), in megabytes

==== //depot/projects/soc2009/trasz_limits/sys/kern/kern_container.c#22 (text+ko) ====

@@ -76,6 +76,7 @@
 	switch (resource) {
 	case RUSAGE_CPU:
 	case RUSAGE_CORE:
+	case RUSAGE_MEMLOCK:
 	case RUSAGE_NPROC:
 		return (0);
 	default:

==== //depot/projects/soc2009/trasz_limits/sys/vm/vm_glue.c#7 (text+ko) ====

@@ -65,6 +65,7 @@
 
 #include <sys/param.h>
 #include <sys/systm.h>
+#include <sys/container.h>
 #include <sys/limits.h>
 #include <sys/lock.h>
 #include <sys/mutex.h>
@@ -184,6 +185,7 @@
 vslock(void *addr, size_t len)
 {
 	vm_offset_t end, last, start;
+	unsigned long nsize;
 	vm_size_t npages;
 	int error;
 
@@ -196,13 +198,17 @@
 	if (npages > vm_page_max_wired)
 		return (ENOMEM);
 	PROC_LOCK(curproc);
-	if (ptoa(npages +
-	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))) >
-	    lim_cur(curproc, RLIMIT_MEMLOCK)) {
+	nsize = ptoa(npages +
+	    pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map)));
+	if (nsize > lim_cur(curproc, RLIMIT_MEMLOCK)) {
 		PROC_UNLOCK(curproc);
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(curproc);
+#ifdef CONTAINERS
+	if (rusage_set(curproc, RUSAGE_MEMLOCK, nsize))
+		return (ENOMEM);
+#endif
 #if 0
 	/*
 	 * XXX - not yet
@@ -218,6 +224,11 @@
 #endif
 	error = vm_map_wire(&curproc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+#ifdef CONTAINERS
+	if (error != KERN_SUCCESS)
+		rusage_sub(curproc, RUSAGE_MEMLOCK, 
+		    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
+#endif
 	/*
 	 * Return EFAULT on error to match copy{in,out}() behaviour
 	 * rather than returning ENOMEM like mlock() would.
@@ -233,6 +244,11 @@
 	(void)vm_map_unwire(&curproc->p_vmspace->vm_map,
 	    trunc_page((vm_offset_t)addr), round_page((vm_offset_t)addr + len),
 	    VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
+
+#ifdef CONTAINERS
+	rusage_set(curproc, RUSAGE_MEMLOCK,
+	    ptoa(pmap_wired_count(vm_map_pmap(&curproc->p_vmspace->vm_map))));
+#endif
 }
 
 /*

==== //depot/projects/soc2009/trasz_limits/sys/vm/vm_mmap.c#12 (text+ko) ====

@@ -46,6 +46,7 @@
 #include "opt_compat.h"
 #include "opt_hwpmc_hooks.h"
 
+#include <sys/container.h>
 #include <sys/param.h>
 #include <sys/systm.h>
 #include <sys/kernel.h>
@@ -1022,6 +1023,7 @@
 	struct proc *proc;
 	vm_offset_t addr, end, last, start;
 	vm_size_t npages, size;
+	unsigned long nsize;
 	int error;
 
 	error = priv_check(td, PRIV_VM_MLOCK);
@@ -1039,17 +1041,26 @@
 		return (ENOMEM);
 	proc = td->td_proc;
 	PROC_LOCK(proc);
-	if (ptoa(npages +
-	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))) >
-	    lim_cur(proc, RLIMIT_MEMLOCK)) {
+	nsize = ptoa(npages +
+	    pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map)));
+	if (nsize > lim_cur(proc, RLIMIT_MEMLOCK)) {
 		PROC_UNLOCK(proc);
 		return (ENOMEM);
 	}
 	PROC_UNLOCK(proc);
 	if (npages + cnt.v_wire_count > vm_page_max_wired)
 		return (EAGAIN);
+#ifdef CONTAINERS
+	if (rusage_set(proc, RUSAGE_MEMLOCK, nsize))
+		return (ENOMEM);
+#endif
 	error = vm_map_wire(&proc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef CONTAINERS
+	if (error != KERN_SUCCESS)
+		rusage_set(proc, RUSAGE_MEMLOCK,
+		    ptoa(pmap_wired_count(vm_map_pmap(&proc->p_vmspace->vm_map))));
+#endif
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 }
 
@@ -1082,8 +1093,7 @@
 	 * a hard resource limit, return ENOMEM.
 	 */
 	PROC_LOCK(td->td_proc);
-	if (map->size - ptoa(pmap_wired_count(vm_map_pmap(map)) >
-		lim_cur(td->td_proc, RLIMIT_MEMLOCK))) {
+	if (map->size > lim_cur(td->td_proc, RLIMIT_MEMLOCK)) {
 		PROC_UNLOCK(td->td_proc);
 		return (ENOMEM);
 	}
@@ -1093,6 +1103,10 @@
 	if (error)
 		return (error);
 #endif
+#ifdef CONTAINERS
+	if (rusage_set(td->td_proc, RUSAGE_MEMLOCK, map->size))
+		return (ENOMEM);
+#endif
 
 	if (uap->how & MCL_FUTURE) {
 		vm_map_lock(map);
@@ -1112,6 +1126,11 @@
 		    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
 		error = (error == KERN_SUCCESS ? 0 : EAGAIN);
 	}
+#ifdef CONTAINERS
+	if (error != KERN_SUCCESS)
+		rusage_set(td->td_proc, RUSAGE_MEMLOCK,
+		    ptoa(pmap_wired_count(vm_map_pmap(&td->td_proc->p_vmspace->vm_map))));
+#endif
 
 	return (error);
 }
@@ -1146,6 +1165,10 @@
 	/* Forcibly unwire all pages. */
 	error = vm_map_unwire(map, vm_map_min(map), vm_map_max(map),
 	    VM_MAP_WIRE_USER|VM_MAP_WIRE_HOLESOK);
+#ifdef CONTAINERS
+	if (error == KERN_SUCCESS)
+		rusage_set(td->td_proc, RUSAGE_MEMLOCK, 0);
+#endif
 
 	return (error);
 }
@@ -1180,6 +1203,10 @@
 		return (EINVAL);
 	error = vm_map_unwire(&td->td_proc->p_vmspace->vm_map, start, end,
 	    VM_MAP_WIRE_USER | VM_MAP_WIRE_NOHOLES);
+#ifdef CONTAINERS
+	if (error == KERN_SUCCESS)
+		rusage_sub(td->td_proc, RUSAGE_MEMLOCK, ptoa(end - start));
+#endif
 	return (error == KERN_SUCCESS ? 0 : ENOMEM);
 }
 


More information about the p4-projects mailing list