PERFORCE change 165083 for review
John Baldwin
jhb at FreeBSD.org
Wed Jun 24 18:53:06 UTC 2009
http://perforce.freebsd.org/chv.cgi?CH=165083
Change 165083 by jhb at jhb_jhbbsd on 2009/06/24 18:53:03
Missing bits of kib's RLIMIT_SWAP commit.
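For review context: these are the sys/vm pieces of the per-uid swap
accounting work. Map entries and anonymous VM objects grow a uip field
naming the uidinfo that is charged for their backing swap, and every
path that creates or destroys chargeable memory now pairs a reservation
with a release. Condensed into a sketch (illustrative only; the real
call sites are in the hunks below):

	/* Creation: take the charge up front, or fail the operation. */
	if (!swap_reserve(size))
		return (KERN_RESOURCE_SHORTAGE);
	uip = curthread->td_ucred->cr_ruidinfo;
	uihold(uip);
	entry->uip = uip;	/* or object->uip, with object->charge = size */

	/* Teardown: return exactly what was reserved. */
	swap_release_by_uid(size, entry->uip);
	uifree(entry->uip);
	entry->uip = NULL;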
Affected files ...
.. //depot/projects/smpng/sys/vm/vm_extern.h#34 edit
.. //depot/projects/smpng/sys/vm/vm_fault.c#76 edit
.. //depot/projects/smpng/sys/vm/vm_kern.c#44 edit
.. //depot/projects/smpng/sys/vm/vm_map.c#99 edit
.. //depot/projects/smpng/sys/vm/vm_map.h#43 edit
.. //depot/projects/smpng/sys/vm/vm_mmap.c#78 edit
.. //depot/projects/smpng/sys/vm/vm_object.c#109 edit
.. //depot/projects/smpng/sys/vm/vm_object.h#38 edit
.. //depot/projects/smpng/sys/vm/vm_pager.c#27 edit
.. //depot/projects/smpng/sys/vm/vm_pager.h#15 edit
.. //depot/projects/smpng/sys/vm/vnode_pager.c#74 edit
Differences ...
==== //depot/projects/smpng/sys/vm/vm_extern.h#34 (text+ko) ====
@@ -63,7 +63,7 @@
int vm_mmap(vm_map_t, vm_offset_t *, vm_size_t, vm_prot_t, vm_prot_t, int, objtype_t, void *, vm_ooffset_t);
void vm_set_page_size(void);
struct vmspace *vmspace_alloc(vm_offset_t, vm_offset_t);
-struct vmspace *vmspace_fork(struct vmspace *);
+struct vmspace *vmspace_fork(struct vmspace *, vm_ooffset_t *);
int vmspace_exec(struct proc *, vm_offset_t, vm_offset_t);
int vmspace_unshare(struct proc *);
void vmspace_exit(struct thread *);
==== //depot/projects/smpng/sys/vm/vm_fault.c#76 (text+ko) ====
@@ -1163,7 +1163,11 @@
VM_OBJECT_LOCK(dst_object);
dst_entry->object.vm_object = dst_object;
dst_entry->offset = 0;
-
+ if (dst_entry->uip != NULL) {
+ dst_object->uip = dst_entry->uip;
+ dst_object->charge = dst_entry->end - dst_entry->start;
+ dst_entry->uip = NULL;
+ }
prot = dst_entry->max_protection;
/*
==== //depot/projects/smpng/sys/vm/vm_kern.c#44 (text+ko) ====
@@ -235,7 +235,8 @@
*min = vm_map_min(parent);
ret = vm_map_find(parent, NULL, 0, min, size, superpage_align ?
- VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL, 0);
+ VMFS_ALIGNED_SPACE : VMFS_ANY_SPACE, VM_PROT_ALL, VM_PROT_ALL,
+ MAP_ACC_NO_CHARGE);
if (ret != KERN_SUCCESS)
panic("kmem_suballoc: bad status return of %d", ret);
*max = *min + size;
@@ -422,6 +423,8 @@
vm_offset_t addr;
size = round_page(size);
+ if (!swap_reserve(size))
+ return (0);
for (;;) {
/*
@@ -434,12 +437,14 @@
/* no space now; see if we can ever get space */
if (vm_map_max(map) - vm_map_min(map) < size) {
vm_map_unlock(map);
+ swap_release(size);
return (0);
}
map->needs_wakeup = TRUE;
vm_map_unlock_and_wait(map, 0);
}
- vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL, VM_PROT_ALL, 0);
+ vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
+ VM_PROT_ALL, MAP_ACC_CHARGED);
vm_map_unlock(map);
return (addr);
}
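The vm_kern.c hunks above are the template for the reserve-then-undo
shape used throughout the change: reserve before sleeping on the map,
release on every failure path, and pass MAP_ACC_CHARGED so that
vm_map_insert() does not charge a second time. Flattened into one
sketch (illustrative, condensed from the hunks above):

	size = round_page(size);
	if (!swap_reserve(size))
		return (0);
	vm_map_lock(map);
	if (vm_map_max(map) - vm_map_min(map) < size) {
		vm_map_unlock(map);
		swap_release(size);	/* undo: nothing was mapped */
		return (0);
	}
	vm_map_insert(map, NULL, 0, addr, addr + size, VM_PROT_ALL,
	    VM_PROT_ALL, MAP_ACC_CHARGED);	/* charge already taken */
	vm_map_unlock(map);
	return (addr);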
==== //depot/projects/smpng/sys/vm/vm_map.c#99 (text+ko) ====
@@ -149,6 +149,10 @@
static void vmspace_zdtor(void *mem, int size, void *arg);
#endif
+#define ENTRY_CHARGED(e) ((e)->uip != NULL || \
+ ((e)->object.vm_object != NULL && (e)->object.vm_object->uip != NULL && \
+ !((e)->eflags & MAP_ENTRY_NEEDS_COPY)))
+
/*
* PROC_VMSPACE_{UN,}LOCK() can be a noop as long as vmspaces are type
* stable.
@@ -1076,6 +1080,8 @@
vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
vm_eflags_t protoeflags;
+ struct uidinfo *uip;
+ boolean_t charge_prev_obj;
VM_MAP_ASSERT_LOCKED(map);
@@ -1103,6 +1109,7 @@
return (KERN_NO_SPACE);
protoeflags = 0;
+ charge_prev_obj = FALSE;
if (cow & MAP_COPY_ON_WRITE)
protoeflags |= MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY;
@@ -1118,6 +1125,27 @@
if (cow & MAP_DISABLE_COREDUMP)
protoeflags |= MAP_ENTRY_NOCOREDUMP;
+ uip = NULL;
+ KASSERT((object != kmem_object && object != kernel_object) ||
+ ((object == kmem_object || object == kernel_object) &&
+ !(protoeflags & MAP_ENTRY_NEEDS_COPY)),
+ ("kmem or kernel object and cow"));
+ if (cow & (MAP_ACC_NO_CHARGE | MAP_NOFAULT))
+ goto charged;
+ if ((cow & MAP_ACC_CHARGED) || ((prot & VM_PROT_WRITE) &&
+ ((protoeflags & MAP_ENTRY_NEEDS_COPY) || object == NULL))) {
+ if (!(cow & MAP_ACC_CHARGED) && !swap_reserve(end - start))
+ return (KERN_RESOURCE_SHORTAGE);
+ KASSERT(object == NULL || (cow & MAP_ENTRY_NEEDS_COPY) ||
+ object->uip == NULL,
+ ("OVERCOMMIT: vm_map_insert o %p", object));
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ if (object == NULL && !(protoeflags & MAP_ENTRY_NEEDS_COPY))
+ charge_prev_obj = TRUE;
+ }
+
+charged:
if (object != NULL) {
/*
* OBJ_ONEMAPPING must be cleared unless this mapping
@@ -1135,11 +1163,13 @@
(prev_entry->eflags == protoeflags) &&
(prev_entry->end == start) &&
(prev_entry->wired_count == 0) &&
- ((prev_entry->object.vm_object == NULL) ||
- vm_object_coalesce(prev_entry->object.vm_object,
- prev_entry->offset,
- (vm_size_t)(prev_entry->end - prev_entry->start),
- (vm_size_t)(end - prev_entry->end)))) {
+ (prev_entry->uip == uip ||
+ (prev_entry->object.vm_object != NULL &&
+ (prev_entry->object.vm_object->uip == uip))) &&
+ vm_object_coalesce(prev_entry->object.vm_object,
+ prev_entry->offset,
+ (vm_size_t)(prev_entry->end - prev_entry->start),
+ (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
/*
* We were able to extend the object. Determine if we
* can extend the previous map entry to include the
@@ -1152,6 +1182,8 @@
prev_entry->end = end;
vm_map_entry_resize_free(map, prev_entry);
vm_map_simplify_entry(map, prev_entry);
+ if (uip != NULL)
+ uifree(uip);
return (KERN_SUCCESS);
}
@@ -1165,6 +1197,12 @@
offset = prev_entry->offset +
(prev_entry->end - prev_entry->start);
vm_object_reference(object);
+ if (uip != NULL && object != NULL && object->uip != NULL &&
+ !(prev_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ /* Object already accounts for this uid. */
+ uifree(uip);
+ uip = NULL;
+ }
}
/*
@@ -1179,6 +1217,7 @@
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
+ new_entry->uip = NULL;
new_entry->eflags = protoeflags;
new_entry->object.vm_object = object;
@@ -1190,6 +1229,10 @@
new_entry->max_protection = max;
new_entry->wired_count = 0;
+ KASSERT(uip == NULL || !ENTRY_CHARGED(new_entry),
+ ("OVERCOMMIT: vm_map_insert leaks vm_map %p", new_entry));
+ new_entry->uip = uip;
+
/*
* Insert the new entry into the list
*/
@@ -1398,7 +1441,8 @@
(prev->protection == entry->protection) &&
(prev->max_protection == entry->max_protection) &&
(prev->inheritance == entry->inheritance) &&
- (prev->wired_count == entry->wired_count)) {
+ (prev->wired_count == entry->wired_count) &&
+ (prev->uip == entry->uip)) {
vm_map_entry_unlink(map, prev);
entry->start = prev->start;
entry->offset = prev->offset;
@@ -1416,6 +1460,8 @@
*/
if (prev->object.vm_object)
vm_object_deallocate(prev->object.vm_object);
+ if (prev->uip != NULL)
+ uifree(prev->uip);
vm_map_entry_dispose(map, prev);
}
}
@@ -1431,7 +1477,8 @@
(next->protection == entry->protection) &&
(next->max_protection == entry->max_protection) &&
(next->inheritance == entry->inheritance) &&
- (next->wired_count == entry->wired_count)) {
+ (next->wired_count == entry->wired_count) &&
+ (next->uip == entry->uip)) {
vm_map_entry_unlink(map, next);
entry->end = next->end;
vm_map_entry_resize_free(map, entry);
@@ -1441,6 +1488,8 @@
*/
if (next->object.vm_object)
vm_object_deallocate(next->object.vm_object);
+ if (next->uip != NULL)
+ uifree(next->uip);
vm_map_entry_dispose(map, next);
}
}
@@ -1489,6 +1538,21 @@
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
+ if (entry->uip != NULL) {
+ object->uip = entry->uip;
+ object->charge = entry->end - entry->start;
+ entry->uip = NULL;
+ }
+ } else if (entry->object.vm_object != NULL &&
+ ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+ entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ KASSERT(entry->object.vm_object->uip == NULL,
+ ("OVERCOMMIT: vm_entry_clip_start: both uip e %p", entry));
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = entry->end - entry->start;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
}
new_entry = vm_map_entry_create(map);
@@ -1497,6 +1561,8 @@
new_entry->end = start;
entry->offset += (start - entry->start);
entry->start = start;
+ if (new_entry->uip != NULL)
+ uihold(entry->uip);
vm_map_entry_link(map, entry->prev, new_entry);
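A rule that repeats through the clip, fork, and shadow hunks: the
charge lives on the map entry only while the entry lacks its own
anonymous object; once such an object exists, ownership of the
reservation moves to it, and the uidinfo reference moves with it
(sketch):

	if (entry->uip != NULL) {
		object->uip = entry->uip;	/* reference moves, no uihold() */
		object->charge = entry->end - entry->start;
		entry->uip = NULL;
	}

Clipping an entry in two is the one case that needs a fresh reference,
since both halves afterwards point at the same uidinfo; that is what
the uihold() above is for.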
@@ -1542,6 +1608,21 @@
atop(entry->end - entry->start));
entry->object.vm_object = object;
entry->offset = 0;
+ if (entry->uip != NULL) {
+ object->uip = entry->uip;
+ object->charge = entry->end - entry->start;
+ entry->uip = NULL;
+ }
+ } else if (entry->object.vm_object != NULL &&
+ ((entry->eflags & MAP_ENTRY_NEEDS_COPY) == 0) &&
+ entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ KASSERT(entry->object.vm_object->uip == NULL,
+ ("OVERCOMMIT: vm_entry_clip_end: both uip e %p", entry));
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = entry->end - entry->start;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
}
/*
@@ -1552,6 +1633,8 @@
new_entry->start = entry->end = end;
new_entry->offset += (end - entry->start);
+ if (new_entry->uip != NULL)
+ uihold(entry->uip);
vm_map_entry_link(map, entry, new_entry);
@@ -1724,6 +1807,8 @@
{
vm_map_entry_t current;
vm_map_entry_t entry;
+ vm_object_t obj;
+ struct uidinfo *uip;
vm_map_lock(map);
@@ -1751,6 +1836,61 @@
current = current->next;
}
+
+ /*
+ * Do an accounting pass for private read-only mappings that
+ * will now perform copy-on-write due to the newly allowed
+ * write access (e.g., a debugger setting a breakpoint in the
+ * text segment).
+ */
+ for (current = entry; (current != &map->header) &&
+ (current->start < end); current = current->next) {
+
+ vm_map_clip_end(map, current, end);
+
+ if (set_max ||
+ ((new_prot & ~(current->protection)) & VM_PROT_WRITE) == 0 ||
+ ENTRY_CHARGED(current)) {
+ continue;
+ }
+
+ uip = curthread->td_ucred->cr_ruidinfo;
+ obj = current->object.vm_object;
+
+ if (obj == NULL || (current->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ if (!swap_reserve(current->end - current->start)) {
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ uihold(uip);
+ current->uip = uip;
+ continue;
+ }
+
+ VM_OBJECT_LOCK(obj);
+ if (obj->type != OBJT_DEFAULT && obj->type != OBJT_SWAP) {
+ VM_OBJECT_UNLOCK(obj);
+ continue;
+ }
+
+ /*
+ * Charge for the whole object allocation now, since
+ * we cannot distinguish between non-charged and
+ * charged clipped mappings of the same object later.
+ */
+ KASSERT(obj->charge == 0,
+ ("vm_map_protect: object %p overcharged\n", obj));
+ if (!swap_reserve(ptoa(obj->size))) {
+ VM_OBJECT_UNLOCK(obj);
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+
+ uihold(uip);
+ obj->uip = uip;
+ obj->charge = ptoa(obj->size);
+ VM_OBJECT_UNLOCK(obj);
+ }
+
/*
* Go back and fix up protections. [Note that clipping is not
* necessary the second time.]
@@ -1759,8 +1899,6 @@
while ((current != &map->header) && (current->start < end)) {
vm_prot_t old_prot;
- vm_map_clip_end(map, current, end);
-
old_prot = current->protection;
if (set_max)
current->protection =
@@ -2470,14 +2608,25 @@
vm_map_entry_delete(vm_map_t map, vm_map_entry_t entry)
{
vm_object_t object;
- vm_pindex_t offidxstart, offidxend, count;
+ vm_pindex_t offidxstart, offidxend, count, size1;
+ vm_ooffset_t size;
vm_map_entry_unlink(map, entry);
- map->size -= entry->end - entry->start;
+ object = entry->object.vm_object;
+ size = entry->end - entry->start;
+ map->size -= size;
+
+ if (entry->uip != NULL) {
+ swap_release_by_uid(size, entry->uip);
+ uifree(entry->uip);
+ }
if ((entry->eflags & MAP_ENTRY_IS_SUB_MAP) == 0 &&
- (object = entry->object.vm_object) != NULL) {
- count = OFF_TO_IDX(entry->end - entry->start);
+ (object != NULL)) {
+ KASSERT(entry->uip == NULL || object->uip == NULL ||
+ (entry->eflags & MAP_ENTRY_NEEDS_COPY),
+ ("OVERCOMMIT vm_map_entry_delete: both uip %p", entry));
+ count = OFF_TO_IDX(size);
offidxstart = OFF_TO_IDX(entry->offset);
offidxend = offidxstart + count;
VM_OBJECT_LOCK(object);
@@ -2489,8 +2638,17 @@
if (object->type == OBJT_SWAP)
swap_pager_freespace(object, offidxstart, count);
if (offidxend >= object->size &&
- offidxstart < object->size)
+ offidxstart < object->size) {
+ size1 = object->size;
object->size = offidxstart;
+ if (object->uip != NULL) {
+ size1 -= object->size;
+ KASSERT(object->charge >= ptoa(size1),
+ ("vm_map_entry_delete: object->charge < 0"));
+ swap_release_by_uid(ptoa(size1), object->uip);
+ object->charge -= ptoa(size1);
+ }
+ }
}
VM_OBJECT_UNLOCK(object);
} else
@@ -2664,9 +2822,13 @@
vm_map_t src_map,
vm_map_t dst_map,
vm_map_entry_t src_entry,
- vm_map_entry_t dst_entry)
+ vm_map_entry_t dst_entry,
+ vm_ooffset_t *fork_charge)
{
vm_object_t src_object;
+ vm_offset_t size;
+ struct uidinfo *uip;
+ int charged;
VM_MAP_ASSERT_LOCKED(dst_map);
@@ -2689,8 +2851,10 @@
/*
* Make a copy of the object.
*/
+ size = src_entry->end - src_entry->start;
if ((src_object = src_entry->object.vm_object) != NULL) {
VM_OBJECT_LOCK(src_object);
+ charged = ENTRY_CHARGED(src_entry);
if ((src_object->handle == NULL) &&
(src_object->type == OBJT_DEFAULT ||
src_object->type == OBJT_SWAP)) {
@@ -2702,14 +2866,39 @@
}
vm_object_reference_locked(src_object);
vm_object_clear_flag(src_object, OBJ_ONEMAPPING);
+ if (src_entry->uip != NULL &&
+ !(src_entry->eflags & MAP_ENTRY_NEEDS_COPY)) {
+ KASSERT(src_object->uip == NULL,
+ ("OVERCOMMIT: vm_map_copy_entry: uip %p",
+ src_object));
+ src_object->uip = src_entry->uip;
+ src_object->charge = size;
+ }
VM_OBJECT_UNLOCK(src_object);
dst_entry->object.vm_object = src_object;
+ if (charged) {
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ dst_entry->uip = uip;
+ *fork_charge += size;
+ if (!(src_entry->eflags &
+ MAP_ENTRY_NEEDS_COPY)) {
+ uihold(uip);
+ src_entry->uip = uip;
+ *fork_charge += size;
+ }
+ }
src_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->eflags |= (MAP_ENTRY_COW|MAP_ENTRY_NEEDS_COPY);
dst_entry->offset = src_entry->offset;
} else {
dst_entry->object.vm_object = NULL;
dst_entry->offset = 0;
+ if (src_entry->uip != NULL) {
+ dst_entry->uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(dst_entry->uip);
+ *fork_charge += size;
+ }
}
pmap_copy(dst_map->pmap, src_map->pmap, dst_entry->start,
@@ -2766,7 +2955,7 @@
* The source map must not be locked.
*/
struct vmspace *
-vmspace_fork(struct vmspace *vm1)
+vmspace_fork(struct vmspace *vm1, vm_ooffset_t *fork_charge)
{
struct vmspace *vm2;
vm_map_t old_map = &vm1->vm_map;
@@ -2777,7 +2966,6 @@
int locked;
vm_map_lock(old_map);
-
vm2 = vmspace_alloc(old_map->min_offset, old_map->max_offset);
if (vm2 == NULL)
goto unlock_and_return;
@@ -2809,6 +2997,12 @@
atop(old_entry->end - old_entry->start));
old_entry->object.vm_object = object;
old_entry->offset = 0;
+ if (old_entry->uip != NULL) {
+ object->uip = old_entry->uip;
+ object->charge = old_entry->end -
+ old_entry->start;
+ old_entry->uip = NULL;
+ }
}
/*
@@ -2835,6 +3029,12 @@
}
VM_OBJECT_LOCK(object);
vm_object_clear_flag(object, OBJ_ONEMAPPING);
+ if (old_entry->uip != NULL) {
+ KASSERT(object->uip == NULL, ("vmspace_fork both uip"));
+ object->uip = old_entry->uip;
+ object->charge = old_entry->end - old_entry->start;
+ old_entry->uip = NULL;
+ }
VM_OBJECT_UNLOCK(object);
/*
@@ -2877,7 +3077,7 @@
new_entry);
vmspace_map_entry_forked(vm1, vm2, new_entry);
vm_map_copy_entry(old_map, new_map, old_entry,
- new_entry);
+ new_entry, fork_charge);
break;
}
old_entry = old_entry->next;
@@ -3005,6 +3205,7 @@
size_t grow_amount, max_grow;
rlim_t stacklim, vmemlim;
int is_procstack, rv;
+ struct uidinfo *uip;
Retry:
PROC_LOCK(p);
@@ -3170,13 +3371,17 @@
}
grow_amount = addr - stack_entry->end;
-
+ uip = stack_entry->uip;
+ if (uip == NULL && stack_entry->object.vm_object != NULL)
+ uip = stack_entry->object.vm_object->uip;
+ if (uip != NULL && !swap_reserve_by_uid(grow_amount, uip))
+ rv = KERN_NO_SPACE;
/* Grow the underlying object if applicable. */
- if (stack_entry->object.vm_object == NULL ||
- vm_object_coalesce(stack_entry->object.vm_object,
- stack_entry->offset,
- (vm_size_t)(stack_entry->end - stack_entry->start),
- (vm_size_t)grow_amount)) {
+ else if (stack_entry->object.vm_object == NULL ||
+ vm_object_coalesce(stack_entry->object.vm_object,
+ stack_entry->offset,
+ (vm_size_t)(stack_entry->end - stack_entry->start),
+ (vm_size_t)grow_amount, uip != NULL)) {
map->size += (addr - stack_entry->end);
/* Update the current entry. */
stack_entry->end = addr;
@@ -3249,12 +3454,18 @@
{
struct vmspace *oldvmspace = p->p_vmspace;
struct vmspace *newvmspace;
+ vm_ooffset_t fork_charge;
if (oldvmspace->vm_refcnt == 1)
return (0);
- newvmspace = vmspace_fork(oldvmspace);
+ fork_charge = 0;
+ newvmspace = vmspace_fork(oldvmspace, &fork_charge);
if (newvmspace == NULL)
return (ENOMEM);
+ if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
+ vmspace_free(newvmspace);
+ return (ENOMEM);
+ }
PROC_VMSPACE_LOCK(p);
p->p_vmspace = newvmspace;
PROC_VMSPACE_UNLOCK(p);
@@ -3300,6 +3511,9 @@
vm_map_t map = *var_map;
vm_prot_t prot;
vm_prot_t fault_type = fault_typea;
+ vm_object_t eobject;
+ struct uidinfo *uip;
+ vm_ooffset_t size;
RetryLookup:;
@@ -3356,7 +3570,7 @@
*wired = (entry->wired_count != 0);
if (*wired)
prot = fault_type = entry->protection;
-
+ size = entry->end - entry->start;
/*
* If the entry was copy-on-write, we either ...
*/
@@ -3378,11 +3592,40 @@
if (vm_map_lock_upgrade(map))
goto RetryLookup;
+ if (entry->uip == NULL) {
+ /*
+ * The debugger owner is charged for
+ * the memory.
+ */
+ uip = curthread->td_ucred->cr_ruidinfo;
+ uihold(uip);
+ if (!swap_reserve_by_uid(size, uip)) {
+ uifree(uip);
+ vm_map_unlock(map);
+ return (KERN_RESOURCE_SHORTAGE);
+ }
+ entry->uip = uip;
+ }
vm_object_shadow(
&entry->object.vm_object,
&entry->offset,
- atop(entry->end - entry->start));
+ atop(size));
entry->eflags &= ~MAP_ENTRY_NEEDS_COPY;
+ eobject = entry->object.vm_object;
+ if (eobject->uip != NULL) {
+ /*
+ * The object was not shadowed.
+ */
+ swap_release_by_uid(size, entry->uip);
+ uifree(entry->uip);
+ entry->uip = NULL;
+ } else if (entry->uip != NULL) {
+ VM_OBJECT_LOCK(eobject);
+ eobject->uip = entry->uip;
+ eobject->charge = size;
+ VM_OBJECT_UNLOCK(eobject);
+ entry->uip = NULL;
+ }
vm_map_lock_downgrade(map);
} else {
@@ -3402,8 +3645,15 @@
if (vm_map_lock_upgrade(map))
goto RetryLookup;
entry->object.vm_object = vm_object_allocate(OBJT_DEFAULT,
- atop(entry->end - entry->start));
+ atop(size));
entry->offset = 0;
+ if (entry->uip != NULL) {
+ VM_OBJECT_LOCK(entry->object.vm_object);
+ entry->object.vm_object->uip = entry->uip;
+ entry->object.vm_object->charge = size;
+ VM_OBJECT_UNLOCK(entry->object.vm_object);
+ entry->uip = NULL;
+ }
vm_map_lock_downgrade(map);
}
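The copy-on-write branch above also covers a subtle case: the faulting
uid (for example, a debugger writing to another process's text) is
charged before the shadow is attempted, and the reservation is handed
back if vm_object_shadow() declined to create a new object because the
existing one was not shared. In sketch form (condensed from the hunk
above; object locking omitted):

	uip = curthread->td_ucred->cr_ruidinfo;
	uihold(uip);
	if (!swap_reserve_by_uid(size, uip)) {
		uifree(uip);
		return (KERN_RESOURCE_SHORTAGE);
	}
	entry->uip = uip;
	vm_object_shadow(&entry->object.vm_object, &entry->offset,
	    atop(size));
	eobject = entry->object.vm_object;
	if (eobject->uip != NULL) {
		/* Not shadowed; the old object keeps its charge. */
		swap_release_by_uid(size, entry->uip);
		uifree(entry->uip);
	} else {
		eobject->uip = entry->uip;	/* charge moves to the shadow */
		eobject->charge = size;
	}
	entry->uip = NULL;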
@@ -3583,9 +3833,15 @@
db_indent -= 2;
}
} else {
+ if (entry->uip != NULL)
+ db_printf(", uip %d", entry->uip->ui_uid);
db_printf(", object=%p, offset=0x%jx",
(void *)entry->object.vm_object,
(uintmax_t)entry->offset);
+ if (entry->object.vm_object && entry->object.vm_object->uip)
+ db_printf(", obj uip %d charge %jx",
+ entry->object.vm_object->uip->ui_uid,
+ (uintmax_t)entry->object.vm_object->charge);
if (entry->eflags & MAP_ENTRY_COW)
db_printf(", copy (%s)",
(entry->eflags & MAP_ENTRY_NEEDS_COPY) ? "needed" : "done");
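Note the changed contract of vmspace_fork(): it no longer reserves swap
itself, it only accumulates the child's prospective charge into
*fork_charge. Every caller must then reserve against the appropriate
uid and destroy the new vmspace on failure, exactly as the
vmspace_unshare() hunk above does. A sketch of the caller side (copied
in shape from vmspace_unshare(); p stands for whichever process is
being charged):

	vm_ooffset_t fork_charge = 0;

	newvmspace = vmspace_fork(oldvmspace, &fork_charge);
	if (newvmspace == NULL)
		return (ENOMEM);
	if (!swap_reserve_by_uid(fork_charge, p->p_ucred->cr_ruidinfo)) {
		vmspace_free(newvmspace);
		return (ENOMEM);
	}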
==== //depot/projects/smpng/sys/vm/vm_map.h#43 (text+ko) ====
@@ -114,6 +114,7 @@
vm_inherit_t inheritance; /* inheritance */
int wired_count; /* can be paged if = 0 */
vm_pindex_t lastr; /* last read */
+ struct uidinfo *uip; /* tmp storage for creator ref */
};
#define MAP_ENTRY_NOSYNC 0x0001
@@ -310,6 +311,8 @@
#define MAP_PREFAULT_MADVISE 0x0200 /* from (user) madvise request */
#define MAP_STACK_GROWS_DOWN 0x1000
#define MAP_STACK_GROWS_UP 0x2000
+#define MAP_ACC_CHARGED 0x4000
+#define MAP_ACC_NO_CHARGE 0x8000
/*
* vm_fault option flags
==== //depot/projects/smpng/sys/vm/vm_mmap.c#78 (text+ko) ====
@@ -633,6 +633,8 @@
return (0);
case KERN_PROTECTION_FAILURE:
return (EACCES);
+ case KERN_RESOURCE_SHORTAGE:
+ return (ENOMEM);
}
return (EINVAL);
}
@@ -1208,7 +1210,7 @@
objsize = round_page(va.va_size);
if (va.va_nlink == 0)
flags |= MAP_NOSYNC;
- obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff);
+ obj = vm_pager_allocate(OBJT_VNODE, vp, objsize, prot, foff, td->td_ucred);
if (obj == NULL) {
error = ENOMEM;
goto done;
@@ -1289,7 +1291,8 @@
dev_relthread(cdev);
if (error != ENODEV)
return (error);
- obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff);
+ obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
+ td->td_ucred);
if (obj == NULL)
return (EINVAL);
*objp = obj;
==== //depot/projects/smpng/sys/vm/vm_object.c#109 (text+ko) ====
@@ -77,6 +77,7 @@
#include <sys/mutex.h>
#include <sys/proc.h> /* for curproc, pageproc */
#include <sys/socket.h>
+#include <sys/resourcevar.h>
#include <sys/vnode.h>
#include <sys/vmmeter.h>
#include <sys/sx.h>
@@ -222,6 +223,8 @@
object->generation = 1;
object->ref_count = 1;
object->flags = 0;
+ object->uip = NULL;
+ object->charge = 0;
if ((object->type == OBJT_DEFAULT) || (object->type == OBJT_SWAP))
object->flags = OBJ_ONEMAPPING;
object->pg_color = 0;
@@ -609,6 +612,20 @@
mtx_unlock(&vm_object_list_mtx);
/*
+ * Release the allocation charge.
+ */
+ if (object->uip != NULL) {
+ KASSERT(object->type == OBJT_DEFAULT ||
+ object->type == OBJT_SWAP,
+ ("vm_object_terminate: non-swap obj %p has uip",
+ object));
+ swap_release_by_uid(object->charge, object->uip);
+ object->charge = 0;
+ uifree(object->uip);
+ object->uip = NULL;
+ }
+
+ /*
* Free the space for the object.
*/
uma_zfree(obj_zone, object);
@@ -1347,6 +1364,14 @@
orig_object->backing_object_offset + entry->offset;
new_object->backing_object = source;
}
+ if (orig_object->uip != NULL) {
+ new_object->uip = orig_object->uip;
+ uihold(orig_object->uip);
+ new_object->charge = ptoa(size);
+ KASSERT(orig_object->charge >= ptoa(size),
+ ("orig_object->charge < 0"));
+ orig_object->charge -= ptoa(size);
+ }
retry:
if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
if (m->pindex < offidxstart) {
@@ -1757,6 +1782,13 @@
* and no object references within it, all that is
* necessary is to dispose of it.
*/
+ if (backing_object->uip != NULL) {
+ swap_release_by_uid(backing_object->charge,
+ backing_object->uip);
+ backing_object->charge = 0;
+ uifree(backing_object->uip);
+ backing_object->uip = NULL;
+ }
KASSERT(backing_object->ref_count == 1, ("backing_object %p was somehow re-referenced during collapse!", backing_object));
VM_OBJECT_UNLOCK(backing_object);
@@ -1994,13 +2026,15 @@
* prev_offset Offset into prev_object
* prev_size Size of reference to prev_object
* next_size Size of reference to the second object
+ * reserved Indicator that extension region has
+ * swap accounted for
*
* Conditions:
* The object must *not* be locked.
*/
boolean_t
vm_object_coalesce(vm_object_t prev_object, vm_ooffset_t prev_offset,
- vm_size_t prev_size, vm_size_t next_size)
+ vm_size_t prev_size, vm_size_t next_size, boolean_t reserved)
{
vm_pindex_t next_pindex;
@@ -2039,6 +2073,28 @@
}
/*
+ * Account for the charge.
+ */
+ if (prev_object->uip != NULL) {
+
+ /*
+ * If prev_object was charged, then this mapping,
+ * although not charged now, may become writable
+ * later. A non-NULL uip in the object would prevent
+ * swap reservation when write access is enabled, so
+ * reserve swap now. A failed reservation causes
+ * allocation of a separate object for the map entry,
+ * and swap reservation for that entry is managed at
+ * the appropriate time.
+ */
+ if (!reserved && !swap_reserve_by_uid(ptoa(next_size),
+ prev_object->uip)) {
+ return (FALSE);
+ }
+ prev_object->charge += ptoa(next_size);
+ }
+
+ /*
* Remove any pages that may still be in the object from a previous
* deallocation.
*/
@@ -2049,6 +2105,16 @@
if (prev_object->type == OBJT_SWAP)
swap_pager_freespace(prev_object,
next_pindex, next_size);
+#if 0
+ if (prev_object->uip != NULL) {
+ KASSERT(prev_object->charge >=
+ ptoa(prev_object->size - next_pindex),
+ ("object %p overcharged 1 %jx %jx", prev_object,
+ (uintmax_t)next_pindex, (uintmax_t)next_size));
+ prev_object->charge -= ptoa(prev_object->size -
+ next_pindex);
+ }
+#endif
}
/*
@@ -2198,9 +2264,10 @@
return;
db_iprintf(
- "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x\n",
+ "Object %p: type=%d, size=0x%jx, res=%d, ref=%d, flags=0x%x uip %d charge %jx\n",
object, (int)object->type, (uintmax_t)object->size,
- object->resident_page_count, object->ref_count, object->flags);
+ object->resident_page_count, object->ref_count, object->flags,
+ object->uip ? object->uip->ui_uid : -1, (uintmax_t)object->charge);
db_iprintf(" sref=%d, backing_object(%d)=(%p)+0x%jx\n",
object->shadow_count,
object->backing_object ? object->backing_object->ref_count : 0,
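The object-side lifecycle mirrors the map side: vm_object_terminate()
and the collapse path return the whole outstanding charge,
vm_object_split() moves a proportional slice to the new object, and
vm_map_entry_delete() trims the charge when it shrinks object->size.
The invariant these hunks maintain, as a sketch:

	/* Only anonymous (swap-backed) memory carries a charge. */
	KASSERT(object->uip == NULL || object->type == OBJT_DEFAULT ||
	    object->type == OBJT_SWAP, ("charged non-anonymous object"));

	/* The charge must be returned before the object goes away. */
	if (object->uip != NULL) {
		swap_release_by_uid(object->charge, object->uip);
		object->charge = 0;
		uifree(object->uip);
		object->uip = NULL;
	}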
==== //depot/projects/smpng/sys/vm/vm_object.h#38 (text+ko) ====
@@ -133,6 +133,8 @@
int swp_bcount;
} swp;
} un_pager;
+ struct uidinfo *uip;
+ vm_ooffset_t charge;
};
/*
@@ -198,7 +200,8 @@
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
-boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t);
+boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,
+ boolean_t);
void vm_object_collapse (vm_object_t);
void vm_object_deallocate (vm_object_t);
void vm_object_destroy (vm_object_t);
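Callers of vm_object_coalesce() must now say whether swap for the
extension region is already reserved: vm_map_insert() passes
charge_prev_obj, and the stack-growth path passes uip != NULL. A sketch
of the new call shape (from the vm_map.c hunks above):

	if (vm_object_coalesce(prev_entry->object.vm_object,
	    prev_entry->offset,
	    (vm_size_t)(prev_entry->end - prev_entry->start),
	    (vm_size_t)(end - prev_entry->end), charge_prev_obj)) {
		/* Extended; any needed charge was taken inside. */
	}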
==== //depot/projects/smpng/sys/vm/vm_pager.c#27 (text+ko) ====
@@ -88,7 +88,7 @@
static int dead_pager_getpages(vm_object_t, vm_page_t *, int, int);
static vm_object_t dead_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
- vm_ooffset_t);
+ vm_ooffset_t, struct ucred *);
static void dead_pager_putpages(vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
static void dead_pager_dealloc(vm_object_t);
@@ -105,7 +105,7 @@
static vm_object_t
dead_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
- vm_ooffset_t off)
+ vm_ooffset_t off, struct ucred *cred)
{
return NULL;
}
@@ -227,14 +227,14 @@
*/
vm_object_t
vm_pager_allocate(objtype_t type, void *handle, vm_ooffset_t size,
- vm_prot_t prot, vm_ooffset_t off)
+ vm_prot_t prot, vm_ooffset_t off, struct ucred *cred)
{
vm_object_t ret;
struct pagerops *ops;
ops = pagertab[type];
if (ops)
- ret = (*ops->pgo_alloc) (handle, size, prot, off);
+ ret = (*ops->pgo_alloc) (handle, size, prot, off, cred);
else
ret = NULL;
return (ret);
==== //depot/projects/smpng/sys/vm/vm_pager.h#15 (text+ko) ====
@@ -47,7 +47,8 @@
TAILQ_HEAD(pagerlst, vm_object);
typedef void pgo_init_t(void);
-typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+typedef vm_object_t pgo_alloc_t(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t,
+ struct ucred *);
typedef void pgo_dealloc_t(vm_object_t);
typedef int pgo_getpages_t(vm_object_t, vm_page_t *, int, int);
typedef void pgo_putpages_t(vm_object_t, vm_page_t *, int, int, int *);
@@ -100,7 +101,8 @@
extern struct pagerops *pagertab[];
extern struct mtx pbuf_mtx;
-vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+vm_object_t vm_pager_allocate(objtype_t, void *, vm_ooffset_t, vm_prot_t,
+ vm_ooffset_t, struct ucred *);
void vm_pager_bufferinit(void);
void vm_pager_deallocate(vm_object_t);
static __inline int vm_pager_get_pages(vm_object_t, vm_page_t *, int, int);
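The pager KPI change is mechanical but touches every pager: pgo_alloc
and vm_pager_allocate() now carry the allocating thread's ucred, so
pagers that create swap-backed objects can charge the correct uid.
Out-of-tree pagers need their allocators updated along these lines
(hypothetical pager; the name and body are illustrative only):

	static vm_object_t
	mypager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot,
	    vm_ooffset_t off, struct ucred *cred)
	{

		/* cred->cr_ruidinfo is now available for swap accounting. */
		return (vm_object_allocate(OBJT_DEFAULT,
		    OFF_TO_IDX(round_page(off + size))));
	}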
==== //depot/projects/smpng/sys/vm/vnode_pager.c#74 (text+ko) ====
@@ -83,7 +83,8 @@
static int vnode_pager_getpages(vm_object_t, vm_page_t *, int, int);
static void vnode_pager_putpages(vm_object_t, vm_page_t *, int, boolean_t, int *);
static boolean_t vnode_pager_haspage(vm_object_t, vm_pindex_t, int *, int *);
-static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t, vm_ooffset_t);
+static vm_object_t vnode_pager_alloc(void *, vm_ooffset_t, vm_prot_t,
+ vm_ooffset_t, struct ucred *cred);
struct pagerops vnodepagerops = {
.pgo_alloc = vnode_pager_alloc,
@@ -128,7 +129,7 @@
}
}
- object = vnode_pager_alloc(vp, size, 0, 0);
+ object = vnode_pager_alloc(vp, size, 0, 0, td->td_ucred);
/*
* Dereference the reference we just created. This assumes
* that the object is associated with the vp.
@@ -185,7 +186,7 @@
>>> TRUNCATED FOR MAIL (1000 lines) <<<