sysvshm: replace Giant with a local sx lock
Mateusz Guzik
mjguzik at gmail.com
Tue Apr 23 20:38:28 UTC 2013
Hello,
I would like to replace Giant with a local sx lock in sysvshm code.
It looked really straightforward, so maybe I missed something.
Patch: http://people.freebsd.org/~mjg/patches/sysvshm-giant-sx.patch
Inlined version below for comments:
diff --git a/sys/kern/sysv_shm.c b/sys/kern/sysv_shm.c
index a1c6b34..480a3b2 100644
--- a/sys/kern/sysv_shm.c
+++ b/sys/kern/sysv_shm.c
@@ -111,6 +111,7 @@ static int shmget_existing(struct thread *td, struct shmget_args *uap,
#define SHMSEG_ALLOCATED 0x0800
#define SHMSEG_WANTED 0x1000
+static struct sx shm_lock;
static int shm_last_free, shm_nused, shmalloced;
vm_size_t shm_committed;
static struct shmid_kernel *shmsegs;
@@ -181,8 +182,8 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, shm_use_phys, CTLFLAG_RW,
SYSCTL_INT(_kern_ipc, OID_AUTO, shm_allow_removed, CTLFLAG_RW,
&shm_allow_removed, 0,
"Enable/Disable attachment to attached segments marked for removal");
-SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD,
- NULL, 0, sysctl_shmsegs, "",
+SYSCTL_PROC(_kern_ipc, OID_AUTO, shmsegs, CTLTYPE_OPAQUE | CTLFLAG_RD |
+ CTLFLAG_MPSAFE, NULL, 0, sysctl_shmsegs, "",
"Current number of shared memory segments allocated");
static int
@@ -191,6 +192,8 @@ shm_find_segment_by_key(key)
{
int i;
+ sx_assert(&shm_lock, SA_XLOCKED);
+
for (i = 0; i < shmalloced; i++)
if ((shmsegs[i].u.shm_perm.mode & SHMSEG_ALLOCATED) &&
shmsegs[i].u.shm_perm.key == key)
@@ -204,6 +207,8 @@ shm_find_segment_by_shmid(int shmid)
int segnum;
struct shmid_kernel *shmseg;
+ sx_assert(&shm_lock, SA_XLOCKED);
+
segnum = IPCID_TO_IX(shmid);
if (segnum < 0 || segnum >= shmalloced)
return (NULL);
@@ -221,6 +226,8 @@ shm_find_segment_by_shmidx(int segnum)
{
struct shmid_kernel *shmseg;
+ sx_assert(&shm_lock, SA_XLOCKED);
+
if (segnum < 0 || segnum >= shmalloced)
return (NULL);
shmseg = &shmsegs[segnum];
@@ -237,7 +244,7 @@ shm_deallocate_segment(shmseg)
{
vm_size_t size;
- GIANT_REQUIRED;
+ sx_assert(&shm_lock, SA_XLOCKED);
vm_object_deallocate(shmseg->object);
shmseg->object = NULL;
@@ -261,7 +268,7 @@ shm_delete_mapping(struct vmspace *vm, struct shmmap_state *shmmap_s)
int segnum, result;
vm_size_t size;
- GIANT_REQUIRED;
+ sx_assert(&shm_lock, SA_XLOCKED);
segnum = IPCID_TO_IX(shmmap_s->shmid);
shmseg = &shmsegs[segnum];
@@ -299,7 +306,7 @@ sys_shmdt(td, uap)
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
shmmap_s = p->p_vmspace->vm_shm;
if (shmmap_s == NULL) {
error = EINVAL;
@@ -323,7 +330,7 @@ sys_shmdt(td, uap)
#endif
error = shm_delete_mapping(p->p_vmspace, shmmap_s);
done2:
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
}
@@ -353,7 +360,7 @@ kern_shmat(td, shmid, shmaddr, shmflg)
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
shmmap_s = p->p_vmspace->vm_shm;
if (shmmap_s == NULL) {
shmmap_s = malloc(shminfo.shmseg * sizeof(struct shmmap_state),
@@ -428,7 +435,7 @@ kern_shmat(td, shmid, shmaddr, shmflg)
shmseg->u.shm_nattch++;
td->td_retval[0] = attach_va;
done2:
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
}
@@ -454,7 +461,7 @@ kern_shmctl(td, shmid, cmd, buf, bufsz)
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
switch (cmd) {
/*
* It is possible that kern_shmctl is being called from the Linux ABI
@@ -546,7 +553,7 @@ kern_shmctl(td, shmid, cmd, buf, bufsz)
break;
}
done2:
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
}
@@ -611,6 +618,8 @@ shmget_existing(td, uap, mode, segnum)
struct shmid_kernel *shmseg;
int error;
+ sx_assert(&shm_lock, SA_XLOCKED);
+
shmseg = &shmsegs[segnum];
if (shmseg->u.shm_perm.mode & SHMSEG_REMOVED) {
/*
@@ -649,7 +658,7 @@ shmget_allocate_segment(td, uap, mode)
struct shmid_kernel *shmseg;
vm_object_t shm_object;
- GIANT_REQUIRED;
+ sx_assert(&shm_lock, SA_XLOCKED);
if (uap->size < shminfo.shmmin || uap->size > shminfo.shmmax)
return (EINVAL);
@@ -758,7 +767,7 @@ sys_shmget(td, uap)
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
mode = uap->shmflg & ACCESSPERMS;
if (uap->key != IPC_PRIVATE) {
again:
@@ -776,7 +785,7 @@ sys_shmget(td, uap)
}
error = shmget_allocate_segment(td, uap, mode);
done2:
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
}
@@ -788,7 +797,7 @@ shmfork_myhook(p1, p2)
size_t size;
int i;
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
size = shminfo.shmseg * sizeof(struct shmmap_state);
shmmap_s = malloc(size, M_SHM, M_WAITOK);
bcopy(p1->p_vmspace->vm_shm, shmmap_s, size);
@@ -796,7 +805,7 @@ shmfork_myhook(p1, p2)
for (i = 0; i < shminfo.shmseg; i++, shmmap_s++)
if (shmmap_s->shmid != -1)
shmsegs[IPCID_TO_IX(shmmap_s->shmid)].u.shm_nattch++;
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
}
static void
@@ -807,12 +816,12 @@ shmexit_myhook(struct vmspace *vm)
if ((base = vm->vm_shm) != NULL) {
vm->vm_shm = NULL;
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
for (i = 0, shm = base; i < shminfo.shmseg; i++, shm++) {
if (shm->shmid != -1)
shm_delete_mapping(vm, shm);
}
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
free(base, M_SHM);
}
}
@@ -823,6 +832,8 @@ shmrealloc(void)
int i;
struct shmid_kernel *newsegs;
+ sx_assert(&shm_lock, SA_XLOCKED);
+
if (shmalloced >= shminfo.shmmni)
return;
@@ -918,6 +929,8 @@ shminit()
shmexit_hook = &shmexit_myhook;
shmfork_hook = &shmfork_myhook;
+ sx_init(&shm_lock, "sysvshm");
+
error = syscall_helper_register(shm_syscalls);
if (error != 0)
return (error);
@@ -955,6 +968,7 @@ shmunload()
vm_object_deallocate(shmsegs[i].object);
}
free(shmsegs, M_SHM);
+ sx_destroy(&shm_lock);
shmexit_hook = NULL;
shmfork_hook = NULL;
return (0);
@@ -963,8 +977,12 @@ shmunload()
static int
sysctl_shmsegs(SYSCTL_HANDLER_ARGS)
{
+ int error;
- return (SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0])));
+ sx_xlock(&shm_lock);
+ error = SYSCTL_OUT(req, shmsegs, shmalloced * sizeof(shmsegs[0]));
+ sx_xunlock(&shm_lock);
+ return (error);
}
#if defined(__i386__) && (defined(COMPAT_FREEBSD4) || defined(COMPAT_43))
@@ -996,7 +1014,7 @@ oshmctl(struct thread *td, struct oshmctl_args *uap)
if (!prison_allow(td->td_ucred, PR_ALLOW_SYSVIPC))
return (ENOSYS);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
shmseg = shm_find_segment_by_shmid(uap->shmid);
if (shmseg == NULL) {
error = EINVAL;
@@ -1030,7 +1048,7 @@ oshmctl(struct thread *td, struct oshmctl_args *uap)
break;
}
done2:
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
#else
return (EINVAL);
@@ -1062,9 +1080,9 @@ sys_shmsys(td, uap)
if (uap->which < 0 ||
uap->which >= sizeof(shmcalls)/sizeof(shmcalls[0]))
return (EINVAL);
- mtx_lock(&Giant);
+ sx_xlock(&shm_lock);
error = (*shmcalls[uap->which])(td, &uap->a2);
- mtx_unlock(&Giant);
+ sx_xunlock(&shm_lock);
return (error);
}
--
Mateusz Guzik <mjguzik gmail.com>
More information about the freebsd-current mailing list