cvs commit: src/sys/kern syscalls.master vfs_aio.c

David Xu davidxu at freebsd.org
Mon Jan 23 16:16:16 PST 2006


John Baldwin wrote:

>On Sunday 22 January 2006 00:59, David Xu wrote:
>  
>
>>davidxu     2006-01-22 05:59:28 UTC
>>
>>  FreeBSD src repository
>>
>>  Modified files:
>>    sys/kern             syscalls.master vfs_aio.c
>>  Log:
>>  Make aio code MP safe.
>>    
>>
>
>Thanks for doing this.  This is something I had sort of been working on but 
>hadn't gotten to in a while.  (I don't think I had many uncommitted diffs, I
>think mostly I had just done the fget() and TAILQ_FOREACH_SAFE() cleanups I 
>committed earlier.)  One thing I would appreciate is if you could add some 
>comments explaining the locking strategy and annotating which fields are 
>protected by which locks, similar to sys/proc.h, etc.  I've attached my
>current set of diffs in case you wanted to look at them.
>  
>
I will add the locking annotations; in fact, I will do it today.
It is morning here and I have slept, so I am interested in working
on it again :-)
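
Something like the sys/proc.h convention, I think. A rough sketch of
what I have in mind; the lock letters and comments below are only
illustrative, not the final annotations:

/*
 * Locking key (letters are hypothetical):
 * (a) protected by aio_freeproc_mtx
 * (b) protected by a per-process kaio lock
 * (c) constant after initialization, no lock needed
 */
struct kaioinfo {
	int	kaio_flags;		/* (b) per-process I/O flags */
	int	kaio_maxactive_count;	/* (c) maximum number of AIOs */
	int	kaio_active_count;	/* (b) number of currently used AIOs */
	TAILQ_HEAD(, aiocblist) kaio_jobqueue;	/* (b) queued I/O jobs */
};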

>  
>
>------------------------------------------------------------------------
>
>--- //depot/projects/smpng/sys/kern/vfs_aio.c	2006/01/17 17:16:05
>+++ //depot/user/jhb/proc/kern/vfs_aio.c	2006/01/17 17:49:55
>@@ -108,6 +108,8 @@
> #define AIOD_LIFETIME_DEFAULT	(30 * hz)
> #endif
> 
>+static int aiod_creation_in_progress;
>+
> static SYSCTL_NODE(_vfs, OID_AUTO, aio, CTLFLAG_RW, 0, "Async IO management");
> 
> static int max_aio_procs = MAX_AIO_PROCS;
>@@ -266,7 +268,7 @@
> static void	aio_onceonly(void);
> static int	aio_free_entry(struct aiocblist *aiocbe);
> static void	aio_process(struct aiocblist *aiocbe);
>-static int	aio_newproc(void);
>+static int	aio_newproc(int always);
> static int	aio_aqueue(struct thread *td, struct aiocb *job, int type,
> 			int osigev);
> static void	aio_physwakeup(struct buf *bp);
>@@ -411,9 +413,17 @@
> 	error = kqueue_del_filteropts(EVFILT_AIO);
> 	if (error)
> 		return error;
>+	error = kqueue_del_filteropts(EVFILT_LIO);
>+	if (error)
>+		return error;
> 
> 	async_io_version = 0;
> 	aio_swake = NULL;
>+	uma_zdestroy(kaio_zone);
>+	uma_zdestroy(aiop_zone);
>+	uma_zdestroy(aiocb_zone);
>+	uma_zdestroy(aiol_zone);
>+	uma_zdestroy(aiolio_zone);
> 	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
> 	EVENTHANDLER_DEREGISTER(process_exec, exec_tag);
> 	mtx_destroy(&aio_freeproc_mtx);
>@@ -456,8 +466,10 @@
> 		uma_zfree(kaio_zone, ki);
> 	}
> 
>+	mtx_lock(&aio_freeproc_mtx);
> 	while (num_aio_procs < target_aio_procs)
>-		aio_newproc();
>+		aio_newproc(0);
>+	mtx_unlock(&aio_freeproc_mtx);
> }
> 
> static int
>@@ -901,8 +913,6 @@
> 	struct proc *curcp, *mycp, *userp;
> 	struct vmspace *myvm, *tmpvm;
> 	struct thread *td = curthread;
>-	struct pgrp *newpgrp;
>-	struct session *newsess;
> 
> 	/*
> 	 * Local copies of curproc (cp) and vmspace (myvm)
>@@ -918,16 +928,8 @@
> 	 */
> 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
> 	aiop->aiothread = td;
>-	aiop->aiothreadflags |= AIOP_FREE;
> 
> 	/*
>-	 * Place thread (lightweight process) onto the AIO free thread list.
>-	 */
>-	mtx_lock(&aio_freeproc_mtx);
>-	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
>-	mtx_unlock(&aio_freeproc_mtx);
>-
>-	/*
> 	 * Get rid of our current filedescriptors.  AIOD's don't need any
> 	 * filedescriptors, except as temporarily inherited from the client.
> 	 */
>@@ -936,15 +938,16 @@
> 
> 	mtx_unlock(&Giant);
> 	/* The daemon resides in its own pgrp. */
>-	MALLOC(newpgrp, struct pgrp *, sizeof(struct pgrp), M_PGRP,
>-		M_WAITOK | M_ZERO);
>-	MALLOC(newsess, struct session *, sizeof(struct session), M_SESSION,
>-		M_WAITOK | M_ZERO);
>+	setsid(td, NULL);
> 
>-	sx_xlock(&proctree_lock);
>-	enterpgrp(mycp, mycp->p_pid, newpgrp, newsess);
>-	sx_xunlock(&proctree_lock);
>-	mtx_lock(&Giant);
>+	/*
>+	 * Place thread (lightweight process) onto the AIO free thread list.
>+	 */
>+	mtx_lock(&aio_freeproc_mtx);
>+	num_aio_procs++;
>+	aiop->aiothreadflags |= AIOP_FREE;
>+	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
>+	mtx_unlock(&aio_freeproc_mtx);
> 
> 	/*
> 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
>@@ -952,6 +955,7 @@
> 	 */
> 	wakeup(mycp);
> 
>+	mtx_lock(&aio_freeproc_mtx);
> 	for (;;) {
> 		/*
> 		 * curcp is the current daemon process context.
>@@ -962,7 +966,6 @@
> 		/*
> 		 * Take daemon off of free queue
> 		 */
>-		mtx_lock(&aio_freeproc_mtx);
> 		if (aiop->aiothreadflags & AIOP_FREE) {
> 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
> 			aiop->aiothreadflags &= ~AIOP_FREE;
>@@ -972,6 +975,7 @@
> 		/*
> 		 * Check for jobs.
> 		 */
>+		mtx_lock(&Giant);
> 		while ((aiocbe = aio_selectjob(aiop)) != NULL) {
> 			cb = &aiocbe->uaiocb;
> 			userp = aiocbe->userproc;
>@@ -1054,6 +1058,7 @@
> 
> 			curcp = mycp;
> 		}
>+		mtx_unlock(&Giant);
> 
> 		mtx_lock(&aio_freeproc_mtx);
> 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
>@@ -1063,18 +1068,17 @@
> 		 * If daemon is inactive for a long time, allow it to exit,
> 		 * thereby freeing resources.
> 		 */
>-		if (msleep(aiop->aiothread, &aio_freeproc_mtx, PDROP | PRIBIO,
>+		if (msleep(aiop->aiothread, &aio_freeproc_mtx, PRIBIO,
> 		    "aiordy", aiod_lifetime)) {
> 			s = splnet();
> 			if (TAILQ_EMPTY(&aio_jobs)) {
>-				mtx_lock(&aio_freeproc_mtx);
> 				if ((aiop->aiothreadflags & AIOP_FREE) &&
> 				    (num_aio_procs > target_aio_procs)) {
> 					TAILQ_REMOVE(&aio_freeproc, aiop, list);
>+					num_aio_procs--;
> 					mtx_unlock(&aio_freeproc_mtx);
> 					splx(s);
> 					uma_zfree(aiop_zone, aiop);
>-					num_aio_procs--;
> #ifdef DIAGNOSTIC
> 					if (mycp->p_vmspace->vm_refcnt <= 1) {
> 						printf("AIOD: bad vm refcnt for"
>@@ -1084,7 +1088,6 @@
> #endif
> 					kthread_exit(0);
> 				}
>-				mtx_unlock(&aio_freeproc_mtx);
> 			}
> 			splx(s);
> 		}
>@@ -1096,24 +1099,36 @@
>  * AIO daemon modifies its environment itself.
>  */
> static int
>-aio_newproc(void)
>+aio_newproc(int always)
> {
>-	int error;
> 	struct proc *p;
>+	int error, n;
> 
>+	mtx_assert(&aio_freeproc_mtx, MA_OWNED);
>+	n = num_aio_procs;
>+	while (aiod_creation_in_progress) {
>+		error = msleep(&aiod_creation_in_progress, &aio_freeproc_mtx,
>+		    PZERO, "aionew", aiod_timeout);
>+		if (error || !always)
>+			return (error);
>+	}
>+	aiod_creation_in_progress = 1;
>+	mtx_unlock(&aio_freeproc_mtx);
>+	
> 	error = kthread_create(aio_daemon, curproc, &p, RFNOWAIT, 0, "aiod%d",
>-	    num_aio_procs);
>-	if (error)
>-		return (error);
>+	    n);
> 
>-	/*
>-	 * Wait until daemon is started, but continue on just in case to
>-	 * handle error conditions.
>-	 */
>-	error = tsleep(p, PZERO, "aiosta", aiod_timeout);
>+	mtx_lock(&aio_freeproc_mtx);
>+	if (error == 0)
>+		/*
>+		 * Wait until daemon is started, but continue on just in
>+		 * case to handle error conditions.
>+		 */
>+		error = msleep(p, &aio_freeproc_mtx, PZERO, "aiosta",
>+		    aiod_timeout);
> 
>-	num_aio_procs++;
>-
>+	aiod_creation_in_progress = 0;
>+	wakeup(&aiod_creation_in_progress);
> 	return (error);
> }
> 
>@@ -1597,9 +1612,7 @@
> 	    ((ki->kaio_active_count + num_aio_resv_start) <
> 	    ki->kaio_maxactive_count)) {
> 		num_aio_resv_start++;
>-		mtx_unlock(&aio_freeproc_mtx);
>-		error = aio_newproc();
>-		mtx_lock(&aio_freeproc_mtx);
>+		error = aio_newproc(1);
> 		num_aio_resv_start--;
> 		if (error)
> 			goto retryproc;
>  
>
I will check my code to see if I missed anything and will integrate
your code where needed.
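
By the way, if I read your diff correctly, the aiod_creation_in_progress
logic boils down to a flag plus msleep/wakeup that serializes a slow
creation step so only one thread performs it at a time. A distilled
sketch with placeholder names (the_mtx, do_slow_creation and "crwait"
are mine, not from your diff):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx the_mtx;		/* stands in for aio_freeproc_mtx */
static int creation_in_progress;	/* protected by the_mtx */

static int do_slow_creation(void);	/* stands in for kthread_create() */

static int
create_serialized(void)
{
	int error;

	mtx_assert(&the_mtx, MA_OWNED);
	while (creation_in_progress) {
		/* Someone else is already creating; sleep until done. */
		error = msleep(&creation_in_progress, &the_mtx, PZERO,
		    "crwait", hz);
		if (error)
			return (error);
	}
	creation_in_progress = 1;	/* become the sole creator */
	mtx_unlock(&the_mtx);
	error = do_slow_creation();	/* slow step runs without the lock */
	mtx_lock(&the_mtx);
	creation_in_progress = 0;
	wakeup(&creation_in_progress);	/* let the waiters re-check */
	return (error);
}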
Thank you!


