svn commit: r294482 - head/sys/kern

John Baldwin <jhb at FreeBSD.org>
Thu Jan 21 02:20:40 UTC 2016


Author: jhb
Date: Thu Jan 21 02:20:38 2016
New Revision: 294482
URL: https://svnweb.freebsd.org/changeset/base/294482

Log:
  AIO daemons have always been kernel processes to facilitate switching to
  user VM spaces while servicing jobs.  Update various comments and data
  structures that refer to AIO daemons as threads to refer to processes
  instead.
  
  Reviewed by:	kib
  Sponsored by:	Chelsio Communications
  Differential Revision:	https://reviews.freebsd.org/D4999

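For context, each AIO daemon is spawned as a full kernel process via kproc_create(9); being a separate process is what lets it temporarily adopt the vmspace of the user process whose job it is servicing.  Below is a minimal sketch of that spawn path, not the code touched by this commit: it assumes the kproc_create(9) interface and a hypothetical aiod_id unit number (the real aio_newproc() draws one from a unit-number allocator).

	static int
	aio_newproc_sketch(int aiod_id)
	{
		struct proc *p;
		int error;

		/* Spawn the daemon as a kernel process, not a kthread. */
		error = kproc_create(aio_daemon, (void *)(intptr_t)aiod_id,
		    &p, RFNOWAIT, 0, "aiod%d", aiod_id);
		if (error == 0) {
			/* Wait for the new daemon to announce it is ready. */
			sema_wait(&aio_newproc_sem);
		}
		return (error);
	}
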
Modified:
  head/sys/kern/vfs_aio.c

Modified: head/sys/kern/vfs_aio.c
==============================================================================
--- head/sys/kern/vfs_aio.c	Thu Jan 21 01:28:31 2016	(r294481)
+++ head/sys/kern/vfs_aio.c	Thu Jan 21 02:20:38 2016	(r294482)
@@ -130,12 +130,12 @@ static SYSCTL_NODE(_vfs, OID_AUTO, aio, 
 static int max_aio_procs = MAX_AIO_PROCS;
 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_procs,
 	CTLFLAG_RW, &max_aio_procs, 0,
-	"Maximum number of kernel threads to use for handling async IO ");
+	"Maximum number of kernel processes to use for handling async IO ");
 
 static int num_aio_procs = 0;
 SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_procs,
 	CTLFLAG_RD, &num_aio_procs, 0,
-	"Number of presently active kernel threads for async IO");
+	"Number of presently active kernel processes for async IO");
 
 /*
  * The code will adjust the actual number of AIO processes towards this
@@ -143,7 +143,7 @@ SYSCTL_INT(_vfs_aio, OID_AUTO, num_aio_p
  */
 static int target_aio_procs = TARGET_AIO_PROCS;
 SYSCTL_INT(_vfs_aio, OID_AUTO, target_aio_procs, CTLFLAG_RW, &target_aio_procs,
-	0, "Preferred number of ready kernel threads for async IO");
+	0, "Preferred number of ready kernel processes for async IO");
 
 static int max_queue_count = MAX_AIO_QUEUE;
 SYSCTL_INT(_vfs_aio, OID_AUTO, max_aio_queue, CTLFLAG_RW, &max_queue_count, 0,
@@ -157,7 +157,7 @@ static int num_buf_aio = 0;
 SYSCTL_INT(_vfs_aio, OID_AUTO, num_buf_aio, CTLFLAG_RD, &num_buf_aio, 0,
     "Number of aio requests presently handled by the buf subsystem");
 
-/* Number of async I/O thread in the process of being started */
+/* Number of async I/O processes in the process of being started */
 /* XXX This should be local to aio_aqueue() */
 static int num_aio_resv_start = 0;
 
@@ -210,8 +210,8 @@ typedef struct oaiocb {
  * Current, there is only two backends: BIO and generic file I/O.
  * socket I/O is served by generic file I/O, this is not a good idea, since
  * disk file I/O and any other types without O_NONBLOCK flag can block daemon
- * threads, if there is no thread to serve socket I/O, the socket I/O will be
- * delayed too long or starved, we should create some threads dedicated to
+ * processes, if there is no thread to serve socket I/O, the socket I/O will be
+ * delayed too long or starved, we should create some processes dedicated to
  * sockets to do non-blocking I/O, same for pipe and fifo, for these I/O
  * systems we really need non-blocking interface, fiddling O_NONBLOCK in file
  * structure is not safe because there is race between userland and aio
@@ -253,10 +253,10 @@ struct aiocblist {
  */
 #define AIOP_FREE	0x1			/* proc on free queue */
 
-struct aiothreadlist {
-	int aiothreadflags;			/* (c) AIO proc flags */
-	TAILQ_ENTRY(aiothreadlist) list;	/* (c) list of processes */
-	struct thread *aiothread;		/* (*) the AIO thread */
+struct aioproc {
+	int aioprocflags;			/* (c) AIO proc flags */
+	TAILQ_ENTRY(aioproc) list;		/* (c) list of processes */
+	struct proc *aioproc;			/* (*) the AIO proc */
 };
 
 /*
@@ -294,7 +294,7 @@ struct kaioinfo {
 	TAILQ_HEAD(,aiocblist) kaio_jobqueue;	/* (a) job queue for process */
 	TAILQ_HEAD(,aiocblist) kaio_bufqueue;	/* (a) buffer job queue for process */
 	TAILQ_HEAD(,aiocblist) kaio_syncqueue;	/* (a) queue for aio_fsync */
-	struct	task	kaio_task;	/* (*) task to kick aio threads */
+	struct	task	kaio_task;	/* (*) task to kick aio processes */
 };
 
 #define AIO_LOCK(ki)		mtx_lock(&(ki)->kaio_mtx)
@@ -319,7 +319,7 @@ struct aiocb_ops {
 	int	(*store_aiocb)(struct aiocb **ujobp, struct aiocb *ujob);
 };
 
-static TAILQ_HEAD(,aiothreadlist) aio_freeproc;		/* (c) Idle daemons */
+static TAILQ_HEAD(,aioproc) aio_freeproc;		/* (c) Idle daemons */
 static struct sema aio_newproc_sem;
 static struct mtx aio_job_mtx;
 static TAILQ_HEAD(,aiocblist) aio_jobs;			/* (c) Async job list */
@@ -357,7 +357,7 @@ static int	filt_lio(struct knote *kn, lo
 /*
  * Zones for:
  * 	kaio	Per process async io info
- *	aiop	async io thread data
+ *	aiop	async io process data
  *	aiocb	async io jobs
  *	aiol	list io job pointer - internal to aio_suspend XXX
  *	aiolio	list io jobs
@@ -483,7 +483,7 @@ aio_onceonly(void)
 	aiod_unr = new_unrhdr(1, INT_MAX, NULL);
 	kaio_zone = uma_zcreate("AIO", sizeof(struct kaioinfo), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
-	aiop_zone = uma_zcreate("AIOP", sizeof(struct aiothreadlist), NULL,
+	aiop_zone = uma_zcreate("AIOP", sizeof(struct aioproc), NULL,
 	    NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
 	aiocb_zone = uma_zcreate("AIOCB", sizeof(struct aiocblist), NULL, NULL,
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE);
@@ -796,7 +796,7 @@ restart:
  * Select a job to run (called by an AIO daemon).
  */
 static struct aiocblist *
-aio_selectjob(struct aiothreadlist *aiop)
+aio_selectjob(struct aioproc *aiop)
 {
 	struct aiocblist *aiocbe;
 	struct kaioinfo *ki;
@@ -1056,7 +1056,7 @@ static void
 aio_daemon(void *_id)
 {
 	struct aiocblist *aiocbe;
-	struct aiothreadlist *aiop;
+	struct aioproc *aiop;
 	struct kaioinfo *ki;
 	struct proc *p, *userp;
 	struct vmspace *myvm;
@@ -1078,8 +1078,8 @@ aio_daemon(void *_id)
 	 * per daemon.
 	 */
 	aiop = uma_zalloc(aiop_zone, M_WAITOK);
-	aiop->aiothread = td;
-	aiop->aiothreadflags = 0;
+	aiop->aioproc = p;
+	aiop->aioprocflags = 0;
 
 	/*
 	 * Wakeup parent process.  (Parent sleeps to keep from blasting away
@@ -1092,9 +1092,9 @@ aio_daemon(void *_id)
 		/*
 		 * Take daemon off of free queue
 		 */
-		if (aiop->aiothreadflags & AIOP_FREE) {
+		if (aiop->aioprocflags & AIOP_FREE) {
 			TAILQ_REMOVE(&aio_freeproc, aiop, list);
-			aiop->aiothreadflags &= ~AIOP_FREE;
+			aiop->aioprocflags &= ~AIOP_FREE;
 		}
 
 		/*
@@ -1155,15 +1155,15 @@ aio_daemon(void *_id)
 		mtx_assert(&aio_job_mtx, MA_OWNED);
 
 		TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
-		aiop->aiothreadflags |= AIOP_FREE;
+		aiop->aioprocflags |= AIOP_FREE;
 
 		/*
 		 * If daemon is inactive for a long time, allow it to exit,
 		 * thereby freeing resources.
 		 */
-		if (msleep(aiop->aiothread, &aio_job_mtx, PRIBIO, "aiordy",
+		if (msleep(p, &aio_job_mtx, PRIBIO, "aiordy",
 		    aiod_lifetime) == EWOULDBLOCK && TAILQ_EMPTY(&aio_jobs) &&
-		    (aiop->aiothreadflags & AIOP_FREE) &&
+		    (aiop->aioprocflags & AIOP_FREE) &&
 		    num_aio_procs > target_aio_procs)
 			break;
 	}
@@ -1781,13 +1781,13 @@ static void
 aio_kick_nowait(struct proc *userp)
 {
 	struct kaioinfo *ki = userp->p_aioinfo;
-	struct aiothreadlist *aiop;
+	struct aioproc *aiop;
 
 	mtx_assert(&aio_job_mtx, MA_OWNED);
 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
-		aiop->aiothreadflags &= ~AIOP_FREE;
-		wakeup(aiop->aiothread);
+		aiop->aioprocflags &= ~AIOP_FREE;
+		wakeup(aiop->aioproc);
 	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
 	    ((ki->kaio_active_count + num_aio_resv_start) <
 	    ki->kaio_maxactive_count)) {
@@ -1799,15 +1799,15 @@ static int
 aio_kick(struct proc *userp)
 {
 	struct kaioinfo *ki = userp->p_aioinfo;
-	struct aiothreadlist *aiop;
+	struct aioproc *aiop;
 	int error, ret = 0;
 
 	mtx_assert(&aio_job_mtx, MA_OWNED);
 retryproc:
 	if ((aiop = TAILQ_FIRST(&aio_freeproc)) != NULL) {
 		TAILQ_REMOVE(&aio_freeproc, aiop, list);
-		aiop->aiothreadflags &= ~AIOP_FREE;
-		wakeup(aiop->aiothread);
+		aiop->aioprocflags &= ~AIOP_FREE;
+		wakeup(aiop->aioproc);
 	} else if (((num_aio_resv_start + num_aio_procs) < max_aio_procs) &&
 	    ((ki->kaio_active_count + num_aio_resv_start) <
 	    ki->kaio_maxactive_count)) {

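One detail worth noting in the hunks above: msleep(9) and wakeup(9) rendezvous on a plain channel address, so the sleeper and the waker must agree on the same pointer.  The idle daemon used to sleep on its struct thread; with this change it sleeps on its struct proc, and the kick paths wake that same pointer.  Condensed from the hunks above (a sketch, not a complete excerpt):

	/* In aio_daemon(), going idle (aio_job_mtx held): */
	TAILQ_INSERT_HEAD(&aio_freeproc, aiop, list);
	aiop->aioprocflags |= AIOP_FREE;
	msleep(p, &aio_job_mtx, PRIBIO, "aiordy", aiod_lifetime);

	/* In aio_kick()/aio_kick_nowait(), handing it work (aio_job_mtx held): */
	TAILQ_REMOVE(&aio_freeproc, aiop, list);
	aiop->aioprocflags &= ~AIOP_FREE;
	wakeup(aiop->aioproc);	/* the same struct proc the daemon sleeps on */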
