PERFORCE change 121634 for review

John Baldwin jhb at FreeBSD.org
Thu Jun 14 15:33:03 UTC 2007


http://perforce.freebsd.org/chv.cgi?CH=121634

Change 121634 by jhb at jhb_mutex on 2007/06/14 15:32:32

	Fix various ktrace bogons:
	- Don't let users enable ktrace on exiting processes.  Otherwise,
	  tracing could be enabled on an exiting process after it has
	  already run ktrprocexit(), and we would never tear down the
	  state or drop the references on the ucred or vnode.
	- When clearing ktrace from a process, stash any pending events on
	  a local queue so we can free them once the locks are dropped
	  (see the sketch below).  Otherwise we would leak the request
	  objects and the memory they hold.  In theory we should drain the
	  requests to the trace file first, but since we don't store vnode
	  and cred state in the requests, that would be tricky to do.
	- Optimize the locking so that we pass the proc lock to ktrops()
	  and ktrsetchildren() rather than dropping it just so we can
	  acquire it again (a miniature of this hand-off follows the
	  diff).
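
	As a point of reference, here is a minimal userland sketch of the
	second bullet's pattern: splice the pending requests onto a
	caller-local STAILQ while the mutex is held, and free them only
	after the lock has been dropped.  The request structure, the
	pthread mutex, and clear_pending() are illustrative stand-ins for
	struct ktr_request, ktrace_mtx, and the clearing paths in the
	diff; they are not FreeBSD code.

		#include <sys/queue.h>
		#include <pthread.h>
		#include <stdlib.h>

		struct request {
			int			seq;	/* stand-in payload */
			STAILQ_ENTRY(request)	link;
		};
		STAILQ_HEAD(request_queue, request);

		static struct request_queue pending =
		    STAILQ_HEAD_INITIALIZER(pending);
		static pthread_mutex_t pending_mtx = PTHREAD_MUTEX_INITIALIZER;

		/*
		 * Splice any queued requests onto a local queue while the
		 * mutex is held, then free them after the mutex has been
		 * dropped, so no free(3) calls happen under the lock and
		 * no requests are leaked.
		 */
		static void
		clear_pending(void)
		{
			struct request_queue local_queue;
			struct request *req;

			STAILQ_INIT(&local_queue);
			pthread_mutex_lock(&pending_mtx);
			/* STAILQ_CONCAT() moves everything, leaving 'pending' empty. */
			STAILQ_CONCAT(&local_queue, &pending);
			pthread_mutex_unlock(&pending_mtx);

			while ((req = STAILQ_FIRST(&local_queue)) != NULL) {
				STAILQ_REMOVE_HEAD(&local_queue, link);
				free(req);
			}
		}

		int
		main(void)
		{
			struct request *req;
			int i;

			for (i = 0; i < 3; i++) {
				if ((req = malloc(sizeof(*req))) == NULL)
					return (1);
				req->seq = i;
				STAILQ_INSERT_TAIL(&pending, req, link);
			}
			clear_pending();
			return (0);
		}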

Affected files ...

.. //depot/projects/smpng/sys/kern/kern_ktrace.c#61 edit

Differences ...

==== //depot/projects/smpng/sys/kern/kern_ktrace.c#61 (text+ko) ====

@@ -98,6 +98,7 @@
 	} ktr_data;
 	STAILQ_ENTRY(ktr_request) ktr_list;
 };
+STAILQ_HEAD(ktr_request_queue, ktr_request);
 
 static int data_lengths[] = {
 	0,					/* none */
@@ -110,7 +111,7 @@
 	0					/* KTR_USER */
 };
 
-static STAILQ_HEAD(, ktr_request) ktr_free;
+static struct ktr_request_queue ktr_free;
 
 static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");
 
@@ -134,8 +135,10 @@
 static void ktr_freerequest(struct ktr_request *req);
 static void ktr_writerequest(struct thread *td, struct ktr_request *req);
 static int ktrcanset(struct thread *,struct proc *);
-static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
-static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);
+static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *,
+    struct ktr_request_queue *);
+static int ktrops(struct thread *,struct proc *,int,int,struct vnode *,
+    struct ktr_request_queue *);
 
 /*
  * ktrace itself generates events, such as context switches, which we do not
@@ -321,13 +324,13 @@
 static void
 ktr_drain(struct thread *td)
 {
+	struct ktr_request_queue local_queue;
 	struct ktr_request *queued_req;
-	STAILQ_HEAD(, ktr_request) local_queue;
 
 	ktrace_assert(td);
 	sx_assert(&ktrace_sx, SX_XLOCKED);
 
-	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */
+	STAILQ_INIT(&local_queue);
 
 	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
 		mtx_lock(&ktrace_mtx);
@@ -575,6 +578,8 @@
 #ifdef KTRACE
 	register struct vnode *vp = NULL;
 	register struct proc *p;
+	struct ktr_request_queue local_queue;
+	struct ktr_request *req;
 	struct pgrp *pg;
 	int facs = uap->facs & ~KTRFAC_ROOT;
 	int ops = KTROP(uap->ops);
@@ -590,6 +595,7 @@
 	if (ops != KTROP_CLEARFILE && facs == 0)
 		return (EINVAL);
 
+	STAILQ_INIT(&local_queue);
 	ktrace_enter(td);
 	if (ops != KTROP_CLEAR) {
 		/*
@@ -632,6 +638,8 @@
 					p->p_tracecred = NULL;
 					p->p_tracevp = NULL;
 					p->p_traceflag = 0;
+					STAILQ_CONCAT(&local_queue,
+					    &p->p_ktr);
 					mtx_unlock(&ktrace_mtx);
 					vrele_count++;
 					crfree(cred);
@@ -675,12 +683,13 @@
 				PROC_UNLOCK(p); 
 				continue;
 			}
-			PROC_UNLOCK(p); 
 			nfound++;
 			if (descend)
-				ret |= ktrsetchildren(td, p, ops, facs, vp);
+				ret |= ktrsetchildren(td, p, ops, facs, vp,
+				    &local_queue);
 			else
-				ret |= ktrops(td, p, ops, facs, vp);
+				ret |= ktrops(td, p, ops, facs, vp,
+				    &local_queue);
 		}
 		if (nfound == 0) {
 			sx_sunlock(&proctree_lock);
@@ -692,25 +701,21 @@
 		 * by pid
 		 */
 		p = pfind(uap->pid);
-		if (p == NULL) {
-			sx_sunlock(&proctree_lock);
+		if (p == NULL)
 			error = ESRCH;
-			goto done;
-		}
-		error = p_cansee(td, p);
-		/*
-		 * The slock of the proctree lock will keep this process
-		 * from going away, so unlocking the proc here is ok.
-		 */
-		PROC_UNLOCK(p);
+		else
+			error = p_cansee(td, p);
 		if (error) {
+			if (p != NULL)
+				PROC_UNLOCK(p);
 			sx_sunlock(&proctree_lock);
 			goto done;
 		}
 		if (descend)
-			ret |= ktrsetchildren(td, p, ops, facs, vp);
+			ret |= ktrsetchildren(td, p, ops, facs, vp,
+			    &local_queue);
 		else
-			ret |= ktrops(td, p, ops, facs, vp);
+			ret |= ktrops(td, p, ops, facs, vp, &local_queue);
 	}
 	sx_sunlock(&proctree_lock);
 	if (!ret)
@@ -721,6 +726,15 @@
 		(void) vn_close(vp, FWRITE, td->td_ucred, td);
 		VFS_UNLOCK_GIANT(vfslocked);
 	}
+
+	/*
+	 * Free any pending requests from processes where tracing was
+	 * disabled.
+	 */
+	while ((req = STAILQ_FIRST(&local_queue))) {
+		STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
+		ktr_freerequest(req);
+	}
 	ktrace_exit(td);
 	return (error);
 #else /* !KTRACE */
@@ -766,20 +780,30 @@
 
 #ifdef KTRACE
 static int
-ktrops(td, p, ops, facs, vp)
+ktrops(td, p, ops, facs, vp, request_queue)
 	struct thread *td;
 	struct proc *p;
 	int ops, facs;
 	struct vnode *vp;
+	struct ktr_request_queue *request_queue;
 {
 	struct vnode *tracevp = NULL;
 	struct ucred *tracecred = NULL;
 
-	PROC_LOCK(p);
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	if (!ktrcanset(td, p)) {
 		PROC_UNLOCK(p);
 		return (0);
 	}
+	if (p->p_flag & P_WEXIT) {
+		/*
+		 * If the process is exiting, just ignore it.  We
+		 * don't want to return EPERM for this (maybe ESRCH?)
+		 * so we fake success.
+		 */
+		PROC_UNLOCK(p);
+		return (1);
+	}
 	mtx_lock(&ktrace_mtx);
 	if (ops == KTROP_SET) {
 		if (p->p_tracevp != vp) {
@@ -807,6 +831,7 @@
 			p->p_tracevp = NULL;
 			tracecred = p->p_tracecred;
 			p->p_tracecred = NULL;
+			STAILQ_CONCAT(request_queue, &p->p_ktr);
 		}
 	}
 	mtx_unlock(&ktrace_mtx);
@@ -825,19 +850,21 @@
 }
 
 static int
-ktrsetchildren(td, top, ops, facs, vp)
+ktrsetchildren(td, top, ops, facs, vp, request_queue)
 	struct thread *td;
 	struct proc *top;
 	int ops, facs;
 	struct vnode *vp;
+	struct ktr_request_queue *request_queue;
 {
 	register struct proc *p;
 	register int ret = 0;
 
 	p = top;
+	PROC_LOCK_ASSERT(p, MA_OWNED);
 	sx_assert(&proctree_lock, SX_LOCKED);
 	for (;;) {
-		ret |= ktrops(td, p, ops, facs, vp);
+		ret |= ktrops(td, p, ops, facs, vp, request_queue);
 		/*
 		 * If this process has children, descend to them next,
 		 * otherwise do any siblings, and if done with this level,
@@ -854,6 +881,7 @@
 			}
 			p = p->p_pptr;
 		}
+		PROC_LOCK(p);
 	}
 	/*NOTREACHED*/
 }
@@ -861,6 +889,7 @@
 static void
 ktr_writerequest(struct thread *td, struct ktr_request *req)
 {
+	struct ktr_request_queue local_queue;
 	struct ktr_header *kth;
 	struct vnode *vp;
 	struct proc *p;
@@ -955,6 +984,7 @@
 	 * we really do this?  Other processes might have suitable
 	 * credentials for the operation.
 	 */
+	STAILQ_INIT(&local_queue);
 	cred = NULL;
 	sx_slock(&allproc_lock);
 	FOREACH_PROC_IN_SYSTEM(p) {
@@ -965,6 +995,7 @@
 			p->p_traceflag = 0;
 			cred = p->p_tracecred;
 			p->p_tracecred = NULL;
+			STAILQ_CONCAT(&local_queue, &p->p_ktr);
 			mtx_unlock(&ktrace_mtx);
 			vrele_count++;
 		}
@@ -977,10 +1008,14 @@
 	sx_sunlock(&allproc_lock);
 
 	/*
-	 * We can't clear any pending requests in threads that have cached
-	 * them but not yet committed them, as those are per-thread.  The
-	 * thread will have to clear it itself on system call return.
+	 * Free any pending requests from processes where tracing was
+	 * disabled.
 	 */
+	while ((req = STAILQ_FIRST(&local_queue))) {
+		STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
+		ktr_freerequest(req);
+	}
+
 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	while (vrele_count-- > 0)
 		vrele(vp);

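The locking hand-off in the later hunks deserves a note: ktrops() is now
entered with the target process locked and drops that lock on every
return path, and ktrsetchildren() re-locks the next process at the
bottom of its tree walk, so the old unlock/relock pair per process goes
away.  A miniature of the same convention, using a pthread mutex and
hypothetical node/visit names rather than anything from the kernel:

	#include <pthread.h>
	#include <stddef.h>

	struct node {
		pthread_mutex_t	lock;	/* assumed already initialized */
		struct node	*next;	/* linkage is never modified */
	};

	/*
	 * Entered with n->lock held; consumes (drops) the lock before
	 * returning, mirroring how ktrops() now does PROC_UNLOCK(p) on
	 * all of its return paths.
	 */
	static void
	visit(struct node *n)
	{
		/* ... operate on *n while it is locked ... */
		pthread_mutex_unlock(&n->lock);
	}

	static void
	visit_all(struct node *head)
	{
		struct node *n;

		if (head == NULL)
			return;
		pthread_mutex_lock(&head->lock);	/* caller takes the first lock */
		for (n = head; n != NULL; n = n->next) {
			visit(n);			/* drops n->lock */
			/*
			 * Following n->next unlocked is safe here only
			 * because the linkage is immutable in this
			 * sketch; the kernel walk leans on proctree_lock
			 * for the equivalent guarantee.
			 */
			if (n->next != NULL)
				pthread_mutex_lock(&n->next->lock);
		}
	}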
