suspending threads before devices

Konstantin Belousov kostikbel at gmail.com
Sat Nov 22 17:06:06 UTC 2014


On Fri, Nov 21, 2014 at 06:06:29PM +0200, Konstantin Belousov wrote:
> Below is the current version of the patch.  I still have to write the
> code to sync local filesystems,

I decided to utilize the syncer shutdown mode for this.  It seems to be easy
and does what we need.
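
For context, below is a minimal sketch (not part of the patch) of the order
in which the new primitives are meant to be used on the suspend path; it is
the same sequence the debug sysctl in the patch exercises.  The
suspend_devices()/resume_devices() calls are hypothetical placeholders for
the actual device suspend code.

#include <sys/param.h>
#include <sys/proc.h>		/* stop_all_proc(), resume_all_proc() */
#include <sys/mount.h>		/* syncer_suspend(), syncer_resume() */

static int suspend_devices(void);	/* hypothetical, not in this patch */
static int resume_devices(void);	/* hypothetical, not in this patch */

static int
suspend_sequence_sketch(void)
{
	int error;

	stop_all_proc();		/* park all threads with usermode */
	syncer_suspend();		/* flush dirty data, park the syncer */
	error = suspend_devices();	/* placeholder for the device step */
	if (error == 0) {
		/* ... the machine sleeps and wakes up here ... */
		resume_devices();
	}
	syncer_resume();		/* restart the syncer */
	resume_all_proc();		/* unstop userspace */
	return (error);
}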

An interesting corner case is the pagedaemon and vmdaemon pair.  Both of
them could generate more writes to the filesystems even after all userspace
is stopped and the syncer has flushed everything; this is more likely for
the pagedaemon.  They might need suspend/resume methods in the future.
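
A purely hypothetical sketch of what such methods could look like, reusing
the kproc_suspend()/kproc_resume() primitives from the same family the
syncer shutdown already uses.  The pagedaemon_suspend()/pagedaemon_resume()
names and the 10 second timeout are invented for illustration, and
vm_pageout() would additionally need a kproc_suspend_check() call in its
main loop for this to work.

#include <sys/param.h>
#include <sys/kernel.h>		/* hz */
#include <sys/kthread.h>	/* kproc_suspend(), kproc_resume() */
#include <sys/proc.h>

extern struct proc *pageproc;	/* the pagedaemon, from vm/vm_pageout.c */

static int
pagedaemon_suspend(void)
{

	/* Ask the pagedaemon to park itself, waiting up to 10 seconds. */
	return (kproc_suspend(pageproc, 10 * hz));
}

static int
pagedaemon_resume(void)
{

	return (kproc_resume(pageproc));
}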

Note that bufdaemon does not need to be suspended, since all dirty buffers
are handled by the syncer.

diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 7ae7d4e..19c33b6 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -289,7 +289,7 @@ kern_execve(td, args, mac_p)
 	    args->endp - args->begin_envv);
 	if (p->p_flag & P_HADTHREADS) {
 		PROC_LOCK(p);
-		if (thread_single(SINGLE_BOUNDARY)) {
+		if (thread_single(p, SINGLE_BOUNDARY)) {
 			PROC_UNLOCK(p);
 	       		exec_free_args(args);
 			return (ERESTART);	/* Try again later. */
@@ -308,9 +308,9 @@ kern_execve(td, args, mac_p)
 		 * force other threads to suicide.
 		 */
 		if (error == 0)
-			thread_single(SINGLE_EXIT);
+			thread_single(p, SINGLE_EXIT);
 		else
-			thread_single_end();
+			thread_single_end(p, SINGLE_BOUNDARY);
 		PROC_UNLOCK(p);
 	}
 	if ((td->td_pflags & TDP_EXECVMSPC) != 0) {
diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 1e4c095..b58e830 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -206,7 +206,7 @@ exit1(struct thread *td, int rv)
 		 * re-check all suspension request, the thread should
 		 * either be suspended there or exit.
 		 */
-		if (!thread_single(SINGLE_EXIT))
+		if (!thread_single(p, SINGLE_EXIT))
 			break;
 
 		/*
diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c
index 62f43ba..80d7f82 100644
--- a/sys/kern/kern_fork.c
+++ b/sys/kern/kern_fork.c
@@ -317,7 +317,7 @@ fork_norfproc(struct thread *td, int flags)
 	if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
 	    (flags & (RFCFDG | RFFDG))) {
 		PROC_LOCK(p1);
-		if (thread_single(SINGLE_BOUNDARY)) {
+		if (thread_single(p1, SINGLE_BOUNDARY)) {
 			PROC_UNLOCK(p1);
 			return (ERESTART);
 		}
@@ -348,7 +348,7 @@ fail:
 	if (((p1->p_flag & (P_HADTHREADS|P_SYSTEM)) == P_HADTHREADS) &&
 	    (flags & (RFCFDG | RFFDG))) {
 		PROC_LOCK(p1);
-		thread_single_end();
+		thread_single_end(p1, SINGLE_BOUNDARY);
 		PROC_UNLOCK(p1);
 	}
 	return (error);
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index 495139f..1a11655 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -2893,3 +2893,128 @@ static SYSCTL_NODE(_kern_proc, KERN_PROC_OSREL, osrel, CTLFLAG_RW |
 static SYSCTL_NODE(_kern_proc, KERN_PROC_SIGTRAMP, sigtramp, CTLFLAG_RD |
 	CTLFLAG_MPSAFE, sysctl_kern_proc_sigtramp,
 	"Process signal trampoline location");
+
+void
+stop_all_proc(void)
+{
+	struct proc *cp, *p;
+	int r;
+	bool restart, seen_stopped, stopped_some;
+
+	cp = curproc;
+	/*
+	 * stop_all_proc() assumes that all processes which have
+	 * usermode threads must be stopped, except the current
+	 * process, for obvious reasons.  Since other threads in the
+	 * process establishing the global stop could unstop something,
+	 * disable calls from multithreaded processes as a precaution.
+	 * The service must not be user-callable anyway.
+	 */
+	KASSERT((cp->p_flag & P_HADTHREADS) == 0 ||
+	    (cp->p_flag & P_KTHREAD) != 0, ("mt stop_all_proc"));
+
+allproc_loop:
+	sx_xlock(&allproc_lock);
+	seen_stopped = stopped_some = restart = false;
+	LIST_REMOVE(cp, p_list);
+	LIST_INSERT_HEAD(&allproc, cp, p_list);
+	for (;;) {
+		p = LIST_NEXT(cp, p_list);
+		if (p == NULL)
+			break;
+		LIST_REMOVE(cp, p_list);
+		LIST_INSERT_AFTER(p, cp, p_list);
+		PROC_LOCK(p);
+		if ((p->p_flag & (P_KTHREAD | P_SYSTEM |
+		    P_TOTAL_STOP)) != 0) {
+			PROC_UNLOCK(p);
+			continue;
+		}
+		if (P_SHOULDSTOP(p)) {
+			/*
+			 * Stopped processes are only tolerated when
+			 * there are no other processes which might
+			 * continue them.
+			 */
+			seen_stopped = true;
+			PROC_UNLOCK(p);
+			continue;
+		}
+		_PHOLD(p);
+		sx_xunlock(&allproc_lock);
+		r = thread_single(p, SINGLE_ALLPROC);
+		if (r != 0)
+			restart = true;
+		else
+			stopped_some = true;
+		_PRELE(p);
+		PROC_UNLOCK(p);
+		sx_xlock(&allproc_lock);
+	}
+	sx_xunlock(&allproc_lock);
+	if (restart || (seen_stopped && stopped_some)) {
+		kern_yield(PRI_USER);
+		goto allproc_loop;
+	}
+}
+
+void
+resume_all_proc(void)
+{
+	struct proc *cp, *p;
+
+	cp = curproc;
+	sx_xlock(&allproc_lock);
+	LIST_REMOVE(cp, p_list);
+	LIST_INSERT_HEAD(&allproc, cp, p_list);
+	for (;;) {
+		p = LIST_NEXT(cp, p_list);
+		if (p == NULL)
+			break;
+		LIST_REMOVE(cp, p_list);
+		LIST_INSERT_AFTER(p, cp, p_list);
+		PROC_LOCK(p);
+		if ((p->p_flag & P_TOTAL_STOP) != 0) {
+			sx_xunlock(&allproc_lock);
+			_PHOLD(p);
+			thread_single_end(p, SINGLE_ALLPROC);
+			_PRELE(p);
+			PROC_UNLOCK(p);
+			sx_xlock(&allproc_lock);
+		} else {
+			PROC_UNLOCK(p);
+		}
+	}
+	sx_xunlock(&allproc_lock);
+}
+
+#define	TOTAL_STOP_DEBUG	1
+#ifdef TOTAL_STOP_DEBUG
+volatile static int ap_resume;
+#include <sys/mount.h>
+
+static int
+sysctl_debug_stop_all_proc(SYSCTL_HANDLER_ARGS)
+{
+	int error, val;
+
+	val = 0;
+	ap_resume = 0;
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error != 0 || req->newptr == NULL)
+		return (error);
+	if (val != 0) {
+		stop_all_proc();
+		syncer_suspend();
+		while (ap_resume == 0)
+			;
+		syncer_resume();
+		resume_all_proc();
+	}
+	return (0);
+}
+
+SYSCTL_PROC(_debug, OID_AUTO, stop_all_proc, CTLTYPE_INT | CTLFLAG_RW |
+    CTLFLAG_MPSAFE, (void *)&ap_resume, 0, sysctl_debug_stop_all_proc, "I",
+    "");
+#endif
diff --git a/sys/kern/kern_sig.c b/sys/kern/kern_sig.c
index 5cdc2ce..97245fb 100644
--- a/sys/kern/kern_sig.c
+++ b/sys/kern/kern_sig.c
@@ -2471,7 +2471,7 @@ ptracestop(struct thread *td, int sig)
 			cv_broadcast(&p->p_dbgwait);
 		}
 stopme:
-		thread_suspend_switch(td);
+		thread_suspend_switch(td, p);
 		if (p->p_xthread == td)
 			p->p_xthread = NULL;
 		if (!(p->p_flag & P_TRACED))
@@ -2730,7 +2730,7 @@ issignal(struct thread *td)
 				p->p_xstat = sig;
 				PROC_SLOCK(p);
 				sig_suspend_threads(td, p, 0);
-				thread_suspend_switch(td);
+				thread_suspend_switch(td, p);
 				PROC_SUNLOCK(p);
 				mtx_lock(&ps->ps_mtx);
 				break;
@@ -2919,7 +2919,7 @@ sigexit(td, sig)
 	 * XXX If another thread attempts to single-thread before us
 	 *     (e.g. via fork()), we won't get a dump at all.
 	 */
-	if ((sigprop(sig) & SA_CORE) && (thread_single(SINGLE_NO_EXIT) == 0)) {
+	if ((sigprop(sig) & SA_CORE) && thread_single(p, SINGLE_NO_EXIT) == 0) {
 		p->p_sig = sig;
 		/*
 		 * Log signals which would cause core dumps
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index ec084ed..d10e1f3 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -64,7 +64,6 @@ __FBSDID("$FreeBSD$");
 SDT_PROVIDER_DECLARE(proc);
 SDT_PROBE_DEFINE(proc, , , lwp__exit);
 
-
 /*
  * thread related storage.
  */
@@ -446,7 +445,7 @@ thread_exit(void)
 				if (p->p_numthreads == p->p_suspcount) {
 					thread_lock(p->p_singlethread);
 					wakeup_swapper = thread_unsuspend_one(
-						p->p_singlethread);
+						p->p_singlethread, p);
 					thread_unlock(p->p_singlethread);
 					if (wakeup_swapper)
 						kick_proc0();
@@ -575,13 +574,54 @@ calc_remaining(struct proc *p, int mode)
 		remaining = p->p_numthreads;
 	else if (mode == SINGLE_BOUNDARY)
 		remaining = p->p_numthreads - p->p_boundary_count;
-	else if (mode == SINGLE_NO_EXIT)
+	else if (mode == SINGLE_NO_EXIT || mode == SINGLE_ALLPROC)
 		remaining = p->p_numthreads - p->p_suspcount;
 	else
 		panic("calc_remaining: wrong mode %d", mode);
 	return (remaining);
 }
 
+static int
+remain_for_mode(int mode)
+{
+
+	return (mode == SINGLE_ALLPROC ? 0 : 1);
+}
+
+static int
+weed_inhib(int mode, struct thread *td2, struct proc *p)
+{
+	int wakeup_swapper;
+
+	PROC_LOCK_ASSERT(p, MA_OWNED);
+	PROC_SLOCK_ASSERT(p, MA_OWNED);
+	THREAD_LOCK_ASSERT(td2, MA_OWNED);
+
+	wakeup_swapper = 0;
+	switch (mode) {
+	case SINGLE_EXIT:
+		if (TD_IS_SUSPENDED(td2))
+			wakeup_swapper |= thread_unsuspend_one(td2, p);
+		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
+			wakeup_swapper |= sleepq_abort(td2, EINTR);
+		break;
+	case SINGLE_BOUNDARY:
+		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
+			wakeup_swapper |= thread_unsuspend_one(td2, p);
+		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
+			wakeup_swapper |= sleepq_abort(td2, ERESTART);
+		break;
+	case SINGLE_ALLPROC:
+	case SINGLE_NO_EXIT:
+		if (TD_IS_SUSPENDED(td2) && (td2->td_flags & TDF_BOUNDARY) == 0)
+			wakeup_swapper |= thread_unsuspend_one(td2, p);
+		if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR) != 0)
+			wakeup_swapper |= sleepq_abort(td2, ERESTART);
+		break;
+	}
+	return (wakeup_swapper);
+}
+
 /*
  * Enforce single-threading.
  *
@@ -596,19 +636,29 @@ calc_remaining(struct proc *p, int mode)
  * any sleeping threads that are interruptable. (PCATCH).
  */
 int
-thread_single(int mode)
+thread_single(struct proc *p, int mode)
 {
 	struct thread *td;
 	struct thread *td2;
-	struct proc *p;
 	int remaining, wakeup_swapper;
 
 	td = curthread;
-	p = td->td_proc;
+	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+	    ("invalid mode %d", mode));
+	/*
+	 * If allowing non-ALLPROC singlethreading for non-curproc
+	 * callers, calc_remaining() and remain_for_mode() should be
+	 * adjusted to also account for td->td_proc != p.  For now
+	 * this is not implemented because it is not used.
+	 */
+	KASSERT((mode == SINGLE_ALLPROC && td->td_proc != p) ||
+	    (mode != SINGLE_ALLPROC && td->td_proc == p),
+	    ("mode %d proc %p curproc %p", mode, p, td->td_proc));
 	mtx_assert(&Giant, MA_NOTOWNED);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 
-	if ((p->p_flag & P_HADTHREADS) == 0)
+	if ((p->p_flag & P_HADTHREADS) == 0 && mode != SINGLE_ALLPROC)
 		return (0);
 
 	/* Is someone already single threading? */
@@ -625,11 +675,13 @@ thread_single(int mode)
 		else
 			p->p_flag &= ~P_SINGLE_BOUNDARY;
 	}
+	if (mode == SINGLE_ALLPROC)
+		p->p_flag |= P_TOTAL_STOP;
 	p->p_flag |= P_STOPPED_SINGLE;
 	PROC_SLOCK(p);
 	p->p_singlethread = td;
 	remaining = calc_remaining(p, mode);
-	while (remaining != 1) {
+	while (remaining != remain_for_mode(mode)) {
 		if (P_SHOULDSTOP(p) != P_STOPPED_SINGLE)
 			goto stopme;
 		wakeup_swapper = 0;
@@ -638,41 +690,8 @@ thread_single(int mode)
 				continue;
 			thread_lock(td2);
 			td2->td_flags |= TDF_ASTPENDING | TDF_NEEDSUSPCHK;
-			if (TD_IS_INHIBITED(td2)) {
-				switch (mode) {
-				case SINGLE_EXIT:
-					if (TD_IS_SUSPENDED(td2))
-						wakeup_swapper |=
-						    thread_unsuspend_one(td2);
-					if (TD_ON_SLEEPQ(td2) &&
-					    (td2->td_flags & TDF_SINTR))
-						wakeup_swapper |=
-						    sleepq_abort(td2, EINTR);
-					break;
-				case SINGLE_BOUNDARY:
-					if (TD_IS_SUSPENDED(td2) &&
-					    !(td2->td_flags & TDF_BOUNDARY))
-						wakeup_swapper |=
-						    thread_unsuspend_one(td2);
-					if (TD_ON_SLEEPQ(td2) &&
-					    (td2->td_flags & TDF_SINTR))
-						wakeup_swapper |=
-						    sleepq_abort(td2, ERESTART);
-					break;
-				case SINGLE_NO_EXIT:
-					if (TD_IS_SUSPENDED(td2) &&
-					    !(td2->td_flags & TDF_BOUNDARY))
-						wakeup_swapper |=
-						    thread_unsuspend_one(td2);
-					if (TD_ON_SLEEPQ(td2) &&
-					    (td2->td_flags & TDF_SINTR))
-						wakeup_swapper |=
-						    sleepq_abort(td2, ERESTART);
-					break;
-				default:
-					break;
-				}
-			}
+			if (TD_IS_INHIBITED(td2))
+				wakeup_swapper |= weed_inhib(mode, td2, p);
 #ifdef SMP
 			else if (TD_IS_RUNNING(td2) && td != td2) {
 				forward_signal(td2);
@@ -687,7 +706,7 @@ thread_single(int mode)
 		/*
 		 * Maybe we suspended some threads.. was it enough?
 		 */
-		if (remaining == 1)
+		if (remaining == remain_for_mode(mode))
 			break;
 
 stopme:
@@ -695,7 +714,7 @@ stopme:
 		 * Wake us up when everyone else has suspended.
 		 * In the mean time we suspend as well.
 		 */
-		thread_suspend_switch(td);
+		thread_suspend_switch(td, p);
 		remaining = calc_remaining(p, mode);
 	}
 	if (mode == SINGLE_EXIT) {
@@ -821,7 +840,7 @@ thread_suspend_check(int return_instead)
 			if (p->p_numthreads == p->p_suspcount + 1) {
 				thread_lock(p->p_singlethread);
 				wakeup_swapper =
-				    thread_unsuspend_one(p->p_singlethread);
+				    thread_unsuspend_one(p->p_singlethread, p);
 				thread_unlock(p->p_singlethread);
 				if (wakeup_swapper)
 					kick_proc0();
@@ -854,11 +873,9 @@ thread_suspend_check(int return_instead)
 }
 
 void
-thread_suspend_switch(struct thread *td)
+thread_suspend_switch(struct thread *td, struct proc *p)
 {
-	struct proc *p;
 
-	p = td->td_proc;
 	KASSERT(!TD_IS_SUSPENDED(td), ("already suspended"));
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	PROC_SLOCK_ASSERT(p, MA_OWNED);
@@ -866,8 +883,10 @@ thread_suspend_switch(struct thread *td)
 	 * We implement thread_suspend_one in stages here to avoid
 	 * dropping the proc lock while the thread lock is owned.
 	 */
-	thread_stopped(p);
-	p->p_suspcount++;
+	if (p == td->td_proc) {
+		thread_stopped(p);
+		p->p_suspcount++;
+	}
 	PROC_UNLOCK(p);
 	thread_lock(td);
 	td->td_flags &= ~TDF_NEEDSUSPCHK;
@@ -897,15 +916,16 @@ thread_suspend_one(struct thread *td)
 }
 
 int
-thread_unsuspend_one(struct thread *td)
+thread_unsuspend_one(struct thread *td, struct proc *p)
 {
-	struct proc *p = td->td_proc;
 
-	PROC_SLOCK_ASSERT(p, MA_OWNED);
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended"));
 	TD_CLR_SUSPENDED(td);
-	p->p_suspcount--;
+	if (td->td_proc == p) {
+		PROC_SLOCK_ASSERT(p, MA_OWNED);
+		p->p_suspcount--;
+	}
 	return (setrunnable(td));
 }
 
@@ -925,7 +945,7 @@ thread_unsuspend(struct proc *p)
                 FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
 			if (TD_IS_SUSPENDED(td)) {
-				wakeup_swapper |= thread_unsuspend_one(td);
+				wakeup_swapper |= thread_unsuspend_one(td, p);
 			}
 			thread_unlock(td);
 		}
@@ -936,9 +956,12 @@ thread_unsuspend(struct proc *p)
 		 * threading request. Now we've downgraded to single-threaded,
 		 * let it continue.
 		 */
-		thread_lock(p->p_singlethread);
-		wakeup_swapper = thread_unsuspend_one(p->p_singlethread);
-		thread_unlock(p->p_singlethread);
+		if (p->p_singlethread->td_proc == p) {
+			thread_lock(p->p_singlethread);
+			wakeup_swapper = thread_unsuspend_one(
+			    p->p_singlethread, p);
+			thread_unlock(p->p_singlethread);
+		}
 	}
 	if (wakeup_swapper)
 		kick_proc0();
@@ -948,16 +971,20 @@ thread_unsuspend(struct proc *p)
  * End the single threading mode..
  */
 void
-thread_single_end(void)
+thread_single_end(struct proc *p, int mode)
 {
 	struct thread *td;
-	struct proc *p;
 	int wakeup_swapper;
 
-	td = curthread;
-	p = td->td_proc;
+	KASSERT(mode == SINGLE_EXIT || mode == SINGLE_BOUNDARY ||
+	    mode == SINGLE_ALLPROC || mode == SINGLE_NO_EXIT,
+	    ("invalid mode %d", mode));
 	PROC_LOCK_ASSERT(p, MA_OWNED);
-	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY);
+	KASSERT((mode == SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) != 0) ||
+	    (mode != SINGLE_ALLPROC && (p->p_flag & P_TOTAL_STOP) == 0),
+	    ("mode %d does not match P_TOTAL_STOP", mode));
+	p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY |
+	    P_TOTAL_STOP);
 	PROC_SLOCK(p);
 	p->p_singlethread = NULL;
 	wakeup_swapper = 0;
@@ -967,11 +994,11 @@ thread_single_end(void)
 	 * on the process. The single threader must be allowed
 	 * to continue however as this is a bad place to stop.
 	 */
-	if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) {
+	if (p->p_numthreads != remain_for_mode(mode) && !P_SHOULDSTOP(p)) {
                 FOREACH_THREAD_IN_PROC(p, td) {
 			thread_lock(td);
 			if (TD_IS_SUSPENDED(td)) {
-				wakeup_swapper |= thread_unsuspend_one(td);
+				wakeup_swapper |= thread_unsuspend_one(td, p);
 			}
 			thread_unlock(td);
 		}
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 345aad6..b4dde06 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -1773,6 +1773,8 @@ sync_vnode(struct synclist *slp, struct bufobj **bo, struct thread *td)
 	return (0);
 }
 
+static int first_printf = 1;
+
 /*
  * System filesystem synchronizer daemon.
  */
@@ -1791,7 +1793,6 @@ sched_sync(void)
 
 	last_work_seen = 0;
 	syncer_final_iter = 0;
-	first_printf = 1;
 	syncer_state = SYNCER_RUNNING;
 	starttime = time_uptime;
 	td->td_pflags |= TDP_NORUNNINGBUF;
@@ -1955,6 +1956,25 @@ syncer_shutdown(void *arg, int howto)
 	kproc_shutdown(arg, howto);
 }
 
+void
+syncer_suspend(void)
+{
+
+	syncer_shutdown(updateproc, 0);
+}
+
+void
+syncer_resume(void)
+{
+
+	mtx_lock(&sync_mtx);
+	first_printf = 1;
+	syncer_state = SYNCER_RUNNING;
+	mtx_unlock(&sync_mtx);
+	cv_broadcast(&sync_wakeup);
+	kproc_resume(updateproc);
+}
+
 /*
  * Reassign a buffer from one vnode to another.
  * Used to assign file specific control information
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index c4e1145..07b9c7a 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -917,6 +917,9 @@ vfs_uninit_t		vfs_stduninit;
 vfs_extattrctl_t	vfs_stdextattrctl;
 vfs_sysctl_t		vfs_stdsysctl;
 
+void	syncer_suspend(void);
+void	syncer_resume(void);
+				   
 #else /* !_KERNEL */
 
 #include <sys/cdefs.h>
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index fac0915..2163543 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -635,7 +635,7 @@ struct proc {
 #define	P_SINGLE_BOUNDARY 0x400000 /* Threads should suspend at user boundary. */
 #define	P_HWPMC		0x800000 /* Process is using HWPMCs */
 #define	P_JAILED	0x1000000 /* Process is in jail. */
-#define	P_UNUSED1	0x2000000
+#define	P_TOTAL_STOP	0x2000000 /* Stopped in proc_stop_total. */
 #define	P_INEXEC	0x4000000 /* Process is in execve(). */
 #define	P_STATCHILD	0x8000000 /* Child process stopped or exited. */
 #define	P_INMEM		0x10000000 /* Loaded into memory. */
@@ -696,6 +696,7 @@ struct proc {
 #define	SINGLE_NO_EXIT	0
 #define	SINGLE_EXIT	1
 #define	SINGLE_BOUNDARY	2
+#define	SINGLE_ALLPROC	3
 
 #ifdef MALLOC_DECLARE
 MALLOC_DECLARE(M_PARGS);
@@ -945,22 +946,25 @@ void	thread_exit(void) __dead2;
 void	thread_free(struct thread *td);
 void	thread_link(struct thread *td, struct proc *p);
 void	thread_reap(void);
-int	thread_single(int how);
-void	thread_single_end(void);
+int	thread_single(struct proc *p, int how);
+void	thread_single_end(struct proc *p, int how);
 void	thread_stash(struct thread *td);
 void	thread_stopped(struct proc *p);
 void	childproc_stopped(struct proc *child, int reason);
 void	childproc_continued(struct proc *child);
 void	childproc_exited(struct proc *child);
 int	thread_suspend_check(int how);
-void	thread_suspend_switch(struct thread *);
+void	thread_suspend_switch(struct thread *, struct proc *p);
 void	thread_suspend_one(struct thread *td);
 void	thread_unlink(struct thread *td);
 void	thread_unsuspend(struct proc *p);
-int	thread_unsuspend_one(struct thread *td);
+int	thread_unsuspend_one(struct thread *td, struct proc *p);
 void	thread_wait(struct proc *p);
 struct thread	*thread_find(struct proc *p, lwpid_t tid);
 
+void	stop_all_proc(void);
+void	resume_all_proc(void);
+
 static __inline int
 curthread_pflags_set(int flags)
 {

