PERFORCE change 65498 for review
David Xu
davidxu at FreeBSD.org
Fri Nov 19 19:05:03 PST 2004
http://perforce.freebsd.org/chv.cgi?CH=65498
Change 65498 by davidxu at davidxu_alona on 2004/11/20 03:04:34
Follow the change in thr_private.h; there is no sched queue anymore.
Affected files ...
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#2 edit
Differences ...
==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#2 (text+ko) ====
@@ -36,23 +36,15 @@
__FBSDID("$FreeBSD: src/lib/libpthread/thread/thr_kern.c,v 1.115 2004/10/23 23:28:36 davidxu Exp $");
#include <sys/types.h>
-#include <sys/kse.h>
-#include <sys/ptrace.h>
#include <sys/signalvar.h>
#include <sys/queue.h>
-#include <machine/atomic.h>
-#include <machine/sigframe.h>
-#include <assert.h>
-#include <errno.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
-#include <ucontext.h>
#include <unistd.h>
-#include "atomic_ops.h"
#include "thr_private.h"
#include "libc_private.h"
@@ -78,29 +70,6 @@
#define MAX_CACHED_KSES ((_thread_scope_system <= 0) ? 50 : 100)
#define MAX_CACHED_KSEGS ((_thread_scope_system <= 0) ? 50 : 100)
-#define KSE_SET_MBOX(kse, thrd) \
- (kse)->k_kcb->kcb_kmbx.km_curthread = &(thrd)->tcb->tcb_tmbx
-
-#define KSE_SET_EXITED(kse) (kse)->k_flags |= KF_EXITED
-
-/*
- * Macros for manipulating the run queues. The priority queue
- * routines use the thread's pqe link and also handle the setting
- * and clearing of the thread's THR_FLAGS_IN_RUNQ flag.
- */
-#define KSE_RUNQ_INSERT_HEAD(kse, thrd) \
- _pq_insert_head(&(kse)->k_schedq->sq_runq, thrd)
-#define KSE_RUNQ_INSERT_TAIL(kse, thrd) \
- _pq_insert_tail(&(kse)->k_schedq->sq_runq, thrd)
-#define KSE_RUNQ_REMOVE(kse, thrd) \
- _pq_remove(&(kse)->k_schedq->sq_runq, thrd)
-#define KSE_RUNQ_FIRST(kse) \
- ((_libkse_debug == 0) ? \
- _pq_first(&(kse)->k_schedq->sq_runq) : \
- _pq_first_debug(&(kse)->k_schedq->sq_runq))
-
-#define KSE_RUNQ_THREADS(kse) ((kse)->k_schedq->sq_runq.pq_threads)
-
#define THR_NEED_CANCEL(thrd) \
(((thrd)->cancelflags & THR_CANCELLING) != 0 && \
((thrd)->cancelflags & PTHREAD_CANCEL_DISABLE) == 0 && \
@@ -118,78 +87,26 @@
* to have a speedy free list, but also so they can be deallocated
* after a fork().
*/
-static TAILQ_HEAD(, kse) active_kseq;
-static TAILQ_HEAD(, kse) free_kseq;
-static TAILQ_HEAD(, kse_group) free_kse_groupq;
-static TAILQ_HEAD(, kse_group) active_kse_groupq;
-static TAILQ_HEAD(, kse_group) gc_ksegq;
-static struct lock kse_lock; /* also used for kseg queue */
-static int free_kse_count = 0;
-static int free_kseg_count = 0;
static TAILQ_HEAD(, pthread) free_threadq;
-static struct lock thread_lock;
+static struct lock free_thread_lock;
static int free_thread_count = 0;
static int inited = 0;
-static int active_kse_count = 0;
-static int active_kseg_count = 0;
static u_int64_t next_uniqueid = 1;
LIST_HEAD(thread_hash_head, pthread);
-#define THREAD_HASH_QUEUES 127
-static struct thread_hash_head thr_hashtable[THREAD_HASH_QUEUES];
-#define THREAD_HASH(thrd) ((unsigned long)thrd % THREAD_HASH_QUEUES)
+#define HASH_QUEUES 128
+static struct thread_hash_head thr_hashtable[HASH_QUEUES];
+#define THREAD_HASH(thrd) (((unsigned long)thrd >> 12) % HASH_QUEUES)
/* Lock for thread tcb constructor/destructor */
-static pthread_mutex_t _tcb_mutex;
+static struct lock tcb_lock;
-#ifdef DEBUG_THREAD_KERN
-static void dump_queues(struct kse *curkse);
-#endif
-static void kse_check_completed(struct kse *kse);
-static void kse_check_waitq(struct kse *kse);
-static void kse_fini(struct kse *curkse);
-static void kse_reinit(struct kse *kse, int sys_scope);
-static void kse_sched_multi(struct kse_mailbox *kmbx);
-static void kse_sched_single(struct kse_mailbox *kmbx);
-static void kse_switchout_thread(struct kse *kse, struct pthread *thread);
-static void kse_wait(struct kse *kse, struct pthread *td_wait, int sigseq);
-static void kse_free_unlocked(struct kse *kse);
-static void kse_destroy(struct kse *kse);
-static void kseg_free_unlocked(struct kse_group *kseg);
-static void kseg_init(struct kse_group *kseg);
-static void kseg_reinit(struct kse_group *kseg);
-static void kseg_destroy(struct kse_group *kseg);
-static void kse_waitq_insert(struct pthread *thread);
-static void kse_wakeup_multi(struct kse *curkse);
-static struct kse_mailbox *kse_wakeup_one(struct pthread *thread);
-static void thr_cleanup(struct kse *kse, struct pthread *curthread);
-static void thr_link(struct pthread *thread);
-static void thr_resume_wrapper(int sig, siginfo_t *, ucontext_t *);
-static void thr_resume_check(struct pthread *curthread, ucontext_t *ucp,
- struct pthread_sigframe *psf);
+static void thr_wait(struct pthread *td_wait, int sigseq);
+static void thr_cleanup(struct pthread *curthread);
static int thr_timedout(struct pthread *thread, struct timespec *curtime);
-static void thr_unlink(struct pthread *thread);
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
static void thread_gc(struct pthread *thread);
-static void kse_gc(struct pthread *thread);
-static void kseg_gc(struct pthread *thread);
-static void __inline
-thr_accounting(struct pthread *thread)
-{
- if ((thread->slice_usec != -1) &&
- (thread->slice_usec <= TIMESLICE_USEC) &&
- (thread->attr.sched_policy != SCHED_FIFO)) {
- thread->slice_usec += (thread->tcb->tcb_tmbx.tm_uticks
- + thread->tcb->tcb_tmbx.tm_sticks) * _clock_res_usec;
- /* Check for time quantum exceeded: */
- if (thread->slice_usec > TIMESLICE_USEC)
- thread->slice_usec = -1;
- }
- thread->tcb->tcb_tmbx.tm_uticks = 0;
- thread->tcb->tcb_tmbx.tm_sticks = 0;
-}
-
/*
* This is called after a fork().
* No locks need to be taken here since we are guaranteed to be
@@ -205,160 +122,15 @@
* structures may be in inconsistent state.
*/
void
-_kse_single_thread(struct pthread *curthread)
+_thr_single_thread(struct pthread *curthread)
{
-#ifdef NOTYET
- struct kse *kse;
- struct kse_group *kseg;
- struct pthread *thread;
- kse_critical_t crit;
- int i;
-
- if (__isthreaded) {
- _thr_rtld_fini();
- _thr_signal_deinit();
- }
- __isthreaded = 0;
- /*
- * Restore signal mask early, so any memory problems could
- * dump core.
- */
- sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
- _thread_active_threads = 1;
-
- /*
- * Enter a loop to remove and free all threads other than
- * the running thread from the active thread list:
- */
- while ((thread = TAILQ_FIRST(&_thread_list)) != NULL) {
- THR_GCLIST_REMOVE(thread);
- /*
- * Remove this thread from the list (the current
- * thread will be removed but re-added by libpthread
- * initialization.
- */
- TAILQ_REMOVE(&_thread_list, thread, tle);
- /* Make sure this isn't the running thread: */
- if (thread != curthread) {
- _thr_stack_free(&thread->attr);
- if (thread->specific != NULL)
- free(thread->specific);
- thr_destroy(curthread, thread);
- }
- }
-
- TAILQ_INIT(&curthread->mutexq); /* initialize mutex queue */
- curthread->joiner = NULL; /* no joining threads yet */
- curthread->refcount = 0;
- SIGEMPTYSET(curthread->sigpend); /* clear pending signals */
- if (curthread->specific != NULL) {
- free(curthread->specific);
- curthread->specific = NULL;
- curthread->specific_data_count = 0;
- }
-
- /* Free the free KSEs: */
- while ((kse = TAILQ_FIRST(&free_kseq)) != NULL) {
- TAILQ_REMOVE(&free_kseq, kse, k_qe);
- kse_destroy(kse);
- }
- free_kse_count = 0;
-
- /* Free the active KSEs: */
- while ((kse = TAILQ_FIRST(&active_kseq)) != NULL) {
- TAILQ_REMOVE(&active_kseq, kse, k_qe);
- kse_destroy(kse);
- }
- active_kse_count = 0;
-
- /* Free the free KSEGs: */
- while ((kseg = TAILQ_FIRST(&free_kse_groupq)) != NULL) {
- TAILQ_REMOVE(&free_kse_groupq, kseg, kg_qe);
- kseg_destroy(kseg);
- }
- free_kseg_count = 0;
-
- /* Free the active KSEGs: */
- while ((kseg = TAILQ_FIRST(&active_kse_groupq)) != NULL) {
- TAILQ_REMOVE(&active_kse_groupq, kseg, kg_qe);
- kseg_destroy(kseg);
- }
- active_kseg_count = 0;
-
- /* Free the free threads. */
- while ((thread = TAILQ_FIRST(&free_threadq)) != NULL) {
- TAILQ_REMOVE(&free_threadq, thread, tle);
- thr_destroy(curthread, thread);
- }
- free_thread_count = 0;
-
- /* Free the to-be-gc'd threads. */
- while ((thread = TAILQ_FIRST(&_thread_gc_list)) != NULL) {
- TAILQ_REMOVE(&_thread_gc_list, thread, gcle);
- thr_destroy(curthread, thread);
- }
- TAILQ_INIT(&gc_ksegq);
- _gc_count = 0;
-
- if (inited != 0) {
- /*
- * Destroy these locks; they'll be recreated to assure they
- * are in the unlocked state.
- */
- _lock_destroy(&kse_lock);
- _lock_destroy(&thread_lock);
- _lock_destroy(&_thread_list_lock);
- inited = 0;
- }
-
- /*
- * After a fork(), the leftover thread goes back to being
- * scope process.
- */
- curthread->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
- curthread->attr.flags |= PTHREAD_SCOPE_PROCESS;
-
- /*
- * After a fork, we are still operating on the thread's original
- * stack. Don't clear the THR_FLAGS_USER from the thread's
- * attribute flags.
- */
-
- /* Initialize the threads library. */
- curthread->kse = NULL;
- curthread->kseg = NULL;
- _kse_initial = NULL;
_libpthread_init(curthread);
-#else
- int i;
-
- /* Reset the current thread and KSE lock data. */
- for (i = 0; i < curthread->locklevel; i++) {
- _lockuser_reinit(&curthread->lockusers[i], (void *)curthread);
- }
- curthread->locklevel = 0;
- for (i = 0; i < curthread->kse->k_locklevel; i++) {
- _lockuser_reinit(&curthread->kse->k_lockusers[i],
- (void *)curthread->kse);
- _LCK_SET_PRIVATE2(&curthread->kse->k_lockusers[i], NULL);
- }
- curthread->kse->k_locklevel = 0;
- _thr_spinlock_init();
+#if 0
if (__isthreaded) {
_thr_rtld_fini();
- _thr_signal_deinit();
}
+#endif
__isthreaded = 0;
- curthread->kse->k_kcb->kcb_kmbx.km_curthread = NULL;
- curthread->attr.flags |= PTHREAD_SCOPE_SYSTEM;
-
- /*
- * Restore signal mask early, so any memory problems could
- * dump core.
- */
- sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
- _thread_active_threads = 1;
-#endif
}
/*
@@ -366,29 +138,21 @@
* KSD for the KSE.
*/
void
-_kse_init(void)
+_thr_kern_init(void)
{
+ TAILQ_INIT(&free_threadq);
+ _gc_count = 0;
if (inited == 0) {
- TAILQ_INIT(&active_kseq);
- TAILQ_INIT(&active_kse_groupq);
- TAILQ_INIT(&free_kseq);
- TAILQ_INIT(&free_kse_groupq);
- TAILQ_INIT(&free_threadq);
- TAILQ_INIT(&gc_ksegq);
- if (_lock_init(&kse_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
- PANIC("Unable to initialize free KSE queue lock");
- if (_lock_init(&thread_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
+ if (_lock_init(&free_thread_lock) != 0)
PANIC("Unable to initialize free thread queue lock");
- if (_lock_init(&_thread_list_lock, LCK_ADAPTIVE,
- _kse_lock_wait, _kse_lock_wakeup) != 0)
- PANIC("Unable to initialize thread list lock");
- _pthread_mutex_init(&_tcb_mutex, NULL);
- active_kse_count = 0;
- active_kseg_count = 0;
- _gc_count = 0;
+ if (_lock_init(&tcb_lock) != 0)
+ PANIC("Unable to initialize tcb lock");
inited = 1;
+ } else {
+ if (_lock_reinit(&free_thread_lock) != 0)
+ PANIC("Unable to reinitialize free thread queue lock");
+ if (_lock_reinit(&tcb_lock) != 0)
+ PANIC("Unable to reinitialize tcb lock");
}
}
@@ -397,193 +161,22 @@
* thread) is created.
*/
int
-_kse_setthreaded(int threaded)
+_thr_setthreaded(int threaded)
{
- sigset_t sigset;
-
if ((threaded != 0) && (__isthreaded == 0)) {
- SIGFILLSET(sigset);
- __sys_sigprocmask(SIG_SETMASK, &sigset, &_thr_initial->sigmask);
-
- /*
- * Tell the kernel to create a KSE for the initial thread
- * and enable upcalls in it.
- */
- _kse_initial->k_flags |= KF_STARTED;
-
- if (_thread_scope_system <= 0) {
- _thr_initial->attr.flags &= ~PTHREAD_SCOPE_SYSTEM;
- _kse_initial->k_kseg->kg_flags &= ~KGF_SINGLE_THREAD;
- _kse_initial->k_kcb->kcb_kmbx.km_curthread = NULL;
- }
- else {
- /*
- * For bound thread, kernel reads mailbox pointer
- * once, we'd set it here before calling kse_create.
- */
- _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb);
- KSE_SET_MBOX(_kse_initial, _thr_initial);
- _kse_initial->k_kcb->kcb_kmbx.km_flags |= KMF_BOUND;
- }
-
+#if 0
/*
* Locking functions in libc are required when there are
* threads other than the initial thread.
*/
_thr_rtld_init();
-
+#endif
__isthreaded = 1;
- if (kse_create(&_kse_initial->k_kcb->kcb_kmbx, 0) != 0) {
- _kse_initial->k_flags &= ~KF_STARTED;
- __isthreaded = 0;
- PANIC("kse_create() failed\n");
- return (-1);
- }
- _thr_initial->tcb->tcb_tmbx.tm_lwp =
- _kse_initial->k_kcb->kcb_kmbx.km_lwp;
- _thread_activated = 1;
-
-#ifndef SYSTEM_SCOPE_ONLY
- if (_thread_scope_system <= 0) {
- /* Set current thread to initial thread */
- _tcb_set(_kse_initial->k_kcb, _thr_initial->tcb);
- KSE_SET_MBOX(_kse_initial, _thr_initial);
- _thr_start_sig_daemon();
- _thr_setmaxconcurrency();
- }
- else
-#endif
- __sys_sigprocmask(SIG_SETMASK, &_thr_initial->sigmask,
- NULL);
}
return (0);
}
-/*
- * Lock wait and wakeup handlers for KSE locks. These are only used by
- * KSEs, and should never be used by threads. KSE locks include the
- * KSE group lock (used for locking the scheduling queue) and the
- * kse_lock defined above.
- *
- * When a KSE lock attempt blocks, the entire KSE blocks allowing another
- * KSE to run. For the most part, it doesn't make much sense to try and
- * schedule another thread because you need to lock the scheduling queue
- * in order to do that. And since the KSE lock is used to lock the scheduling
- * queue, you would just end up blocking again.
- */
-void
-_kse_lock_wait(struct lock *lock, struct lockuser *lu)
-{
- struct kse *curkse = (struct kse *)_LCK_GET_PRIVATE(lu);
- struct timespec ts;
- int saved_flags;
-
- if (curkse->k_kcb->kcb_kmbx.km_curthread != NULL)
- PANIC("kse_lock_wait does not disable upcall.\n");
- /*
- * Enter a loop to wait until we get the lock.
- */
- ts.tv_sec = 0;
- ts.tv_nsec = 1000000; /* 1 sec */
- while (!_LCK_GRANTED(lu)) {
- /*
- * Yield the kse and wait to be notified when the lock
- * is granted.
- */
- saved_flags = curkse->k_kcb->kcb_kmbx.km_flags;
- curkse->k_kcb->kcb_kmbx.km_flags |= KMF_NOUPCALL |
- KMF_NOCOMPLETED;
- kse_release(&ts);
- curkse->k_kcb->kcb_kmbx.km_flags = saved_flags;
- }
-}
-
-void
-_kse_lock_wakeup(struct lock *lock, struct lockuser *lu)
-{
- struct kse *curkse;
- struct kse *kse;
- struct kse_mailbox *mbx;
-
- curkse = _get_curkse();
- kse = (struct kse *)_LCK_GET_PRIVATE(lu);
-
- if (kse == curkse)
- PANIC("KSE trying to wake itself up in lock");
- else {
- mbx = &kse->k_kcb->kcb_kmbx;
- _lock_grant(lock, lu);
- /*
- * Notify the owning kse that it has the lock.
- * It is safe to pass invalid address to kse_wakeup
- * even if the mailbox is not in kernel at all,
- * and waking up a wrong kse is also harmless.
- */
- kse_wakeup(mbx);
- }
-}
-
-/*
- * Thread wait and wakeup handlers for thread locks. These are only used
- * by threads, never by KSEs. Thread locks include the per-thread lock
- * (defined in its structure), and condition variable and mutex locks.
- */
-void
-_thr_lock_wait(struct lock *lock, struct lockuser *lu)
-{
- struct pthread *curthread = (struct pthread *)lu->lu_private;
-
- do {
- THR_LOCK_SWITCH(curthread);
- THR_SET_STATE(curthread, PS_LOCKWAIT);
- _thr_sched_switch_unlocked(curthread);
- } while (!_LCK_GRANTED(lu));
-}
-
-void
-_thr_lock_wakeup(struct lock *lock, struct lockuser *lu)
-{
- struct pthread *thread;
- struct pthread *curthread;
- struct kse_mailbox *kmbx;
-
- curthread = _get_curthread();
- thread = (struct pthread *)_LCK_GET_PRIVATE(lu);
-
- THR_SCHED_LOCK(curthread, thread);
- _lock_grant(lock, lu);
- kmbx = _thr_setrunnable_unlocked(thread);
- THR_SCHED_UNLOCK(curthread, thread);
- if (kmbx != NULL)
- kse_wakeup(kmbx);
-}
-
-kse_critical_t
-_kse_critical_enter(void)
-{
- kse_critical_t crit;
-
- crit = (kse_critical_t)_kcb_critical_enter();
- return (crit);
-}
-
void
-_kse_critical_leave(kse_critical_t crit)
-{
- struct pthread *curthread;
-
- _kcb_critical_leave((struct kse_thr_mailbox *)crit);
- if ((crit != NULL) && ((curthread = _get_curthread()) != NULL))
- THR_YIELD_CHECK(curthread);
-}
-
-int
-_kse_in_critical(void)
-{
- return (_kcb_in_critical());
-}
-
-void
_thr_critical_enter(struct pthread *thread)
{
thread->critical_count++;
@@ -599,162 +192,25 @@
void
_thr_sched_switch(struct pthread *curthread)
{
- struct kse *curkse;
-
- (void)_kse_critical_enter();
- curkse = _get_curkse();
- KSE_SCHED_LOCK(curkse, curkse->k_kseg);
+ THR_LOCK_SWITCH(curthread);
_thr_sched_switch_unlocked(curthread);
}
/*
- * XXX - We may need to take the scheduling lock before calling
- * this, or perhaps take the lock within here before
- * doing anything else.
+ * Must hold thread lock before calling this function.
*/
void
_thr_sched_switch_unlocked(struct pthread *curthread)
{
- struct pthread_sigframe psf;
- struct kse *curkse;
- volatile int resume_once = 0;
- ucontext_t *uc;
-
- /* We're in the scheduler, 5 by 5: */
- curkse = _get_curkse();
-
- curthread->need_switchout = 1; /* The thread yielded on its own. */
- curthread->critical_yield = 0; /* No need to yield anymore. */
-
- /* Thread can unlock the scheduler lock. */
- curthread->lock_switch = 1;
-
- /*
- * The signal frame is allocated off the stack because
- * a thread can be interrupted by other signals while
- * it is running down pending signals.
- */
- psf.psf_valid = 0;
- curthread->curframe = &psf;
-
- if (curthread->attr.flags & PTHREAD_SCOPE_SYSTEM)
- kse_sched_single(&curkse->k_kcb->kcb_kmbx);
- else {
- if (__predict_false(_libkse_debug != 0)) {
- /*
- * Because debugger saves single step status in thread
- * mailbox's tm_dflags, we can safely clear single
- * step status here. the single step status will be
- * restored by kse_switchin when the thread is
- * switched in again. This also lets uts run in full
- * speed.
- */
- ptrace(PT_CLEARSTEP, curkse->k_kcb->kcb_kmbx.km_lwp,
- (caddr_t) 1, 0);
- }
-
- KSE_SET_SWITCH(curkse);
- _thread_enter_uts(curthread->tcb, curkse->k_kcb);
- }
-
- /*
- * It is ugly we must increase critical count, because we
- * have a frame saved, we must backout state in psf
- * before we can process signals.
- */
- curthread->critical_count += psf.psf_valid;
-
- /*
- * Unlock the scheduling queue and leave the
- * critical region.
- */
- /* Don't trust this after a switch! */
- curkse = _get_curkse();
-
- curthread->lock_switch = 0;
- KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- _kse_critical_leave(&curthread->tcb->tcb_tmbx);
-
- /*
- * This thread is being resumed; check for cancellations.
- */
- if ((psf.psf_valid ||
- ((curthread->check_pending || THR_NEED_ASYNC_CANCEL(curthread))
- && !THR_IN_CRITICAL(curthread)))) {
- uc = alloca(sizeof(ucontext_t));
- resume_once = 0;
- THR_GETCONTEXT(uc);
- if (resume_once == 0) {
- resume_once = 1;
- curthread->check_pending = 0;
- thr_resume_check(curthread, uc, &psf);
- }
- }
- THR_ACTIVATE_LAST_LOCK(curthread);
-}
-
-/*
- * This is the scheduler for a KSE which runs a scope system thread.
- * The multi-thread KSE scheduler should also work for a single threaded
- * KSE, but we use a separate scheduler so that it can be fine-tuned
- * to be more efficient (and perhaps not need a separate stack for
- * the KSE, allowing it to use the thread's stack).
- */
-
-static void
-kse_sched_single(struct kse_mailbox *kmbx)
-{
- struct kse *curkse;
- struct pthread *curthread;
struct timespec ts;
sigset_t sigmask;
- int i, sigseqno, level, first = 0;
+ int i, sigseqno;
- curkse = (struct kse *)kmbx->km_udata;
- curthread = curkse->k_curthread;
-
- if (__predict_false((curkse->k_flags & KF_INITIALIZED) == 0)) {
- /* Setup this KSEs specific data. */
- _kcb_set(curkse->k_kcb);
- _tcb_set(curkse->k_kcb, curthread->tcb);
- curkse->k_flags |= KF_INITIALIZED;
- first = 1;
- curthread->active = 1;
-
- /* Setup kernel signal masks for new thread. */
- __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask, NULL);
- /*
- * Enter critical region, this is meanless for bound thread,
- * It is used to let other code work, those code want mailbox
- * to be cleared.
- */
- (void)_kse_critical_enter();
- } else {
- /*
- * Bound thread always has tcb set, this prevent some
- * code from blindly setting bound thread tcb to NULL,
- * buggy code ?
- */
- _tcb_set(curkse->k_kcb, curthread->tcb);
- }
-
- curthread->critical_yield = 0;
- curthread->need_switchout = 0;
-
- /*
- * Lock the scheduling queue.
- *
- * There is no scheduling queue for single threaded KSEs,
- * but we need a lock for protection regardless.
- */
- if (curthread->lock_switch == 0)
- KSE_SCHED_LOCK(curkse, curkse->k_kseg);
-
+ THR_ASSERT(curthread->lock_switch == 1, "lockswitch?");
/*
* This has to do the job of kse_switchout_thread(), only
* for a single threaded KSE/KSEG.
*/
-
switch (curthread->state) {
case PS_MUTEX_WAIT:
case PS_COND_WAIT:
@@ -765,23 +221,10 @@
}
break;
- case PS_LOCKWAIT:
- /*
- * This state doesn't timeout.
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- level = curthread->locklevel - 1;
- if (_LCK_GRANTED(&curthread->lockusers[level]))
- THR_SET_STATE(curthread, PS_RUNNING);
- break;
-
case PS_DEAD:
curthread->check_pending = 0;
- /* Unlock the scheduling queue and exit the KSE and thread. */
- thr_cleanup(curkse, curthread);
- KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- PANIC("bound thread shouldn't get here\n");
+ /* exit thread. */
+ thr_cleanup(curthread);
break;
case PS_JOIN:
@@ -822,15 +265,6 @@
}
break;
- case PS_SIGWAIT:
- PANIC("bound thread does not have SIGWAIT state\n");
-
- case PS_SLEEP_WAIT:
- PANIC("bound thread does not have SLEEP_WAIT state\n");
-
- case PS_SIGSUSPEND:
- PANIC("bound thread does not have SIGSUSPEND state\n");
-
case PS_DEADLOCK:
/*
* These states don't timeout and don't need
@@ -846,7 +280,7 @@
}
while (curthread->state != PS_RUNNING) {
- sigseqno = curkse->k_sigseqno;
+ sigseqno = curthread->sigseqno;
if (curthread->check_pending != 0) {
/*
* Install pending signals into the frame, possible
@@ -874,11 +308,9 @@
if (curthread->state == PS_RUNNING)
break;
}
- THR_DEACTIVATE_LAST_LOCK(curthread);
- kse_wait(curkse, curthread, sigseqno);
- THR_ACTIVATE_LAST_LOCK(curthread);
+ thr_wait(curthread, sigseqno);
if (curthread->wakeup_time.tv_sec >= 0) {
- KSE_GET_TOD(curkse, &ts);
+ clock_gettime(CLOCK_REALTIME, &ts);
if (thr_timedout(curthread, &ts)) {
/* Indicate the thread timedout: */
curthread->timeout = 1;
@@ -888,386 +320,80 @@
}
}
- /* Remove the frame reference. */
- curthread->curframe = NULL;
-
- if (curthread->lock_switch == 0) {
- /* Unlock the scheduling queue. */
- KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- }
-
- DBG_MSG("Continuing bound thread %p\n", curthread);
- if (first) {
- _kse_critical_leave(&curthread->tcb->tcb_tmbx);
- pthread_exit(curthread->start_routine(curthread->arg));
- }
-}
-
-#ifdef DEBUG_THREAD_KERN
-static void
-dump_queues(struct kse *curkse)
-{
- struct pthread *thread;
-
- DBG_MSG("Threads in waiting queue:\n");
- TAILQ_FOREACH(thread, &curkse->k_kseg->kg_schedq.sq_waitq, pqe) {
- DBG_MSG(" thread %p, state %d, blocked %d\n",
- thread, thread->state, thread->blocked);
- }
-}
-#endif
-
-/*
- * This is the scheduler for a KSE which runs multiple threads.
- */
-static void
-kse_sched_multi(struct kse_mailbox *kmbx)
-{
- struct kse *curkse;
- struct pthread *curthread, *td_wait;
- struct pthread_sigframe *curframe;
- int ret;
-
- curkse = (struct kse *)kmbx->km_udata;
- THR_ASSERT(curkse->k_kcb->kcb_kmbx.km_curthread == NULL,
- "Mailbox not null in kse_sched_multi");
+ THR_UNLOCK_SWITCH(curthread);
- /* Check for first time initialization: */
- if (__predict_false((curkse->k_flags & KF_INITIALIZED) == 0)) {
- /* Setup this KSEs specific data. */
- _kcb_set(curkse->k_kcb);
-
- /* Set this before grabbing the context. */
- curkse->k_flags |= KF_INITIALIZED;
- }
-
/*
- * No current thread anymore, calling _get_curthread in UTS
- * should dump core
+ * This thread is being resumed; check for cancellations.
*/
- _tcb_set(curkse->k_kcb, NULL);
-
- /* If this is an upcall; take the scheduler lock. */
- if (!KSE_IS_SWITCH(curkse))
- KSE_SCHED_LOCK(curkse, curkse->k_kseg);
- else
- KSE_CLEAR_SWITCH(curkse);
-
- if (KSE_IS_IDLE(curkse)) {
- KSE_CLEAR_IDLE(curkse);
- curkse->k_kseg->kg_idle_kses--;
- }
-
- /*
- * Now that the scheduler lock is held, get the current
- * thread. The KSE's current thread cannot be safely
- * examined without the lock because it could have returned
- * as completed on another KSE. See kse_check_completed().
- */
- curthread = curkse->k_curthread;
-
- /*
- * If the current thread was completed in another KSE, then
- * it will be in the run queue. Don't mark it as being blocked.
- */
- if ((curthread != NULL) &&
- ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) &&
- (curthread->need_switchout == 0)) {
- /*
- * Assume the current thread is blocked; when the
- * completed threads are checked and if the current
- * thread is among the completed, the blocked flag
- * will be cleared.
- */
- curthread->blocked = 1;
- }
-
- /* Check for any unblocked threads in the kernel. */
- kse_check_completed(curkse);
-
- /*
- * Check for threads that have timed-out.
- */
- kse_check_waitq(curkse);
-
- /*
- * Switchout the current thread, if necessary, as the last step
- * so that it is inserted into the run queue (if it's runnable)
- * _after_ any other threads that were added to it above.
- */
- if (curthread == NULL)
- ; /* Nothing to do here. */
- else if ((curthread->need_switchout == 0) && DBG_CAN_RUN(curthread) &&
- (curthread->blocked == 0) && (THR_IN_CRITICAL(curthread))) {
- /*
- * Resume the thread and tell it to yield when
- * it leaves the critical region.
- */
- curthread->critical_yield = 1;
- curthread->active = 1;
- if ((curthread->flags & THR_FLAGS_IN_RUNQ) != 0)
- KSE_RUNQ_REMOVE(curkse, curthread);
- curkse->k_curthread = curthread;
- curthread->kse = curkse;
- DBG_MSG("Continuing thread %p in critical region\n",
- curthread);
- kse_wakeup_multi(curkse);
- KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- ret = _thread_switch(curkse->k_kcb, curthread->tcb, 1);
- if (ret != 0)
- PANIC("Can't resume thread in critical region\n");
- }
- else if ((curthread->flags & THR_FLAGS_IN_RUNQ) == 0) {
- curthread->tcb->tcb_tmbx.tm_lwp = 0;
- kse_switchout_thread(curkse, curthread);
- }
- curkse->k_curthread = NULL;
-
-#ifdef DEBUG_THREAD_KERN
- dump_queues(curkse);
-#endif
-
- /* Check if there are no threads ready to run: */
- while (((curthread = KSE_RUNQ_FIRST(curkse)) == NULL) &&
- (curkse->k_kseg->kg_threadcount != 0) &&
- ((curkse->k_flags & KF_TERMINATED) == 0)) {
- /*
- * Wait for a thread to become active or until there are
- * no more threads.
- */
- td_wait = KSE_WAITQ_FIRST(curkse);
- kse_wait(curkse, td_wait, 0);
- kse_check_completed(curkse);
- kse_check_waitq(curkse);
- }
-
- /* Check for no more threads: */
- if ((curkse->k_kseg->kg_threadcount == 0) ||
- ((curkse->k_flags & KF_TERMINATED) != 0)) {
- /*
- * Normally this shouldn't return, but it will if there
- * are other KSEs running that create new threads that
- * are assigned to this KSE[G]. For instance, if a scope
- * system thread were to create a scope process thread
- * and this kse[g] is the initial kse[g], then that newly
- * created thread would be assigned to us (the initial
- * kse[g]).
- */
- kse_wakeup_multi(curkse);
- KSE_SCHED_UNLOCK(curkse, curkse->k_kseg);
- kse_fini(curkse);
- /* never returns */
- }
-
- THR_ASSERT(curthread != NULL,
- "Return from kse_wait/fini without thread.");
- THR_ASSERT(curthread->state != PS_DEAD,
- "Trying to resume dead thread!");
- KSE_RUNQ_REMOVE(curkse, curthread);
-
- /*
- * Make the selected thread the current thread.
- */
- curkse->k_curthread = curthread;
-
- /*
- * Make sure the current thread's kse points to this kse.
- */
- curthread->kse = curkse;
-
- /*
- * Reset the time slice if this thread is running for the first
- * time or running again after using its full time slice allocation.
- */
- if (curthread->slice_usec == -1)
- curthread->slice_usec = 0;
-
- /* Mark the thread active. */
- curthread->active = 1;
-
- /* Remove the frame reference. */
- curframe = curthread->curframe;
- curthread->curframe = NULL;
-
- /*
- * The thread's current signal frame will only be NULL if it
- * is being resumed after being blocked in the kernel. In
- * this case, and if the thread needs to run down pending
- * signals or needs a cancellation check, we need to add a
- * signal frame to the thread's context.
- */
>>> TRUNCATED FOR MAIL (1000 lines) <<<
More information about the p4-projects
mailing list