PERFORCE change 67984 for review
David Xu
davidxu at FreeBSD.org
Fri Dec 31 07:03:18 PST 2004
http://perforce.freebsd.org/chv.cgi?CH=67984
Change 67984 by davidxu at davidxu_tiger on 2004/12/31 15:02:44
Remove unused code. Merge thread searching code into this file.
Affected files ...
.. //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#7 edit
Differences ...
==== //depot/projects/davidxu_thread/src/lib/libthread/thread/thr_kern.c#7 (text+ko) ====
@@ -1,39 +1,34 @@
/*
+ * Copyright (c) 2005, David Xu <davidxu at freebsd.org>
* Copyright (C) 2003 Daniel M. Eischen <deischen at freebsd.org>
- * Copyright (C) 2002 Jonathon Mini <mini at freebsd.org>
- * Copyright (c) 1995-1998 John Birrell <jb at cimlogic.com.au>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This product includes software developed by John Birrell.
- * 4. Neither the name of the author nor the names of any co-contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
*
- * THIS SOFTWARE IS PROVIDED BY JOHN BIRRELL AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
+ * $FreeBSD$
*/
+
#include <sys/cdefs.h>
-__FBSDID("$FreeBSD: src/lib/libpthread/thread/thr_kern.c,v 1.115 2004/10/23 23:28:36 davidxu Exp $");
+__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/signalvar.h>
@@ -70,18 +65,6 @@
#define MAX_CACHED_KSES ((_thread_scope_system <= 0) ? 50 : 100)
#define MAX_CACHED_KSEGS ((_thread_scope_system <= 0) ? 50 : 100)
-#define THR_NEED_CANCEL(thrd) \
- (((thrd)->cancelflags & THR_CANCELLING) != 0 && \
- ((thrd)->cancelflags & PTHREAD_CANCEL_DISABLE) == 0 && \
- (((thrd)->cancelflags & THR_AT_CANCEL_POINT) != 0 || \
- ((thrd)->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
-
-#define THR_NEED_ASYNC_CANCEL(thrd) \
- (((thrd)->cancelflags & THR_CANCELLING) != 0 && \
- ((thrd)->cancelflags & PTHREAD_CANCEL_DISABLE) == 0 && \
- (((thrd)->cancelflags & THR_AT_CANCEL_POINT) == 0 && \
- ((thrd)->cancelflags & PTHREAD_CANCEL_ASYNCHRONOUS) != 0))
-
/*
* We've got to keep track of everything that is allocated, not only
* to have a speedy free list, but also so they can be deallocated
@@ -101,9 +84,6 @@
/* Lock for thread tcb constructor/destructor */
static struct umtx tcb_lock;
-static void thr_wait(struct pthread *td_wait, int sigseq);
-static void thr_cleanup(struct pthread *curthread);
-static int thr_timedout(struct pthread *thread, struct timespec *curtime);
static void thr_destroy(struct pthread *curthread, struct pthread *thread);
static void thread_gc(struct pthread *thread);
@@ -124,8 +104,8 @@
void
_thr_single_thread(struct pthread *curthread)
{
- curthread->cancelflags &= ~THR_CANCELLING;
- /* clear aother thread locked us. */
+ curthread->cancelflags &= ~THR_CANCEL_NEEDED;
+ /* clear the lock in case another thread locked us. */
umtx_init(&curthread->lock);
thr_self(&curthread->tid);
/* reinitialize libc spinlocks, this includes __malloc_lock. */
@@ -190,192 +170,10 @@
_thr_critical_leave(struct pthread *thread)
{
thread->critical_count--;
- THR_YIELD_CHECK(thread);
-}
-
-void
-_thr_sched_switch(struct pthread *curthread)
-{
- THR_LOCK_SWITCH(curthread);
- _thr_sched_switch_unlocked(curthread);
+ THR_CRITICAL_CHECK(thread);
}
-/*
- * Must hold thread lock before calling this function.
- */
void
-_thr_sched_switch_unlocked(struct pthread *curthread)
-{
- struct timespec ts;
- sigset_t sigmask;
- int i, sigseqno;
-
- THR_ASSERT(curthread->lock_switch == 1, "lockswitch?");
- /*
- * This has to do the job of kse_switchout_thread(), only
- * for a single threaded KSE/KSEG.
- */
- switch (curthread->state) {
- case PS_MUTEX_WAIT:
- if (THR_NEED_CANCEL(curthread)) {
- curthread->interrupted = 1;
- curthread->continuation = _thr_finish_cancellation;
- THR_SET_STATE(curthread, PS_RUNNING);
- }
- break;
-
- case PS_DEAD:
- curthread->check_pending = 0;
- /* exit thread. */
- thr_cleanup(curthread);
- break;
-
- case PS_JOIN:
- if (THR_NEED_CANCEL(curthread)) {
- curthread->join_status.thread = NULL;
- THR_SET_STATE(curthread, PS_RUNNING);
- } else {
- /*
- * This state doesn't timeout.
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- }
- break;
-
- case PS_SUSPENDED:
- if (THR_NEED_CANCEL(curthread)) {
- curthread->interrupted = 1;
- THR_SET_STATE(curthread, PS_RUNNING);
- } else {
- /*
- * These states don't timeout.
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- }
- break;
-
- case PS_RUNNING:
- if ((curthread->flags & THR_FLAGS_SUSPENDED) != 0 &&
- !THR_NEED_CANCEL(curthread)) {
- THR_SET_STATE(curthread, PS_SUSPENDED);
- /*
- * These states don't timeout.
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- }
- break;
-
- case PS_DEADLOCK:
- /*
- * These states don't timeout and don't need
- * to be in the waiting queue.
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- break;
-
- default:
- PANIC("Unknown state\n");
- break;
- }
-
- while (curthread->state != PS_RUNNING) {
- sigseqno = curthread->sigseqno;
- if (curthread->check_pending != 0) {
- /*
- * Install pending signals into the frame, possible
- * cause mutex or condvar backout.
- */
- curthread->check_pending = 0;
- SIGFILLSET(sigmask);
-
- /*
- * Lock out kernel signal code when we are processing
- * signals, and get a fresh copy of signal mask.
- */
- __sys_sigprocmask(SIG_SETMASK, &sigmask,
- &curthread->sigmask);
- for (i = 1; i <= _SIG_MAXSIG; i++) {
- if (SIGISMEMBER(curthread->sigmask, i))
- continue;
- if (SIGISMEMBER(curthread->sigpend, i))
- (void)_thr_sig_add(curthread, i,
- &curthread->siginfo[i-1]);
- }
- __sys_sigprocmask(SIG_SETMASK, &curthread->sigmask,
- NULL);
- /* The above code might make thread runnable */
- if (curthread->state == PS_RUNNING)
- break;
- }
- thr_wait(curthread, sigseqno);
- if (curthread->wakeup_time.tv_sec >= 0) {
- clock_gettime(CLOCK_REALTIME, &ts);
- if (thr_timedout(curthread, &ts)) {
- /* Indicate the thread timedout: */
- curthread->timeout = 1;
- /* Make the thread runnable. */
- THR_SET_STATE(curthread, PS_RUNNING);
- }
- }
- }
-
- THR_UNLOCK_SWITCH(curthread);
-
- /*
- * This thread is being resumed; check for cancellations.
- */
- if (THR_NEED_ASYNC_CANCEL(curthread) && !THR_IN_CRITICAL(curthread))
- pthread_testcancel();
-}
-
-/*
- * Clean up a thread. This must be called with the thread's LOCK
- * held.
- */
-static void
-thr_cleanup(struct pthread *curthread)
-{
- struct pthread *joiner;
- long tid = -1;
-
- if ((joiner = curthread->joiner) != NULL) {
- THR_UNLOCK_SWITCH(curthread);
- /* The joiner may have removed itself and exited. */
- if (_thr_ref_add(curthread, joiner, 0) == 0) {
- THR_THREAD_LOCK(curthread, joiner);
- if (joiner->join_status.thread == curthread) {
- joiner->join_status.thread = NULL;
- joiner->join_status.ret = curthread->ret;
- tid = _thr_setrunnable_unlocked(joiner);
- }
- THR_THREAD_UNLOCK(curthread, joiner);
- _thr_ref_delete(curthread, joiner);
- if (tid != -1)
- thr_wake(tid);
- }
- THR_LOCK_SWITCH(curthread);
- curthread->attr.flags |= PTHREAD_DETACHED;
- }
-
- /*
- * We can't hold the thread list lock while holding the
- * scheduler lock.
- */
- THR_UNLOCK_SWITCH(curthread);
- DBG_MSG("Adding thread %p to GC list\n", thread);
- THR_LOCK_ACQUIRE(curthread, &_thread_list_lock);
- curthread->tlflags |= TLFLAGS_GC_SAFE;
- THR_GCLIST_ADD(curthread);
- THR_LOCK_RELEASE(curthread, &_thread_list_lock);
- thr_exit(&curthread->isdead);
- PANIC("thr_exit() returned");
-}
-
-void
_thr_gc(struct pthread *curthread)
{
thread_gc(curthread);
@@ -388,7 +186,7 @@
TAILQ_HEAD(, pthread) worklist;
TAILQ_INIT(&worklist);
- THR_LOCK_ACQUIRE(curthread, &_thread_list_lock);
+ THREAD_LIST_LOCK(curthread);
/* Check the threads waiting for GC. */
for (td = TAILQ_FIRST(&_thread_gc_list); td != NULL; td = td_next) {
@@ -411,7 +209,7 @@
* in use.
*/
_thr_stack_free(&td->attr);
- if (((td->attr.flags & PTHREAD_DETACHED) != 0) &&
+ if (((td->tlflags & TLFLAGS_DETACHED) != 0) &&
(td->refcount == 0)) {
/*
* The thread has detached and is no longer
@@ -422,7 +220,7 @@
TAILQ_INSERT_HEAD(&worklist, td, gcle);
}
}
- THR_LOCK_RELEASE(curthread, &_thread_list_lock);
+ THREAD_LIST_UNLOCK(curthread);
while ((td = TAILQ_FIRST(&worklist)) != NULL) {
TAILQ_REMOVE(&worklist, td, gcle);
@@ -441,111 +239,6 @@
}
}
-static int
-thr_timedout(struct pthread *curthread, struct timespec *curtime)
-{
- if (curthread->wakeup_time.tv_sec < 0)
- return (0);
- else if (curthread->wakeup_time.tv_sec > curtime->tv_sec)
- return (0);
- else if ((curthread->wakeup_time.tv_sec == curtime->tv_sec) &&
- (curthread->wakeup_time.tv_nsec > curtime->tv_nsec))
- return (0);
- else
- return (1);
-}
-
-/*
- * This function waits for the smallest timeout value of any waiting
- * thread, or until it receives a message from another KSE.
- *
- * This must be called with the scheduling lock held.
- */
-static void
-thr_wait(struct pthread *curthread, int sigseqno)
-{
- struct timespec ts, ts_sleep;
-
- if ((curthread->wakeup_time.tv_sec < 0)) {
- /* Limit sleep to no more than 1 minute. */
- ts_sleep.tv_sec = 60;
- ts_sleep.tv_nsec = 0;
- } else {
- clock_gettime(CLOCK_REALTIME, &ts);
- TIMESPEC_SUB(&ts_sleep, &curthread->wakeup_time, &ts);
- if (ts_sleep.tv_sec > 60) {
- ts_sleep.tv_sec = 60;
- ts_sleep.tv_nsec = 0;
- }
- }
- /* Don't sleep for negative times. */
- if ((ts_sleep.tv_sec >= 0) && (ts_sleep.tv_nsec >= 0)) {
- /* prevent thr_sig_check_pending to run */
- curthread->critical_count++;
- curthread->idle = 1;
- THR_UNLOCK_SWITCH(curthread);
- if (curthread->sigseqno != sigseqno)
- ; /* don't sleep */
- else {
- thr_suspend(&ts_sleep);
- }
- THR_LOCK_SWITCH(curthread);
- curthread->idle = 0;
- curthread->critical_count--;
- }
-}
-
-void
-_thr_set_timeout(const struct timespec *timeout)
-{
- struct pthread *curthread = _get_curthread();
- struct timespec ts;
-
- /* Reset the timeout flag for the running thread: */
- curthread->timeout = 0;
-
- /* Check if the thread is to wait forever: */
- if (timeout == NULL) {
- /*
- * Set the wakeup time to something that can be recognised as
- * different to an actual time of day:
- */
- curthread->wakeup_time.tv_sec = -1;
- curthread->wakeup_time.tv_nsec = -1;
- }
- /* Check if no waiting is required: */
- else if ((timeout->tv_sec == 0) && (timeout->tv_nsec == 0)) {
- /* Set the wake up time to 'immediately': */
- curthread->wakeup_time.tv_sec = 0;
- curthread->wakeup_time.tv_nsec = 0;
- } else {
- /* Calculate the time for the current thread to wakeup: */
- clock_gettime(CLOCK_REALTIME, &ts);
- TIMESPEC_ADD(&curthread->wakeup_time, &ts, timeout);
- }
-}
-
-void
-_thr_setrunnable(struct pthread *curthread, struct pthread *thread)
-{
- long tid;
-
- THR_THREAD_LOCK(curthread, thread);
- tid = _thr_setrunnable_unlocked(thread);
- THR_THREAD_UNLOCK(curthread, thread);
- thr_wake(tid);
-}
-
-long
-_thr_setrunnable_unlocked(struct pthread *thread)
-{
- if ((thread->flags & THR_FLAGS_SUSPENDED) != 0)
- THR_SET_STATE(thread, PS_SUSPENDED);
- else
- THR_SET_STATE(thread, PS_RUNNING);
- return (thread->tid);
-}
-
struct pthread *
_thr_alloc(struct pthread *curthread)
{
@@ -631,7 +324,7 @@
void
_thr_link(struct pthread *curthread, struct pthread *thread)
{
- THR_LOCK_ACQUIRE(curthread, &_thread_list_lock);
+ THREAD_LIST_LOCK(curthread);
/*
* Initialize the unique id (which GDB uses to track
* threads), add the thread to the list of all threads,
@@ -639,8 +332,10 @@
*/
thread->uniqueid = next_uniqueid++;
THR_LIST_ADD(thread);
+ if (thread->attr.flags & PTHREAD_DETACHED)
+ thread->tlflags |= TLFLAGS_DETACHED;
_thread_active_threads++;
- THR_LOCK_RELEASE(curthread, &_thread_list_lock);
+ THREAD_LIST_UNLOCK(curthread);
}
/*
@@ -649,10 +344,10 @@
void
_thr_unlink(struct pthread *curthread, struct pthread *thread)
{
- THR_LOCK_ACQUIRE(curthread, &_thread_list_lock);
+ THREAD_LIST_LOCK(curthread);
THR_LIST_REMOVE(thread);
_thread_active_threads--;
- THR_LOCK_RELEASE(curthread, &_thread_list_lock);
+ THREAD_LIST_UNLOCK(curthread);
}
void
@@ -683,3 +378,71 @@
}
return (NULL);
}
+
+/*
+ * Find a thread in the linked list of active threads and add a reference
+ * to it. Threads with positive reference counts will not be deallocated
+ * until all references are released.
+ */
+int
+_thr_ref_add(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
+{
+ int ret;
+
+ if (thread == NULL)
+ /* Invalid thread: */
+ return (EINVAL);
+
+ THREAD_LIST_LOCK(curthread);
+ if ((ret = _thr_find_thread(curthread, thread, include_dead)) == 0) {
+ thread->refcount++;
+ if (curthread != NULL)
+ curthread->critical_count++;
+ }
+ THREAD_LIST_UNLOCK(curthread);
+
+ /* Return zero if the thread exists: */
+ return (ret);
+}
+
+void
+_thr_ref_delete(struct pthread *curthread, struct pthread *thread)
+{
+ if (thread != NULL) {
+ THREAD_LIST_LOCK(curthread);
+ thread->refcount--;
+ if (curthread != NULL)
+ curthread->critical_count--;
+ if ((thread->refcount == 0) &&
+ (thread->tlflags & TLFLAGS_GC_SAFE) != 0)
+ THR_GCLIST_ADD(thread);
+ THREAD_LIST_UNLOCK(curthread);
+ }
+}
+
+int
+_thr_find_thread(struct pthread *curthread, struct pthread *thread,
+ int include_dead)
+{
+ struct pthread *pthread;
+
+ if (thread == NULL)
+ /* Invalid thread: */
+ return (EINVAL);
+
+ pthread = _thr_hash_find(thread);
+ if (pthread) {
+ if (include_dead == 0 && pthread->state == PS_DEAD)
+ pthread = NULL;
+ }
+
+ /* Return zero if the thread exists: */
+ return ((pthread != NULL) ? 0 : ESRCH);
+}
+
+void
+_thr_assert_lock_level()
+{
+ PANIC("lockleve <=0");
+}
More information about the p4-projects
mailing list