[Bug 254995] pthread_cond_timedwait() returns EDEADLK
Date: Mon, 17 Jan 2022 15:14:32 UTC
https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254995
--- Comment #12 from nkoch@demig.de ---
I modified the kernel code as suggested and also put some printf's into libthr.
After a longer period of testing I hit the deadlock again.
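The "********" lines in the trace below are the output of those printf's. The exact statements are not included in this comment; a diagnostic of roughly the following shape, placed next to the marked lines, would produce output in that format (an illustration only, not the code that was actually added -- it uses only names that already appear in the excerpts):

        /*
         * Hypothetical reconstruction of the added diagnostic, for reading
         * the "********" lines below.
         */
        struct pthread *curthread = _get_curthread();

        printf("******** thread %p(0x%x), mutex %p own 0x%x\n",
            (void *)curthread, TID(curthread), (void *)m,
            (unsigned)PMUTEX_OWNER_ID(m));

The deadlocking path follows, innermost frame first: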
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
        struct timespec ts1, ts2;
        int ret;

        switch (PMUTEX_TYPE(m->m_flags)) {
        case PTHREAD_MUTEX_ERRORCHECK:
        case PTHREAD_MUTEX_ADAPTIVE_NP:
                if (abstime) {
                        if (abstime->tv_sec < 0 || abstime->tv_nsec < 0 ||
                            abstime->tv_nsec >= 1000000000) {
                                ret = EINVAL;
                        } else {
                                clock_gettime(CLOCK_REALTIME, &ts1);
                                TIMESPEC_SUB(&ts2, abstime, &ts1);
                                __sys_nanosleep(&ts2, NULL);
                                ret = ETIMEDOUT;
                        }
                } else {
                        /*
                         * POSIX specifies that mutexes should return
                         * EDEADLK if a recursive lock is detected.
                         */
#if !defined(_PTHREADS_INVARIANTS)
#error
#endif
                        ret = EDEADLK;
******** thread 0x6883500(0x1871a), mutex 0x653e800 own 0x1871a list 0x0 0x0
err=11
                }
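For reference, err=11 in this output is EDEADLK on FreeBSD ("Resource deadlock avoided"); a quick standalone check of the mapping:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Confirms the errno value seen in the trace: on FreeBSD, EDEADLK is 11. */
int
main(void)
{
        printf("EDEADLK = %d (%s)\n", EDEADLK, strerror(EDEADLK));
        return (0);
}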
******** called from mutex_lock_sleep():

mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
    const struct timespec *abstime)
{
        uint32_t id, owner;
        int count, ret;

        id = TID(curthread);
        if (PMUTEX_OWNER_ID(m) == id)
>>>>>>>>        return (mutex_self_lock(m, abstime));
******** called from mutex_lock_common():

mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
    bool cvattach, bool rb_onlist)
{
        struct pthread *curthread;
        int ret, robust;

        robust = 0;     /* pacify gcc */
        curthread = _get_curthread();
        if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
                THR_CRITICAL_ENTER(curthread);
        if (!rb_onlist)
                robust = _mutex_enter_robust(curthread, m);
        ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
        if (ret == 0 || ret == EOWNERDEAD) {
                enqueue_mutex(curthread, m, ret);
                if (ret == EOWNERDEAD)
                        m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
        } else {
>>>>>>>>        ret = mutex_lock_sleep(curthread, m, abstime);
        }
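The failing trylock above is what pushes the thread into the sleep path. A simplified sketch of the trylock semantics assumed here (not the actual _thr_umutex_trylock2 implementation): the lock is only taken when the owner word is free, so an owner word that already holds this thread's TID fails the trylock, the code falls through to mutex_lock_sleep(), and the self-lock check there returns EDEADLK.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_UNOWNED  0u              /* stands in for UMUTEX_UNOWNED */

/*
 * Simplified model of a compare-and-swap trylock.  If the owner word is
 * anything other than "unowned" -- including this thread's own TID -- the
 * trylock fails and the caller takes the slow (sleep) path.
 */
static int
sketch_trylock(_Atomic uint32_t *ownerp, uint32_t tid)
{
        uint32_t expected = SKETCH_UNOWNED;

        if (atomic_compare_exchange_strong(ownerp, &expected, tid))
                return (0);             /* acquired */
        return (EBUSY);                 /* owned, possibly by this same thread */
}

int
main(void)
{
        _Atomic uint32_t owner = 0x1871a;       /* already holds "our" TID */

        /* Fails with EBUSY even though the recorded owner is ourselves. */
        printf("trylock: %d\n", sketch_trylock(&owner, 0x1871a));
        return (0);
}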
******** called from _mutex_cv_lock():

_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)
{
        int error;
        struct pthread_mutex saved = *m;

        error = mutex_lock_common(m, NULL, true, rb_onlist);
        if (error == 0 || error == EOWNERDEAD)
                m->m_count = count;
        else if (error == EDEADLK)
******** thread 0x6883500(0x1871a), mutex 0x653e800 own 0x1871a(0x1871a) list 0x0(0x0) 0x0(0x0) err=11
        return (error);
}
******** called from cond_wait_kernel():

cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
    const struct timespec *abstime, int cancel)
{
        struct pthread *curthread;
        int error, error2, recurse, robust;

        curthread = _get_curthread();
        robust = _mutex_enter_robust(curthread, mp);
        error = _mutex_cv_detach(mp, &recurse);
        if (error != 0) {
                if (robust)
                        _mutex_leave_robust(curthread, mp);
                return (error);
        }

        if (cancel)
                _thr_cancel_enter2(curthread, 0);
        error = _thr_ucond_wait(&cvp->kcond, &mp->m_lock, abstime,
            CVWAIT_ABSTIME | CVWAIT_CLOCKID);
        if (cancel)
                _thr_cancel_leave(curthread, 0);

        /*
         * Note that PP mutex and ROBUST mutex may return
         * interesting error codes.
         */
        if (error == 0) {
                error2 = _mutex_cv_lock(mp, recurse, true);
        } else if (error == EINTR || error == ETIMEDOUT) {
                error2 = _mutex_cv_lock(mp, recurse, true);
                /*
                 * Do not do cancellation on EOWNERDEAD there. The
                 * cancellation cleanup handler will use the protected
                 * state and unlock the mutex without making the state
                 * consistent and the state will be unrecoverable.
                 */
                if (error2 == 0 && cancel) {
                        if (robust) {
                                _mutex_leave_robust(curthread, mp);
                                robust = false;
                        }
                        _thr_testcancel(curthread);
                } else
******** error 11: 0x6883500(0x1871a)
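The caller-level pattern behind this trace is ordinary condition-variable waiting. The application code is not shown in this report; a minimal sketch of such a caller, assuming an error-checking mutex (consistent with the mutex_self_lock() branch that fires above), would be:

#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/*
 * Minimal sketch of the calling pattern, not the actual application and not
 * a reliable reproducer (the failure needs a wakeup race in libthr/kernel).
 */
static pthread_mutex_t m;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;

int
main(void)
{
        pthread_mutexattr_t attr;
        struct timespec ts;
        int error;

        pthread_mutexattr_init(&attr);
        pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
        pthread_mutex_init(&m, &attr);

        pthread_mutex_lock(&m);
        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += 1;
        /* Should return 0 or ETIMEDOUT; with this bug it can come back EDEADLK. */
        error = pthread_cond_timedwait(&cv, &m, &ts);
        printf("pthread_cond_timedwait: %d (%s)\n", error, strerror(error));
        pthread_mutex_unlock(&m);
        return (0);
}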