
posix: pthread_rwlock: fix DEBUG messages

Kaspar Schleiser 2017-10-20 22:38:28 +02:00
parent c398b74ae0
commit c6b482d56c
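
The change replaces the stale thread_pid symbol in the pthread_rwlock DEBUG argument lists with a call to thread_getpid() and adds the "thread.h" include that declares it. Below is a minimal sketch of the resulting logging pattern; the helper function log_caller(), its op parameter, the message text and the ENABLE_DEBUG value are illustrative assumptions, not part of this commit:

#include "thread.h"        /* for thread_getpid() */

#define ENABLE_DEBUG (1)   /* enabled here so DEBUG() actually prints */
#include "debug.h"

/* Hypothetical helper: logs the calling thread's PID the same way the
 * fixed pthread_rwlock code does, i.e. by calling thread_getpid()
 * instead of referencing a thread_pid variable. */
static void log_caller(const char *op)
{
    DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): called\n",
          thread_getpid(), op);
}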


@@ -28,12 +28,14 @@
  * @}
  */
 
+#include <stdint.h>
+#include <string.h>
+
 #include "pthread.h"
 #include "sched.h"
 #include "xtimer.h"
 
-#include <stdint.h>
-#include <string.h>
+#include "thread.h"
 
 #define ENABLE_DEBUG (0)
 #include "debug.h"
@@ -43,7 +45,7 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *at
     (void) attr;
     if (rwlock == NULL) {
-        DEBUG("Thread %" PRIkernel_pid " pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "init");
+        DEBUG("Thread %" PRIkernel_pid " pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_getpid(), "init");
         return EINVAL;
     }
@@ -54,7 +56,7 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *at
 int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
 {
     if (rwlock == NULL) {
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "destroy");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_getpid(), "destroy");
         return EINVAL;
     }
@@ -105,19 +107,19 @@ static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
 {
     if (rwlock == NULL) {
         DEBUG("Thread %" PRIkernel_pid": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
-              thread_pid, "lock", is_writer, allow_spurious, "rwlock=NULL");
+              thread_getpid(), "lock", is_writer, allow_spurious, "rwlock=NULL");
         return EINVAL;
     }
     mutex_lock(&rwlock->mutex);
     if (!is_blocked(rwlock)) {
         DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
-              thread_pid, "lock", is_writer, allow_spurious, "is open");
+              thread_getpid(), "lock", is_writer, allow_spurious, "is open");
         rwlock->readers += incr_when_held;
     }
     else {
         DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
-              thread_pid, "lock", is_writer, allow_spurious, "is locked");
+              thread_getpid(), "lock", is_writer, allow_spurious, "is locked");
         /* queue for the lock */
         __pthread_rwlock_waiter_node_t waiting_node = {
@@ -140,12 +142,12 @@ static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
             if (waiting_node.continue_) {
                 /* pthread_rwlock_unlock() already set rwlock->readers */
                 DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
-                      thread_pid, "lock", is_writer, allow_spurious, "continued");
+                      thread_getpid(), "lock", is_writer, allow_spurious, "continued");
                 break;
             }
             else if (allow_spurious) {
                 DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
-                      thread_pid, "lock", is_writer, allow_spurious, "is timed out");
+                      thread_getpid(), "lock", is_writer, allow_spurious, "is timed out");
                 priority_queue_remove(&rwlock->queue, &waiting_node.qnode);
                 mutex_unlock(&rwlock->mutex);
                 return ETIMEDOUT;
@@ -162,7 +164,7 @@ static int pthread_rwlock_trylock(pthread_rwlock_t *rwlock,
                                   int incr_when_held)
 {
     if (rwlock == NULL) {
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "trylock");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_getpid(), "trylock");
         return EINVAL;
     }
     else if (mutex_trylock(&rwlock->mutex) == 0) {
@@ -237,30 +239,30 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *
 int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
 {
     if (rwlock == NULL) {
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "unlock");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_getpid(), "unlock");
         return EINVAL;
     }
     mutex_lock(&rwlock->mutex);
     if (rwlock->readers == 0) {
         /* the lock is open */
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): lock is open\n", thread_pid, "unlock");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): lock is open\n", thread_getpid(), "unlock");
         mutex_unlock(&rwlock->mutex);
         return EPERM;
     }
     if (rwlock->readers > 0) {
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): release %s lock\n", thread_pid, "unlock", "read");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): release %s lock\n", thread_getpid(), "unlock", "read");
         --rwlock->readers;
     }
     else {
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): release %s lock\n", thread_pid, "unlock", "write");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): release %s lock\n", thread_getpid(), "unlock", "write");
         rwlock->readers = 0;
     }
     if (rwlock->readers != 0 || rwlock->queue.first == NULL) {
         /* this thread was not the last reader, or no one is waiting to aquire the lock */
-        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): no one is waiting\n", thread_pid, "unlock");
+        DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): no one is waiting\n", thread_getpid(), "unlock");
         mutex_unlock(&rwlock->mutex);
         return 0;
     }
@@ -274,12 +276,12 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
     if (waiting_node->is_writer) {
         DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): continue %s %" PRIkernel_pid "\n",
-              thread_pid, "unlock", "writer", waiting_node->thread->pid);
+              thread_getpid(), "unlock", "writer", waiting_node->thread->pid);
         --rwlock->readers;
     }
     else {
         DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): continue %s %" PRIkernel_pid "\n",
-              thread_pid, "unlock", "reader", waiting_node->thread->pid);
+              thread_getpid(), "unlock", "reader", waiting_node->thread->pid);
         ++rwlock->readers;
         /* wake up further readers */
@@ -288,12 +290,12 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
             if (waiting_node->is_writer) {
                 /* Not to be unfair to writers, we don't try to wake up readers that came after the first writer. */
                 DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): continuing readers blocked by writer %" PRIkernel_pid "\n",
-                      thread_pid, "unlock", waiting_node->thread->pid);
+                      thread_getpid(), "unlock", waiting_node->thread->pid);
                 break;
             }
             waiting_node->continue_ = true;
             DEBUG("Thread %" PRIkernel_pid ": pthread_rwlock_%s(): continue %s %" PRIkernel_pid "\n",
-                  thread_pid, "unlock", "reader", waiting_node->thread->pid);
+                  thread_getpid(), "unlock", "reader", waiting_node->thread->pid);
             /* wake up this reader */
             qnode = priority_queue_remove_head(&rwlock->queue);