mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00

Add pthread_rwlock test

René Kijewski 2014-04-17 14:20:46 +02:00
parent 10d36df795
commit 9a5a8a2452
3 changed files with 180 additions and 0 deletions

Modified file: pthread rwlock implementation (DEBUG trace output added)

@@ -35,11 +35,15 @@
#include <stdint.h>
#include <string.h>

#define ENABLE_DEBUG (0)
#include "debug.h"

int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
{
    (void) attr;

    if (rwlock == NULL) {
        DEBUG("Thread %u: pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "init");
        return EINVAL;
    }

@@ -50,6 +54,7 @@ int pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr)
int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
    if (rwlock == NULL) {
        DEBUG("Thread %u: pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "destroy");
        return EINVAL;
    }

@@ -99,14 +104,21 @@ static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
                               bool allow_spurious)
{
    if (rwlock == NULL) {
        DEBUG("Thread %u: pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_pid, "lock", is_writer, allow_spurious, "rwlock=NULL");
        return EINVAL;
    }

    mutex_lock(&rwlock->mutex);
    if (!is_blocked(rwlock)) {
        DEBUG("Thread %u: pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_pid, "lock", is_writer, allow_spurious, "is open");
        rwlock->readers += incr_when_held;
    }
    else {
        DEBUG("Thread %u: pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
              thread_pid, "lock", is_writer, allow_spurious, "is locked");

        /* queue for the lock */
        __pthread_rwlock_waiter_node_t waiting_node = {
            .is_writer = is_writer,
@@ -127,9 +139,13 @@ static int pthread_rwlock_lock(pthread_rwlock_t *rwlock,
            mutex_lock(&rwlock->mutex);
            if (waiting_node.continue_) {
                /* pthread_rwlock_unlock() already set rwlock->readers */
                DEBUG("Thread %u: pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
                      thread_pid, "lock", is_writer, allow_spurious, "continued");
                break;
            }
            else if (allow_spurious) {
                DEBUG("Thread %u: pthread_rwlock_%s(): is_writer=%u, allow_spurious=%u %s\n",
                      thread_pid, "lock", is_writer, allow_spurious, "is timed out");
                queue_remove(&rwlock->queue, &waiting_node.qnode);
                mutex_unlock(&rwlock->mutex);
                return ETIMEDOUT;
            }

@@ -146,6 +162,7 @@ static int pthread_rwlock_trylock(pthread_rwlock_t *rwlock,
                                  int incr_when_held)
{
    if (rwlock == NULL) {
        DEBUG("Thread %u: pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "trylock");
        return EINVAL;
    }
    else if (mutex_trylock(&rwlock->mutex) == 0) {
@@ -226,25 +243,30 @@ int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, const struct timespec *
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
    if (rwlock == NULL) {
        DEBUG("Thread %u: pthread_rwlock_%s(): rwlock=NULL supplied\n", thread_pid, "unlock");
        return EINVAL;
    }

    mutex_lock(&rwlock->mutex);
    if (rwlock->readers == 0) {
        /* the lock is open */
        DEBUG("Thread %u: pthread_rwlock_%s(): lock is open\n", thread_pid, "unlock");
        mutex_unlock(&rwlock->mutex);
        return EPERM;
    }

    if (rwlock->readers > 0) {
        DEBUG("Thread %u: pthread_rwlock_%s(): release %s lock\n", thread_pid, "unlock", "read");
        --rwlock->readers;
    }
    else {
        DEBUG("Thread %u: pthread_rwlock_%s(): release %s lock\n", thread_pid, "unlock", "write");
        rwlock->readers = 0;
    }

    if (rwlock->readers != 0 || rwlock->queue.next == NULL) {
        /* this thread was not the last reader, or no one is waiting to acquire the lock */
        DEBUG("Thread %u: pthread_rwlock_%s(): no one is waiting\n", thread_pid, "unlock");
        mutex_unlock(&rwlock->mutex);
        return 0;
    }
@@ -257,9 +279,13 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
    sched_set_status(waiting_node->thread, STATUS_PENDING);

    if (waiting_node->is_writer) {
        DEBUG("Thread %u: pthread_rwlock_%s(): continue %s %u\n",
              thread_pid, "unlock", "writer", waiting_node->thread->pid);
        --rwlock->readers;
    }
    else {
        DEBUG("Thread %u: pthread_rwlock_%s(): continue %s %u\n",
              thread_pid, "unlock", "reader", waiting_node->thread->pid);
        ++rwlock->readers;

        /* wake up further readers */

@@ -267,9 +293,13 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
            waiting_node = (__pthread_rwlock_waiter_node_t *) rwlock->queue.next->data;
            if (waiting_node->is_writer) {
                /* Not to be unfair to writers, we don't try to wake up readers that came after the first writer. */
                DEBUG("Thread %u: pthread_rwlock_%s(): continuing readers blocked by writer %u\n",
                      thread_pid, "unlock", waiting_node->thread->pid);
                break;
            }

            waiting_node->continue_ = true;
            DEBUG("Thread %u: pthread_rwlock_%s(): continue %s %u\n",
                  thread_pid, "unlock", "reader", waiting_node->thread->pid);

            /* wake up this reader */
            qnode = queue_remove_head(&rwlock->queue);
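
The DEBUG() calls added above follow RIOT's usual debug.h pattern: with ENABLE_DEBUG defined as (0) they compile to nothing, so the rwlock stays silent in normal builds. A minimal sketch of how the new trace output would be switched on while debugging the rwlock (a local change, not part of this commit):

/* sketch: turn on the rwlock trace output added in this commit */
#define ENABLE_DEBUG (1)   /* instead of (0); makes the DEBUG() lines above print */
#include "debug.h"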

New file: Makefile for the pthread_rwlock test

@@ -0,0 +1,18 @@
PROJECT = test_pthread_rwlock
include ../Makefile.tests_common
USEMODULE += pthread
USEMODULE += vtimer
USEMODULE += random
DISABLE_MODULE += auto_init
CFLAGS += -DNATIVE_AUTO_EXIT
# these boards provide too little RAM for the example to run
BOARD_BLACKLIST += chronos
BOARD_BLACKLIST += mbed_lpc1768
BOARD_BLACKLIST += msb-430
BOARD_BLACKLIST += msb-430h
include $(RIOTBASE)/Makefile.include
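
Since the Makefile sets NATIVE_AUTO_EXIT, the test is evidently meant to run on the native board, where it terminates by itself. Presumably it is started with the usual RIOT workflow, something like make BOARD=native all term in the test directory; the exact invocation is an assumption, it is not spelled out in this commit.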

New file: main source of the pthread_rwlock test

@@ -0,0 +1,132 @@
/*
 * Copyright (C) 2014 René Kijewski <rene.kijewski@fu-berlin.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @ingroup tests
 * @{
 *
 * @file
 * @brief Test rwlock implementation.
 *
 * @author René Kijewski <rene.kijewski@fu-berlin.de>
 *
 * @}
 */
#include <pthread.h>
#include <stdio.h>
#include "kernel.h"
#include "random.h"
#include "sched.h"
#include "thread.h"
#include "vtimer.h"
#define NUM_READERS_HIGH 2
#define NUM_READERS_LOW 3
#define NUM_WRITERS_HIGH 1
#define NUM_WRITERS_LOW 2
#define NUM_READERS (NUM_READERS_HIGH + NUM_READERS_LOW)
#define NUM_WRITERS (NUM_WRITERS_HIGH + NUM_WRITERS_LOW)
#define NUM_CHILDREN (NUM_READERS + NUM_WRITERS)
#define NUM_ITERATIONS 5
#define RAND_SEED 0xC0FFEE
static pthread_rwlock_t rwlock;
static volatile unsigned counter;
#define PRINTF(FMT, ...) \
    printf("%c%u (prio=%u): " FMT "\n", active_thread->name[0], thread_pid, active_thread->priority, __VA_ARGS__)

static void do_sleep(int factor)
{
    uint32_t timeout_us = (genrand_uint32() % 100000) * factor;
    /* PRINTF("sleep for % 8i µs.", timeout_us); */
    vtimer_usleep(timeout_us);
}

static void writer(void)
{
    /* PRINTF("%s", "start"); */
    for (int i = 0; i < NUM_ITERATIONS; ++i) {
        pthread_rwlock_wrlock(&rwlock);
        unsigned cur = ++counter;
        do_sleep(3); /* simulate time that it takes to write the value */
        PRINTF("%i: write -> %2u (correct = %u)", i, cur, cur == counter);
        pthread_rwlock_unlock(&rwlock);
        do_sleep(2);
    }
    /* PRINTF("%s", "done"); */
}

static void reader(void)
{
    /* PRINTF("%s", "start"); */
    for (int i = 0; i < NUM_ITERATIONS; ++i) {
        pthread_rwlock_rdlock(&rwlock);
        unsigned cur = counter;
        do_sleep(1); /* simulate time that it takes to read the value */
        PRINTF("%i: read <- %2u (correct = %u)", i, cur, cur == counter);
        pthread_rwlock_unlock(&rwlock);
        do_sleep(1);
    }
    /* PRINTF("%s", "done"); */
}
int main(void)
{
    static char stacks[NUM_CHILDREN][KERNEL_CONF_STACKSIZE_MAIN];

    puts("Main start.");

    for (unsigned i = 0; i < NUM_CHILDREN; ++i) {
        int prio;
        void (*fun)(void);
        const char *name;

        if (i < NUM_READERS) {
            if (i < NUM_READERS_HIGH) {
                prio = PRIORITY_MAIN + 1;
            }
            else {
                prio = PRIORITY_MAIN + 2;
            }
            fun = reader;
            name = "reader";
        }
        else {
            if (i - NUM_READERS < NUM_WRITERS_HIGH) {
                prio = PRIORITY_MAIN + 1;
            }
            else {
                prio = PRIORITY_MAIN + 2;
            }
            fun = writer;
            name = "writer";
        }

        thread_create(stacks[i], sizeof (stacks[i]), prio, CREATE_WOUT_YIELD | CREATE_STACKTEST, fun, name);
    }

    puts("Main done.");
    return 0;
}
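
If the rwlock behaves correctly, every PRINTF line reports correct = 1: the value read or written while holding the lock still equals the shared counter after the simulated work delay, meaning no writer could interleave. A 0 in that column would indicate that the lock failed to provide the expected exclusion.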