Browse Source

Merge pull request #5731 from melshuber/master

core: recursive mutex implementation
pr/rotary
Kaspar Schleiser 6 years ago committed by GitHub
parent
commit
8d207ca724
  1. 110
      core/include/rmutex.h
  2. 150
      core/rmutex.c
  3. 6
      tests/rmutex/Makefile
  4. 86
      tests/rmutex/README.md
  5. 91
      tests/rmutex/main.c
  6. 48
      tests/rmutex/tests/01-run.py

110
core/include/rmutex.h

@ -0,0 +1,110 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <martin.elshuber@theobroma-systems.com>
*
*/
#ifndef RMUTEX_H_
#define RMUTEX_H_
#include <stdatomic.h>
#include "mutex.h"
#include "kernel_types.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * @brief Recursive mutex structure. Must never be modified by the user.
 */
typedef struct rmutex_t {
    /* fields are managed by the rmutex functions, don't touch */
    /**
     * @brief The mutex used for locking. **Must never be changed by
     *        the user.**
     * @internal
     */
    mutex_t mutex;
    /**
     * @brief Number of locks currently held by the owner thread
     *        (i.e. the recursion depth)
     * @internal
     */
    uint16_t refcount;
    /**
     * @brief Owner thread of the mutex.
     * @details The owner is written only by the current mutex holder, but is
     *          read concurrently by other threads. To guarantee consistent
     *          reads, atomic_int_least16_t is used (note that
     *          @ref kernel_pid_t is a 16-bit integer).
     * @internal
     */
    atomic_int_least16_t owner;
} rmutex_t;

/**
 * @brief Static initializer for rmutex_t.
 * @details This initializer is preferable to rmutex_init().
 */
#define RMUTEX_INIT { MUTEX_INIT, 0, ATOMIC_VAR_INIT(KERNEL_PID_UNDEF) }
/**
 * @brief Initializes a recursive mutex object.
 * @details For initialization of variables use RMUTEX_INIT instead.
 *          Only use the function call for dynamically allocated mutexes.
 * @param[out] rmutex pre-allocated mutex structure, must not be NULL.
 */
static inline void rmutex_init(rmutex_t *rmutex)
{
    /* reset the object to its pristine state via a compound literal */
    *rmutex = (rmutex_t)RMUTEX_INIT;
}
/**
* @brief Tries to get a recursive mutex, non-blocking.
*
* @param[in] rmutex Recursive mutex object to lock. Has to be
* initialized first. Must not be NULL.
*
* @return 1 if the mutex was acquired (it was free, or already held by the calling thread).
* @return 0 if the mutex is currently held by another thread; it was not acquired.
*/
int rmutex_trylock(rmutex_t *rmutex);
/**
* @brief Locks a recursive mutex, blocking.
*
* @param[in] rmutex Recursive mutex object to lock. Has to be
* initialized first. Must not be NULL.
*/
void rmutex_lock(rmutex_t *rmutex);
/**
* @brief Unlocks the recursive mutex.
*
* @param[in] rmutex Recursive mutex object to unlock, must not be NULL.
*/
void rmutex_unlock(rmutex_t *rmutex);
#ifdef __cplusplus
}
#endif
#endif /* RMUTEX_H_ */
/** @} */

150
core/rmutex.c

@ -0,0 +1,150 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup core_sync Synchronization
* @brief Recursive Mutex for thread synchronization
* @{
*
* @file
* @brief RIOT synchronization API
*
* @author Martin Elshuber <martin.elshuber@theobroma-systems.com>
*
* The recursive mutex implementation is inspired by the implementation of
* Nick v. IJzendoorn <nijzendoorn@engineering-spirit.nl>
* @see https://github.com/RIOT-OS/RIOT/pull/4529/files#diff-8f48e1b9ed7a0a48d0c686a87cc5084eR35
*
*/
#include <stdio.h>
#include <inttypes.h>
#include "rmutex.h"
#include "thread.h"
#include "assert.h"
#define ENABLE_DEBUG (0)
#include "debug.h"
/**
 * @brief   Internal lock routine shared by rmutex_lock() and rmutex_trylock().
 *
 * @param[in] rmutex    recursive mutex to acquire, must not be NULL
 * @param[in] trylock   if non-zero, return failure instead of blocking when
 *                      the mutex is held by another thread
 *
 * @return 1 if the recursive mutex was acquired
 * @return 0 if @p trylock was set and the mutex is held by another thread
 */
static int _lock(rmutex_t *rmutex, int trylock)
{
    kernel_pid_t owner;

    /* try to lock the mutex */
    DEBUG("rmutex %" PRIi16" : trylock\n", thread_getpid());
    if (mutex_trylock(&rmutex->mutex) == 0) {
        DEBUG("rmutex %" PRIi16" : mutex already held\n", thread_getpid());
        /* Mutex is already held
         *
         * Case 1: Mutex is not held by me
         *     Condition 1: holds
         *     rmutex->owner != thread_getpid()
         *
         * Note for Case 1:
         *
         * As a consequence it is necessary to call mutex_lock(). However,
         * the read access to owner is not locked, and owner can be changed
         * by a thread that is holding the lock (e.g.: holder unlocks the
         * mutex, new holder acquired the lock). The atomic access strategy
         * 'relaxed' ensures that the value of rmutex->owner is read
         * consistently.
         *
         * It is not necessary to synchronize (make written values visible)
         * read/write with other threads, because every write by other
         * threads lets Condition 1 evaluate to false. They all write
         * either KERNEL_PID_UNDEF or the pid of the other thread.
         *
         * Other threads never set rmutex->owner to the pid of the current
         * thread. Hence, it is guaranteed that mutex_lock is eventually
         * called.
         *
         * Case 2: Mutex is held by me (relock)
         *     Condition 2: holds
         *     rmutex->owner == thread_getpid()
         *
         * Note for Case 2:
         *
         * Because rmutex->owner is only written by the owner (me),
         * rmutex->owner stays constant throughout the complete call and
         * rmutex->refcount is protected (read/write) by the mutex.
         */

        /* ensure that owner is read atomically, since I need a consistent value */
        owner = atomic_load_explicit(&rmutex->owner, memory_order_relaxed);
        DEBUG("rmutex %" PRIi16" : mutex held by %" PRIi16" \n",
              thread_getpid(), owner);

        /* Case 1: Mutex is not held by me */
        if (owner != thread_getpid()) {
            /* wait for the mutex */
            DEBUG("rmutex %" PRIi16" : locking mutex\n", thread_getpid());
            if (trylock) {
                return 0;
            }
            else {
                mutex_lock(&rmutex->mutex);
            }
        }
        /* Case 2: Mutex is held by me (relock) */
        /* Note: There is nothing to do for Case 2; refcount is incremented below */
    }

    DEBUG("rmutex %" PRIi16" : I am now holding the mutex\n", thread_getpid());

    /* I am holding the recursive mutex */
    DEBUG("rmutex %" PRIi16" : setting the owner\n", thread_getpid());
    /* ensure that owner is written atomically, since others need a consistent value */
    atomic_store_explicit(&rmutex->owner, thread_getpid(), memory_order_relaxed);

    DEBUG("rmutex %" PRIi16" : increasing refs\n", thread_getpid());
    /* increase the refcount */
    rmutex->refcount++;

    return 1;
}
/**
 * @brief Locks a recursive mutex, blocking until it is acquired.
 */
void rmutex_lock(rmutex_t *rmutex)
{
    /* blocking variant: _lock() always returns 1 here, so discard the result */
    (void)_lock(rmutex, 0);
}
/**
 * @brief Tries to lock a recursive mutex without blocking.
 */
int rmutex_trylock(rmutex_t *rmutex)
{
    /* non-blocking variant: forward _lock()'s success/failure result */
    return _lock(rmutex, /* trylock = */ 1);
}
/**
 * @brief   Unlocks a recursive mutex held by the calling thread.
 *
 * Decrements the recursion count; only when it drops to zero is the owner
 * reset and the underlying mutex actually released.
 *
 * @param[in] rmutex    recursive mutex to unlock, must not be NULL and must
 *                      currently be held by the calling thread
 */
void rmutex_unlock(rmutex_t *rmutex)
{
    /* only the owner may unlock, and it must hold at least one lock level */
    assert(atomic_load_explicit(&rmutex->owner, memory_order_relaxed) == thread_getpid());
    assert(rmutex->refcount > 0);

    DEBUG("rmutex %" PRIi16" : decrementing refs\n", thread_getpid());

    /* decrement refcount */
    rmutex->refcount--;

    /* check if I should still hold the mutex */
    if (rmutex->refcount == 0) {
        /* if not release the mutex */
        DEBUG("rmutex %" PRIi16" : resetting owner\n", thread_getpid());
        /* ensure that owner is written only once */
        atomic_store_explicit(&rmutex->owner, KERNEL_PID_UNDEF, memory_order_relaxed);
        DEBUG("rmutex %" PRIi16" : releasing mutex\n", thread_getpid());
        mutex_unlock(&rmutex->mutex);
    }
}

6
tests/rmutex/Makefile

@ -0,0 +1,6 @@
APPLICATION = rmutex
include ../Makefile.tests_common
BOARD_INSUFFICIENT_MEMORY := stm32f0discovery weio nucleo-f030 nucleo32-f042
include $(RIOTBASE)/Makefile.include

86
tests/rmutex/README.md

@ -0,0 +1,86 @@
Expected result
===============
When successful, you should see 5 different threads printing their
PID, priority and recursion depth. The thread with the lowest priority
should be able to lock (and unlock) the mutex first, followed by the
other threads in the order of their priority (highest next). If two
threads have the same priority the lower thread id should acquire the
lock. The output should look like the following:
```
main(): This is RIOT! (Version: xxx)
Recursive Mutex test
Please refer to the README.md for more information
Recursive Mutex test
Please refer to the README.md for more information
T3 (prio 6, depth 0): trying to lock rmutex now
T4 (prio 4, depth 0): trying to lock rmutex now
T5 (prio 5, depth 0): trying to lock rmutex now
T6 (prio 2, depth 0): trying to lock rmutex now
T7 (prio 3, depth 0): trying to lock rmutex now
main: unlocking recursive mutex
T6 (prio 2, depth 0): locked rmutex now
T6 (prio 2, depth 1): trying to lock rmutex now
T6 (prio 2, depth 1): locked rmutex now
T6 (prio 2, depth 2): trying to lock rmutex now
T6 (prio 2, depth 2): locked rmutex now
T6 (prio 2, depth 3): trying to lock rmutex now
T6 (prio 2, depth 3): locked rmutex now
T6 (prio 2, depth 3): unlocked rmutex
T6 (prio 2, depth 2): unlocked rmutex
T6 (prio 2, depth 1): unlocked rmutex
T6 (prio 2, depth 0): unlocked rmutex
T7 (prio 3, depth 0): locked rmutex now
T7 (prio 3, depth 1): trying to lock rmutex now
T7 (prio 3, depth 1): locked rmutex now
T7 (prio 3, depth 2): trying to lock rmutex now
T7 (prio 3, depth 2): locked rmutex now
T7 (prio 3, depth 3): trying to lock rmutex now
T7 (prio 3, depth 3): locked rmutex now
T7 (prio 3, depth 4): trying to lock rmutex now
T7 (prio 3, depth 4): locked rmutex now
T7 (prio 3, depth 4): unlocked rmutex
T7 (prio 3, depth 3): unlocked rmutex
T7 (prio 3, depth 2): unlocked rmutex
T7 (prio 3, depth 1): unlocked rmutex
T7 (prio 3, depth 0): unlocked rmutex
T4 (prio 4, depth 0): locked rmutex now
T4 (prio 4, depth 1): trying to lock rmutex now
T4 (prio 4, depth 1): locked rmutex now
T4 (prio 4, depth 2): trying to lock rmutex now
T4 (prio 4, depth 2): locked rmutex now
T4 (prio 4, depth 2): unlocked rmutex
T4 (prio 4, depth 1): unlocked rmutex
T4 (prio 4, depth 0): unlocked rmutex
T5 (prio 5, depth 0): locked rmutex now
T5 (prio 5, depth 1): trying to lock rmutex now
T5 (prio 5, depth 1): locked rmutex now
T5 (prio 5, depth 2): trying to lock rmutex now
T5 (prio 5, depth 2): locked rmutex now
T5 (prio 5, depth 2): unlocked rmutex
T5 (prio 5, depth 1): unlocked rmutex
T5 (prio 5, depth 0): unlocked rmutex
T3 (prio 6, depth 0): locked rmutex now
T3 (prio 6, depth 1): trying to lock rmutex now
T3 (prio 6, depth 1): locked rmutex now
T3 (prio 6, depth 2): trying to lock rmutex now
T3 (prio 6, depth 2): locked rmutex now
T3 (prio 6, depth 3): trying to lock rmutex now
T3 (prio 6, depth 3): locked rmutex now
T3 (prio 6, depth 4): trying to lock rmutex now
T3 (prio 6, depth 4): locked rmutex now
T3 (prio 6, depth 4): unlocked rmutex
T3 (prio 6, depth 3): unlocked rmutex
T3 (prio 6, depth 2): unlocked rmutex
T3 (prio 6, depth 1): unlocked rmutex
T3 (prio 6, depth 0): unlocked rmutex
Test END, check the order of priorities above.
```
Background
==========
This test application stresses a recursive mutex with a number of threads waiting on it, each locking and unlocking it recursively to a configured depth.

91
tests/rmutex/main.c

@ -0,0 +1,91 @@
/*
* Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup tests
* @{
*
* @file
* @brief Test application for testing recursive mutexes
*
* @author Hauke Petersen <hauke.petersen@fu-berlin.de>
* @author Martin Elshuber <martin.elshuber@theobroma-systems.com>
* @}
*/
#include <stdio.h>
#include "rmutex.h"
#include "thread.h"
#define THREAD_NUMOF (5U)
extern volatile thread_t *sched_active_thread;
static char stacks[THREAD_NUMOF][THREAD_STACKSIZE_MAIN];
static const char prios[THREAD_NUMOF] = {THREAD_PRIORITY_MAIN - 1, 4, 5, 2, 4};
static const char depth[THREAD_NUMOF] = {5, 3, 3, 4, 5};
static rmutex_t testlock;
/**
 * @brief   Recursively lock (and subsequently unlock) the test rmutex.
 *
 * @param[in] level      current recursion depth, 0-based
 * @param[in] max_depth  total number of nested locks to take
 *
 * Note: parameters renamed from (n, depth) — the old `depth` parameter
 * shadowed the file-scope `depth[]` array.
 */
static void lock_recursive(char level, char max_depth)
{
    volatile thread_t *t = sched_active_thread;

    printf("T%i (prio %i, depth %i): trying to lock rmutex now\n",
           (int)t->pid, (int)t->priority, (int)level);
    rmutex_lock(&testlock);
    printf("T%i (prio %i, depth %i): locked rmutex now\n",
           (int)t->pid, (int)t->priority, (int)level);

    /* recurse until the requested nesting depth is reached */
    if (level + 1 < max_depth) {
        lock_recursive(level + 1, max_depth);
    }

    /* give other threads a chance to (wrongly) grab the lock */
    thread_yield();

    rmutex_unlock(&testlock);
    printf("T%i (prio %i, depth %i): unlocked rmutex\n",
           (int)t->pid, (int)t->priority, (int)level);
}
/* thread body: take the number of recursive locks passed via arg */
static void *lockme(void *arg)
{
    intptr_t levels = (intptr_t)arg;

    lock_recursive(0, (char)levels);
    return NULL;
}
/* spawn the test threads while holding the rmutex, then release it and
 * let them run in priority order */
int main(void)
{
    puts("Recursive Mutex test");
    puts("Please refer to the README.md for more information\n");

    rmutex_init(&testlock);

    /* lock mutex, so that spawned threads have to wait */
    rmutex_lock(&testlock);

    /* create threads */
    unsigned idx = 0;
    while (idx < THREAD_NUMOF) {
        thread_create(stacks[idx], sizeof(stacks[idx]), prios[idx], 0,
                      lockme, (void *)(intptr_t)depth[idx], "t");
        idx++;
    }

    /* allow threads to lock the mutex */
    puts("main: unlocking recursive mutex");
    rmutex_unlock(&testlock);
    rmutex_lock(&testlock);

    puts("\nTest END, check the order of priorities above.");
    return 0;
}

48
tests/rmutex/tests/01-run.py

@ -0,0 +1,48 @@
#!/usr/bin/env python3
# Copyright (C) 2016 Theobroma Systems Design & Consulting GmbH
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
# Author: Martin Elshuber <martin.elshuber@theobroma-systems.com>
import os
import sys
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
import testrunner
thread_prio = {
3: 6,
4: 4,
5: 5,
6: 2,
7: 4
}
lock_depth = {
3: 5,
4: 3,
5: 3,
6: 4,
7: 5
}
def thread_prio_sort(x):
return thread_prio.get(x)*1000 + x
def testfunc(child):
for k in thread_prio.keys():
child.expect(u"T%i \(prio %i, depth 0\): trying to lock rmutex now" %
(k, thread_prio[k]))
pri_sorted = sorted(thread_prio, key=thread_prio_sort);
for T in pri_sorted:
for depth in range(lock_depth[T]):
child.expect(u"T%i \(prio %i, depth %i\): locked rmutex now" %
(T, thread_prio[T], depth))
if __name__ == "__main__":
sys.exit(testrunner.run(testfunc))
Loading…
Cancel
Save