diff --git a/core/cond.c b/core/cond.c
index 8ff45f4d7461..242fa0884d79 100644
--- a/core/cond.c
+++ b/core/cond.c
@@ -34,13 +34,16 @@ void cond_init(cond_t *cond)
 
 void cond_wait(cond_t *cond, mutex_t *mutex)
 {
-    unsigned irqstate = irq_disable();
+    assert(irq_is_enabled());
     thread_t *me = thread_get_active();
 
-    mutex_unlock(mutex);
+    irq_disable();
     sched_set_status(me, STATUS_COND_BLOCKED);
     thread_add_to_list(&cond->queue, me);
-    irq_restore(irqstate);
+    irq_enable();
+    /* if a higher prio thread blocks on the mutex, mutex_unlock() will
+     * reschedule, so this has to happen after enabling interrupts */
+    mutex_unlock(mutex);
     thread_yield_higher();
 
     /*
diff --git a/core/include/mbox.h b/core/include/mbox.h
index 121b9ae65a59..40dab8f9da02 100644
--- a/core/include/mbox.h
+++ b/core/include/mbox.h
@@ -24,6 +24,7 @@
 
 #include "list.h"
 #include "cib.h"
+#include "irq.h"
 #include "msg.h"
 
 #ifdef __cplusplus
@@ -110,6 +111,7 @@ int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking);
  */
 static inline void mbox_put(mbox_t *mbox, msg_t *msg)
 {
+    assume(irq_is_in() || irq_is_enabled());
     _mbox_put(mbox, msg, BLOCKING);
 }
 
@@ -140,6 +142,7 @@ static inline int mbox_try_put(mbox_t *mbox, msg_t *msg)
  */
 static inline void mbox_get(mbox_t *mbox, msg_t *msg)
 {
+    assume(irq_is_in() || irq_is_enabled());
     _mbox_get(mbox, msg, BLOCKING);
 }
 
diff --git a/core/include/sched.h b/core/include/sched.h
index f7384236ffcf..ef934399fe98 100644
--- a/core/include/sched.h
+++ b/core/include/sched.h
@@ -260,8 +260,6 @@ NORETURN void sched_task_exit(void);
 /**
  * @brief   Change the priority of the given thread
  *
- * @note    This functions expects interrupts to be disabled when called!
- *
  * @pre     (thread != NULL)
 * @pre     (priority < SCHED_PRIO_LEVELS)
 *
diff --git a/core/include/thread.h b/core/include/thread.h
index cc414c9fb78a..34259b8ab9f1 100644
--- a/core/include/thread.h
+++ b/core/include/thread.h
@@ -121,6 +121,7 @@
 
 #include "clist.h"
 #include "cib.h"
+#include "irq.h"
 #include "msg.h"
 #include "sched.h"
 #include "thread_config.h"
@@ -340,6 +341,11 @@ static inline void thread_yield(void)
 }
 #endif
 
+/**
+ * @brief   Arch-specific implementation of @ref thread_yield_higher()
+ */
+THREAD_MAYBE_INLINE void thread_yield_higher_arch(void);
+
 /**
  * @brief   Lets current thread yield in favor of a higher prioritized thread.
  *
@@ -352,7 +358,21 @@ static inline void thread_yield(void)
  *
  * @see     thread_yield()
  */
-THREAD_MAYBE_INLINE void thread_yield_higher(void);
+#ifdef DEBUG_THREAD_YIELD_HIGHER
+/* A failed assertion will reveal where this was called at the expense of
+ * increased binary size. */
+#define thread_yield_higher() do { \
+        assume(irq_is_in() || irq_is_enabled()); \
+        thread_yield_higher_arch(); \
+    } while (0)
+
+#else
+static inline void thread_yield_higher(void)
+{
+    assume(irq_is_in() || irq_is_enabled());
+    thread_yield_higher_arch();
+}
+#endif
 
 /**
  * @brief Puts the current thread into zombie state.
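The thread.h hunk above is the heart of the change: thread_yield_higher() becomes a thin wrapper that makes the scheduler's long-standing implicit contract explicit (a context switch may only be requested from an ISR, or from thread context with IRQs enabled) before delegating to the renamed arch hook. A minimal host-side model of that contract, using made-up stand-in globals for irq_is_in()/irq_is_enabled(), shows what the assume() is meant to catch:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for RIOT's irq_is_in()/irq_is_enabled(), modeled as
     * plain globals for illustration only. */
    static bool in_isr = false;
    static bool irqs_enabled = true;

    /* Stands in for the port-provided thread_yield_higher_arch(). */
    static void model_yield_higher_arch(void)
    {
        puts("arch hook: context switch pended");
    }

    /* Mirrors the non-debug wrapper from the patch: check the contract,
     * then delegate to the arch-specific implementation. */
    static void model_yield_higher(void)
    {
        assert(in_isr || irqs_enabled); /* assume() in the real code */
        model_yield_higher_arch();
    }

    int main(void)
    {
        model_yield_higher();   /* OK: thread context, IRQs enabled */

        irqs_enabled = false;
        in_isr = true;
        model_yield_higher();   /* OK: ISR context, switch is deferred */

        /* With in_isr == false here, the assert would fire and name
         * this file and line as the offending call site. */
        return 0;
    }

The DEBUG_THREAD_YIELD_HIGHER variant trades binary size for diagnostics: because the macro expands the check at each call site, a failed assertion reports the offending caller rather than the shared inline wrapper.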
diff --git a/core/mbox.c b/core/mbox.c
index f636804a3cc1..9590257d9cbe 100644
--- a/core/mbox.c
+++ b/core/mbox.c
@@ -41,7 +41,7 @@ static void _wake_waiter(thread_t *thread, unsigned irqstate)
     sched_switch(process_priority);
 }
 
-static void _wait(list_node_t *wait_list, unsigned irqstate)
+static void _wait(list_node_t *wait_list)
 {
     DEBUG("mbox: Thread %" PRIkernel_pid " _wait(): going blocked.\n",
           thread_getpid());
@@ -50,7 +50,7 @@ static void _wait(list_node_t *wait_list, unsigned irqstate)
     sched_set_status(me, STATUS_MBOX_BLOCKED);
     thread_add_to_list(wait_list, me);
-    irq_restore(irqstate);
+    irq_enable();
 
     thread_yield();
 
     DEBUG("mbox: Thread %" PRIkernel_pid " _wait(): woke up.\n",
@@ -75,8 +75,8 @@ int _mbox_put(mbox_t *mbox, msg_t *msg, int blocking)
     else {
         while (cib_full(&mbox->cib)) {
             if (blocking) {
-                _wait(&mbox->writers, irqstate);
-                irqstate = irq_disable();
+                _wait(&mbox->writers);
+                irq_disable();
             }
             else {
                 irq_restore(irqstate);
@@ -116,7 +116,7 @@ int _mbox_get(mbox_t *mbox, msg_t *msg, int blocking)
     }
     else if (blocking) {
         thread_get_active()->wait_data = msg;
-        _wait(&mbox->readers, irqstate);
+        _wait(&mbox->readers);
         /* sender has copied message */
         return 1;
     }
diff --git a/core/sched.c b/core/sched.c
index 236c58e76430..b3f3a33119cb 100644
--- a/core/sched.c
+++ b/core/sched.c
@@ -334,13 +334,14 @@ void sched_register_cb(void (*callback)(kernel_pid_t, kernel_pid_t))
 
 void sched_change_priority(thread_t *thread, uint8_t priority)
 {
+    assume(irq_is_in() || irq_is_enabled());
     assert(thread && (priority < SCHED_PRIO_LEVELS));
 
     if (thread->priority == priority) {
         return;
     }
 
-    unsigned irq_state = irq_disable();
+    irq_disable();
 
     if (thread_is_active(thread)) {
         _runqueue_pop(thread);
@@ -348,7 +349,7 @@ void sched_change_priority(thread_t *thread, uint8_t priority)
     }
 
     thread->priority = priority;
-    irq_restore(irq_state);
+    irq_enable();
 
     thread_t *active = thread_get_active();
diff --git a/core/thread.c b/core/thread.c
index 979a95b90112..c3c5aa29a205 100644
--- a/core/thread.c
+++ b/core/thread.c
@@ -111,10 +111,11 @@ void thread_sleep(void)
         return;
     }
 
-    unsigned state = irq_disable();
+    assert(irq_is_enabled());
+    irq_disable();
     sched_set_status(thread_get_active(), STATUS_SLEEPING);
 
-    irq_restore(state);
+    irq_enable();
     thread_yield_higher();
 }
 
@@ -149,13 +150,14 @@ int thread_wakeup(kernel_pid_t pid)
 
 void thread_yield(void)
 {
-    unsigned old_state = irq_disable();
+    assume(irq_is_in() || irq_is_enabled());
+    irq_disable();
     thread_t *me = thread_get_active();
 
     if (me->status >= STATUS_ON_RUNQUEUE) {
         sched_runq_advance(me->priority);
     }
-    irq_restore(old_state);
+    irq_enable();
 
     thread_yield_higher();
 }
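All three files follow the same mechanical simplification: once the public entry points assert or assume that IRQs are enabled on entry, the irqstate value threaded through helpers such as _wait() is known to be "enabled", so irq_restore(irqstate) degenerates to irq_enable() and the parameter can be dropped. A host-side sketch of that equivalence (a hypothetical one-bit IRQ-flag model, not RIOT's real irq driver):

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical one-bit model of the global IRQ enable flag. */
    static bool irq_flag = true;

    static unsigned model_irq_disable(void)
    {
        unsigned old = irq_flag;
        irq_flag = false;
        return old;
    }

    static void model_irq_restore(unsigned state)
    {
        irq_flag = state;
    }

    static void model_irq_enable(void)
    {
        irq_flag = true;
    }

    int main(void)
    {
        /* The new caller contract: IRQs are enabled on entry. */
        assert(irq_flag);

        /* Old pattern: save the state, restore it later. */
        unsigned state = model_irq_disable();
        model_irq_restore(state);
        assert(irq_flag);

        /* New pattern: the saved state is known to be "enabled", so
         * the restore degenerates to an unconditional enable. */
        model_irq_disable();
        model_irq_enable();
        assert(irq_flag);

        return 0;
    }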
diff --git a/core/thread_flags.c b/core/thread_flags.c
index be3c1bf44fa4..3192db1e8323 100644
--- a/core/thread_flags.c
+++ b/core/thread_flags.c
@@ -70,7 +70,7 @@ static thread_flags_t _thread_flags_clear_atomic(thread_t *thread,
 }
 
 static void _thread_flags_wait(thread_flags_t mask, thread_t *thread,
-                               unsigned threadstate, unsigned irqstate)
+                               unsigned threadstate)
 {
     DEBUG(
         "_thread_flags_wait: me->flags=0x%08x me->mask=0x%08x. going blocked.\n",
@@ -78,7 +78,7 @@ static void _thread_flags_wait(thread_flags_t mask, thread_t *thread,
 
     thread->wait_data = (void *)(uintptr_t)mask;
     sched_set_status(thread, threadstate);
-    irq_restore(irqstate);
+    irq_enable();
 
     thread_yield_higher();
 }
@@ -94,14 +94,15 @@ thread_flags_t thread_flags_clear(thread_flags_t mask)
 
 static void _thread_flags_wait_any(thread_flags_t mask)
 {
+    assume(irq_is_in() || irq_is_enabled());
     thread_t *me = thread_get_active();
 
-    unsigned state = irq_disable();
+    irq_disable();
     if (!(me->flags & mask)) {
-        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ANY, state);
+        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ANY);
     }
     else {
-        irq_restore(state);
+        irq_enable();
     }
 }
 
@@ -126,17 +127,18 @@ thread_flags_t thread_flags_wait_one(thread_flags_t mask)
 
 thread_flags_t thread_flags_wait_all(thread_flags_t mask)
 {
-    unsigned state = irq_disable();
+    assume(irq_is_in() || irq_is_enabled());
+    irq_disable();
     thread_t *me = thread_get_active();
 
     if (!((me->flags & mask) == mask)) {
         DEBUG(
             "thread_flags_wait_all(): pid %" PRIkernel_pid " waiting for %08x\n",
             thread_getpid(), (unsigned)mask);
-        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ALL, state);
+        _thread_flags_wait(mask, me, STATUS_FLAG_BLOCKED_ALL);
     }
     else {
-        irq_restore(state);
+        irq_enable();
     }
 
     return _thread_flags_clear_atomic(me, mask);
@@ -144,16 +146,17 @@ thread_flags_t thread_flags_wait_all(thread_flags_t mask)
 
 void thread_flags_set(thread_t *thread, thread_flags_t mask)
 {
+    assume(irq_is_in() || irq_is_enabled());
     DEBUG("thread_flags_set(): setting 0x%08x for pid %" PRIkernel_pid "\n",
           mask, thread->pid);
-    unsigned state = irq_disable();
+    irq_disable();
     thread->flags |= mask;
     if (_thread_flags_wake(thread)) {
-        irq_restore(state);
+        irq_enable();
         thread_yield_higher();
     }
     else {
-        irq_restore(state);
+        irq_enable();
     }
 }
diff --git a/cpu/arm7_common/include/thread_arch.h b/cpu/arm7_common/include/thread_arch.h
index c96f4498dc74..a1eff8a127db 100644
--- a/cpu/arm7_common/include/thread_arch.h
+++ b/cpu/arm7_common/include/thread_arch.h
@@ -30,7 +30,7 @@ extern "C" {
 
 #ifndef DOXYGEN /* Doxygen is in core/include/thread.h */
 
-static inline __attribute__((always_inline)) void thread_yield_higher(void)
+static inline __attribute__((always_inline)) void thread_yield_higher_arch(void)
 {
     if (irq_is_in()) {
         sched_context_switch_request = 1;
diff --git a/cpu/avr8_common/thread_arch.c b/cpu/avr8_common/thread_arch.c
index f6529c9fb44b..194b4b3d0059 100644
--- a/cpu/avr8_common/thread_arch.c
+++ b/cpu/avr8_common/thread_arch.c
@@ -258,7 +258,7 @@ void NORETURN avr8_enter_thread_mode(void)
     UNREACHABLE();
 }
 
-void thread_yield_higher(void)
+void thread_yield_higher_arch(void)
 {
     if (!IS_USED(MODULE_CORE_THREAD)) {
         return;
diff --git a/cpu/cortexm_common/include/thread_arch.h b/cpu/cortexm_common/include/thread_arch.h
index 84d67da0c4a6..e8454ffecfd5 100644
--- a/cpu/cortexm_common/include/thread_arch.h
+++ b/cpu/cortexm_common/include/thread_arch.h
@@ -30,7 +30,7 @@ extern "C" {
 
 #ifndef DOXYGEN /* Doxygen is in core/include/thread.h */
 
-static inline __attribute__((always_inline)) void thread_yield_higher(void)
+static inline __attribute__((always_inline)) void thread_yield_higher_arch(void)
 {
     /* trigger the PENDSV interrupt to run scheduler and schedule new thread if
      * applicable */
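On the CPU side the change is purely mechanical: each port keeps its switching mechanics and only renames the symbol, while the common wrapper in thread.h layers the check on top. A hypothetical port following the same shape as the arm7/cortexm hooks above (the SW-interrupt trigger name is made up for illustration):

    /* thread_arch.h of a hypothetical port, sketch only */
    #include "irq.h"
    #include "sched.h"

    /* Made-up SoC-specific software-interrupt trigger. */
    extern void my_soc_trigger_swi(void);

    static inline __attribute__((always_inline))
    void thread_yield_higher_arch(void)
    {
        if (irq_is_in()) {
            /* inside an ISR: defer the switch to interrupt exit */
            sched_context_switch_request = 1;
        }
        else {
            /* thread context: raise the software interrupt that runs
             * the scheduler */
            my_soc_trigger_swi();
        }
    }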
diff --git a/cpu/msp430/cpu.c b/cpu/msp430/cpu.c
index fe9dfbd83a2f..c1f214de94e5 100644
--- a/cpu/msp430/cpu.c
+++ b/cpu/msp430/cpu.c
@@ -33,7 +33,7 @@
  * execution at the call site using reti.
  *
  */
-void thread_yield_higher(void)
+void thread_yield_higher_arch(void)
 {
     if (irq_is_in()) {
         sched_context_switch_request = 1;
diff --git a/cpu/native/native_cpu.c b/cpu/native/native_cpu.c
index c2c1a39ca4af..ca0ef49319dd 100644
--- a/cpu/native/native_cpu.c
+++ b/cpu/native/native_cpu.c
@@ -241,7 +241,7 @@ void isr_thread_yield(void)
     }
 }
 
-void thread_yield_higher(void)
+void thread_yield_higher_arch(void)
 {
     sched_context_switch_request = 1;
 
diff --git a/cpu/riscv_common/include/thread_arch.h b/cpu/riscv_common/include/thread_arch.h
index e4750d03b2b6..b465f2c1d9f9 100644
--- a/cpu/riscv_common/include/thread_arch.h
+++ b/cpu/riscv_common/include/thread_arch.h
@@ -43,7 +43,7 @@ static inline void _ecall_dispatch(uint32_t num, void *ctx)
     );
 }
 
-static inline __attribute__((always_inline)) void thread_yield_higher(void)
+static inline __attribute__((always_inline)) void thread_yield_higher_arch(void)
 {
     if (irq_is_in()) {
         sched_context_switch_request = 1;
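The practical payoff is that a whole class of caller bugs now fails loudly at the call site instead of deadlocking later: blocking inside an irq_disable() critical section used to put a thread to sleep with IRQs masked, leaving nothing able to wake it. A condensed sketch of the before/after caller pattern, using the real RIOT APIs touched by this patch (the wake-up source is omitted for brevity):

    #include "irq.h"
    #include "thread.h"

    static unsigned shared_counter;

    static void buggy_block(void)
    {
        unsigned state = irq_disable();
        shared_counter++;
        thread_sleep();     /* now trips assert(irq_is_enabled())
                             * instead of sleeping with IRQs masked */
        irq_restore(state);
    }

    static void fixed_block(void)
    {
        unsigned state = irq_disable();
        shared_counter++;
        irq_restore(state);
        thread_sleep();     /* block only after leaving the section */
    }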