Rely on atomic load-store on all targets
bugadani committed Dec 16, 2024
1 parent b44ef5c commit 02d7055
Showing 1 changed file with 9 additions and 61 deletions.
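This commit drops the cfg-gated `owner::ExecutorRef` abstraction below and stores the owning executor in a plain `AtomicPtr<SyncExecutor>` on every target. The split existed because `target_has_atomic = "ptr"` only holds on targets with pointer-sized read-modify-write atomics (swap, compare-and-swap); targets such as thumbv6m still provide atomic pointer-sized load and store, and load/store is all this field ever needed. A minimal sketch of the load/store-only pattern (`Executor` and `ExecutorCell` are illustrative stand-ins, not embassy types, and the orderings here are chosen conservatively; the commit itself uses Relaxed/Acquire as the hunks below show):

```rust
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

struct Executor; // stand-in for embassy's SyncExecutor

// An optional &'static Executor stored as a raw pointer, with null meaning
// None. Only `load` and `store` are used, so this compiles even on targets
// that lack compare-and-swap, as long as atomic pointer load/store exists.
struct ExecutorCell(AtomicPtr<Executor>);

impl ExecutorCell {
    const fn new() -> Self {
        Self(AtomicPtr::new(ptr::null_mut()))
    }

    fn set(&self, executor: &'static Executor) {
        // A plain store; no read-modify-write instruction is required.
        self.0.store(executor as *const Executor as *mut Executor, Ordering::Release);
    }

    fn get(&self) -> Option<&'static Executor> {
        // A plain load; `as_ref` maps null back to None.
        unsafe { self.0.load(Ordering::Acquire).as_ref() }
    }
}

static EXECUTOR: Executor = Executor;
static CELL: ExecutorCell = ExecutorCell::new();

fn main() {
    assert!(CELL.get().is_none());
    CELL.set(&EXECUTOR);
    assert!(CELL.get().is_some());
}
```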
70 changes: 9 additions & 61 deletions embassy-executor/src/raw/mod.rs
@@ -16,57 +16,6 @@ mod run_queue;
#[cfg_attr(not(target_has_atomic = "8"), path = "state_critical_section.rs")]
pub(crate) mod state;

-#[cfg(target_has_atomic = "ptr")]
-mod owner {
-    use core::sync::atomic::{AtomicPtr, Ordering};
-
-    use super::{state::Token, SyncExecutor};
-
-    pub(crate) struct ExecutorRef(AtomicPtr<SyncExecutor>);
-
-    impl ExecutorRef {
-        pub const fn new() -> Self {
-            Self(AtomicPtr::new(core::ptr::null_mut()))
-        }
-
-        pub fn set(&self, executor: Option<&'static SyncExecutor>, _: Token) {
-            let ptr = executor.map(|e| e as *const SyncExecutor).unwrap_or(core::ptr::null());
-            self.0.store(ptr.cast_mut(), Ordering::Release);
-        }
-
-        pub fn get(&self, _: Token) -> *const SyncExecutor {
-            self.0.load(Ordering::Acquire).cast_const()
-        }
-    }
-}
-#[cfg(not(target_has_atomic = "ptr"))]
-mod owner {
-    use super::{state::Token, SyncExecutor};
-    use core::cell::Cell;
-
-    use critical_section::Mutex;
-
-    pub(crate) struct ExecutorRef(Mutex<Cell<*const SyncExecutor>>);
-
-    unsafe impl Send for ExecutorRef {}
-    unsafe impl Sync for ExecutorRef {}
-
-    impl ExecutorRef {
-        pub const fn new() -> Self {
-            Self(Mutex::new(Cell::new(core::ptr::null())))
-        }
-
-        pub fn set(&self, executor: Option<&'static SyncExecutor>, cs: Token) {
-            let ptr = executor.map(|e| e as *const SyncExecutor).unwrap_or(core::ptr::null());
-            self.0.borrow(cs).set(ptr);
-        }
-
-        pub fn get(&self, cs: Token) -> *const SyncExecutor {
-            self.0.borrow(cs).get()
-        }
-    }
-}
-
pub mod timer_queue;
#[cfg(feature = "trace")]
mod trace;
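The deleted fallback above wrapped the pointer in `critical_section::Mutex<Cell<...>>` and threaded a `Token` through every call to prove a critical section was held. For reference, a standalone sketch of that pattern against the `critical-section` crate directly (simplified: it takes the critical section itself rather than embassy's `state::Token`; runnable on a host with `critical-section = { version = "1", features = ["std"] }`):

```rust
use core::cell::Cell;
use core::ptr;

use critical_section::Mutex;

struct Executor; // illustrative stand-in

// Raw pointers are neither Send nor Sync, so the wrapper needs the same
// unsafe impls the deleted code had before it can live in a static.
struct ExecutorCell(Mutex<Cell<*const Executor>>);
unsafe impl Send for ExecutorCell {}
unsafe impl Sync for ExecutorCell {}

static EXECUTOR: Executor = Executor;
static CELL: ExecutorCell = ExecutorCell(Mutex::new(Cell::new(ptr::null())));

fn main() {
    // Every access happens inside a critical section (interrupt masking on
    // bare metal, a global lock with the `std` feature); `cs` is the proof.
    critical_section::with(|cs| CELL.0.borrow(cs).set(&EXECUTOR));
    let executor = critical_section::with(|cs| CELL.0.borrow(cs).get());
    assert!(!executor.is_null());
}
```

On load/store-only targets this masked interrupts around every get and set; the new code replaces each of those with a single atomic load or store.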
@@ -79,10 +28,9 @@ use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr::NonNull;
+use core::sync::atomic::{AtomicPtr, Ordering};
use core::task::{Context, Poll};

-use crate::raw::owner::ExecutorRef;
-
use self::run_queue::{RunQueue, RunQueueItem};
use self::state::State;
use self::util::{SyncUnsafeCell, UninitCell};
@@ -93,7 +41,7 @@ use super::SpawnToken;
pub(crate) struct TaskHeader {
    pub(crate) state: State,
    pub(crate) run_queue_item: RunQueueItem,
-    pub(crate) executor: ExecutorRef,
+    pub(crate) executor: AtomicPtr<SyncExecutor>,
    poll_fn: SyncUnsafeCell<Option<unsafe fn(TaskRef)>>,

    /// Integrated timer queue storage. This field should not be accessed outside of the timer queue.
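Null now plays the role of the unset state, and unlike the `Mutex<Cell<...>>` variant no unsafe `Send`/`Sync` impls are needed: `AtomicPtr<T>` is unconditionally `Send + Sync`. A compile-time check illustrating that property (the helper functions are ad hoc, not embassy code):

```rust
use core::sync::atomic::AtomicPtr;

fn assert_send<T: Send>() {}
fn assert_sync<T: Sync>() {}

struct NotThreadSafe(*const u8); // not Send or Sync by itself

fn main() {
    // Compiles because AtomicPtr<T> is Send + Sync for every T, which is
    // why TaskHeader needs no unsafe impls for this field.
    assert_send::<AtomicPtr<NotThreadSafe>>();
    assert_sync::<AtomicPtr<NotThreadSafe>>();
}
```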
@@ -139,7 +87,7 @@ impl TaskRef {

    /// Returns a reference to the executor that the task is currently running on.
    pub unsafe fn executor(self) -> Option<&'static Executor> {
-        let executor = state::locked(|token| self.header().executor.get(token));
+        let executor = self.header().executor.load(Ordering::Relaxed);
        executor.as_ref().map(|e| Executor::wrap(e))
    }
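The getter is now an atomic load plus `as_ref` on the raw pointer, which maps null to `None`; note the read uses `Ordering::Relaxed`, while the `Acquire` loads remain in the wake paths further down. The pointer conversion in isolation (standalone illustration, not embassy code):

```rust
use core::ptr;

fn main() {
    let value = 42u32;
    let valid: *const u32 = &value;
    let null_ptr: *const u32 = ptr::null();

    // SAFETY: `valid` points at a live u32, and `as_ref` returns None for
    // null without dereferencing, so both calls are sound.
    unsafe {
        assert_eq!(valid.as_ref(), Some(&42));
        assert_eq!(null_ptr.as_ref(), None);
    }
}
```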

@@ -207,7 +155,7 @@ impl<F: Future + 'static> TaskStorage<F> {
            raw: TaskHeader {
                state: State::new(),
                run_queue_item: RunQueueItem::new(),
-                executor: ExecutorRef::new(),
+                executor: AtomicPtr::new(core::ptr::null_mut()),
                // Note: this is lazily initialized so that a static `TaskStorage` will go in `.bss`
                poll_fn: SyncUnsafeCell::new(None),
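`AtomicPtr::new` is a `const fn`, and a null pointer is all zero bits on these targets, so the header stays constructible in a const context and a static `TaskStorage` can still land in `.bss` as the adjacent comment requires. A sketch of that property (hypothetical `Header` type, not embassy's):

```rust
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

struct Executor; // illustrative stand-in

struct Header {
    executor: AtomicPtr<Executor>,
}

impl Header {
    // Const-evaluable and all zero bits: the static below needs no runtime
    // initializer, so it can sit in .bss instead of taking space in .data.
    const fn new() -> Self {
        Self {
            executor: AtomicPtr::new(ptr::null_mut()),
        }
    }
}

static HEADER: Header = Header::new();

fn main() {
    assert!(HEADER.executor.load(Ordering::Relaxed).is_null());
}
```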

@@ -450,9 +398,9 @@ impl SyncExecutor {
    }

    pub(super) unsafe fn spawn(&'static self, task: TaskRef) {
-        state::locked(|l| {
-            task.header().executor.set(Some(self), l);
-        });
+        task.header()
+            .executor
+            .store((self as *const Self).cast_mut(), Ordering::Relaxed);

        #[cfg(feature = "trace")]
        trace::task_new(self, &task);
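`spawn` now publishes the executor with a single store, with no critical section. `AtomicPtr<T>` holds a `*mut T` while `spawn` only has `&'static self`, hence `as *const Self` followed by `cast_mut()`; no mutable reference is ever created, and readers only ever use the pointer as `*const`. The cast chain in isolation (illustrative names):

```rust
use core::ptr;
use core::sync::atomic::{AtomicPtr, Ordering};

struct Exec; // illustrative stand-in

static SLOT: AtomicPtr<Exec> = AtomicPtr::new(ptr::null_mut());
static EXEC: Exec = Exec;

fn spawn_like(e: &'static Exec) {
    // &'static Exec -> *const Exec -> *mut Exec. `cast_mut` only changes
    // the pointer type; it does not grant mutable access by itself.
    SLOT.store((e as *const Exec).cast_mut(), Ordering::Relaxed);
}

fn main() {
    spawn_like(&EXEC);
    assert!(ptr::eq(SLOT.load(Ordering::Relaxed), &EXEC));
}
```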
@@ -605,7 +553,7 @@ pub fn wake_task(task: TaskRef) {
    header.state.run_enqueue(|l| {
        // We have just marked the task as scheduled, so enqueue it.
        unsafe {
-            let executor = header.executor.get(l).as_ref().unwrap_unchecked();
+            let executor = header.executor.load(Ordering::Acquire).as_ref().unwrap_unchecked();
            executor.enqueue(task, l);
        }
    });
@@ -619,7 +567,7 @@ pub fn wake_task_no_pend(task: TaskRef) {
    header.state.run_enqueue(|l| {
        // We have just marked the task as scheduled, so enqueue it.
        unsafe {
-            let executor = header.executor.get(l).as_ref().unwrap_unchecked();
+            let executor = header.executor.load(Ordering::Acquire).as_ref().unwrap_unchecked();
            executor.run_queue.enqueue(task, l);
        }
    });
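Both wake paths load the pointer with `Ordering::Acquire` and convert it with `as_ref().unwrap_unchecked()`: a task can only be enqueued after `spawn` stored a non-null executor pointer, so the `None` branch would be dead code. The `unwrap_unchecked` contract in miniature (standalone illustration; calling it on `None` is undefined behavior):

```rust
fn main() {
    let spawned: Option<&str> = Some("task");

    // SAFETY: `spawned` is known to be Some at this point. This skips the
    // None check that `unwrap` would emit; it is UB if the value is None.
    let name = unsafe { spawned.unwrap_unchecked() };
    assert_eq!(name, "task");
}
```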