Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 12 additions & 16 deletions library/std/src/os/xous/services/ticktimer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,28 +3,24 @@ use core::sync::atomic::{Atomic, AtomicU32, Ordering};
use crate::os::xous::ffi::Connection;

pub(crate) enum TicktimerScalar {
ElapsedMs,
SleepMs(usize),
LockMutex(usize /* cookie */),
UnlockMutex(usize /* cookie */),
WaitForCondition(usize /* cookie */, usize /* timeout (ms) */),
NotifyCondition(usize /* cookie */, usize /* count */),
FreeMutex(usize /* cookie */),
FreeCondition(usize /* cookie */),
ElapsedNs,
Sleep { nanoseconds: u64 },
WaitForCondition { cookie: usize, timeout_ns: u64 },
NotifyCondition { cookie: usize, count: usize },
GetSystemTime,
}

impl Into<[usize; 5]> for TicktimerScalar {
fn into(self) -> [usize; 5] {
match self {
TicktimerScalar::ElapsedMs => [0, 0, 0, 0, 0],
TicktimerScalar::SleepMs(msecs) => [1, msecs, 0, 0, 0],
TicktimerScalar::LockMutex(cookie) => [6, cookie, 0, 0, 0],
TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0],
TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0],
TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0],
TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0],
TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0],
TicktimerScalar::ElapsedNs => [0, 0, 0, 0, 0],
TicktimerScalar::Sleep { nanoseconds } => {
[1, (nanoseconds & 0xffffffff) as usize, (nanoseconds >> 32) as usize, 0, 0]
}
TicktimerScalar::WaitForCondition { cookie, timeout_ns } => {
[8, cookie, (timeout_ns & 0xffffffff) as usize, (timeout_ns >> 32) as usize, 0]
}
TicktimerScalar::NotifyCondition { cookie, count } => [9, cookie, count, 0, 0],
TicktimerScalar::GetSystemTime => [12, 0, 0, 0, 0],
}
}
Expand Down
56 changes: 6 additions & 50 deletions library/std/src/sys/alloc/xous.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,72 +3,28 @@

use crate::alloc::{GlobalAlloc, Layout, System};

#[cfg(not(test))]
#[unsafe(export_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE")]
static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();

#[cfg(test)]
unsafe extern "Rust" {
#[link_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE"]
static mut DLMALLOC: dlmalloc::Dlmalloc;
}
static DLMALLOC: crate::sync::Mutex<dlmalloc::Dlmalloc> =
crate::sync::Mutex::new(dlmalloc::Dlmalloc::new());

#[stable(feature = "alloc_system_type", since = "1.28.0")]
unsafe impl GlobalAlloc for System {
#[inline]
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling malloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
unsafe { DLMALLOC.lock().unwrap().malloc(layout.size(), layout.align()) }
}

#[inline]
unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling calloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
unsafe { DLMALLOC.lock().unwrap().calloc(layout.size(), layout.align()) }
}

#[inline]
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling free() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
unsafe { DLMALLOC.lock().unwrap().free(ptr, layout.size(), layout.align()) }
}

#[inline]
unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
// SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
// Calling realloc() is safe because preconditions on this function match the trait method preconditions.
let _lock = lock::lock();
unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) }
}
}

mod lock {
use crate::sync::atomic::Ordering::{Acquire, Release};
use crate::sync::atomic::{Atomic, AtomicI32};

static LOCKED: Atomic<i32> = AtomicI32::new(0);

pub struct DropLock;

pub fn lock() -> DropLock {
loop {
if LOCKED.swap(1, Acquire) == 0 {
return DropLock;
}
crate::os::xous::ffi::do_yield();
}
}

impl Drop for DropLock {
fn drop(&mut self) {
let r = LOCKED.swap(0, Release);
debug_assert_eq!(r, 1);
}
unsafe { DLMALLOC.lock().unwrap().realloc(ptr, layout.size(), layout.align(), new_size) }
}
}
24 changes: 24 additions & 0 deletions library/std/src/sys/pal/xous/futex.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
use core::{sync::atomic::{Atomic}, time::Duration};
Copy link

Copilot AI Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The import wraps a single item in redundant braces: `{Atomic}`. This is not a syntax error, and a `use` declaration never takes a type parameter (the type parameter is supplied at the use sites, e.g. `Atomic<usize>` below); the only issue is the superfluous inner braces, which the idiomatic form drops.

Suggested change
use core::{sync::atomic::{Atomic}, time::Duration};
use core::{sync::atomic::Atomic, time::Duration};

Copilot uses AI. Check for mistakes.

pub type Futex = Atomic<Primitive>;
pub type Primitive = usize;

pub type SmallFutex = Atomic<SmallPrimitive>;

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

what is this? why do we have two of the same thing? is this something required to be exported from the futex file?

pub type SmallPrimitive = usize;

pub fn futex_wait(futex: &Futex, expected: Primitive, timeout: Option<Duration>) -> bool {
assert!(timeout.is_none(), "Timeouts on xous futexes is not supported");
Copy link

Copilot AI Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Grammar error in assertion message: the plural subject "Timeouts" does not agree with "is". Either keep the plural and write "Timeouts on xous futexes are not supported", or use the singular "Timeout on xous futexes is not supported".

Suggested change
assert!(timeout.is_none(), "Timeouts on xous futexes is not supported");
assert!(timeout.is_none(), "Timeout on xous futexes is not supported");

Copilot uses AI. Check for mistakes.
xous::futex_wait(futex, expected).ok();
// Never times out, because the timeout is always `None` (asserted above).
false
}

pub fn futex_wake(futex: &Futex) -> bool {
xous::futex_wake(futex,1).ok();
Copy link

Copilot AI Dec 10, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Missing space after comma in the function call. Should be xous::futex_wake(futex, 1) for consistency with coding style.

Suggested change
xous::futex_wake(futex,1).ok();
xous::futex_wake(futex, 1).ok();

Copilot uses AI. Check for mistakes.
// We don't know if we woke anyone
false
}

pub fn futex_wake_all(futex: &Futex) {
xous::futex_wake(futex, usize::MAX).ok();
}
2 changes: 2 additions & 0 deletions library/std/src/sys/pal/xous/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@

use crate::os::xous::ffi::exit;

pub mod futex;

pub mod os;
#[path = "../unsupported/pipe.rs"]
pub mod pipe;
Expand Down
12 changes: 7 additions & 5 deletions library/std/src/sys/pal/xous/time.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
use crate::os::xous::ffi::blocking_scalar;
use crate::os::xous::services::TicktimerScalar::{ElapsedMs, GetSystemTime};
use crate::os::xous::services::TicktimerScalar::{ElapsedNs, GetSystemTime};
use crate::os::xous::services::ticktimer_server;
use crate::time::Duration;

Expand All @@ -13,11 +13,11 @@ pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));

impl Instant {
pub fn now() -> Instant {
let result = blocking_scalar(ticktimer_server(), ElapsedMs.into())
let result = blocking_scalar(ticktimer_server(), ElapsedNs.into())
.expect("failed to request elapsed_ms");
let lower = result[0];
let upper = result[1];
Instant { 0: Duration::from_millis(lower as u64 | (upper as u64) << 32) }
Instant { 0: Duration::from_nanos(lower as u64 | (upper as u64) << 32) }
}

pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
Expand All @@ -36,8 +36,10 @@ impl Instant {
impl SystemTime {
pub fn now() -> SystemTime {
let result = blocking_scalar(ticktimer_server(), GetSystemTime.into())
.expect("failed to request utc time in seconds");
SystemTime { 0: Duration::from_secs(result[0] as u64) }
.expect("failed to request utc time in nanoseconds");
let lower = result[0];
let upper = result[1];
SystemTime { 0: Duration::from_nanos(lower as u64 | (upper as u64) << 32) }
}

pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
Expand Down
144 changes: 40 additions & 104 deletions library/std/src/sys/sync/condvar/xous.rs
Original file line number Diff line number Diff line change
@@ -1,18 +1,13 @@
use core::sync::atomic::{Atomic, AtomicUsize, Ordering};
use core::sync::atomic::{AtomicUsize, Ordering};

use crate::os::xous::ffi::{blocking_scalar, scalar};
use crate::os::xous::ffi::blocking_scalar;
use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
use crate::sys::sync::Mutex;
use crate::time::Duration;

// The implementation is inspired by Andrew D. Birrell's paper
// "Implementing Condition Variables with Semaphores"

const NOTIFY_TRIES: usize = 3;

pub struct Condvar {
counter: Atomic<usize>,
timed_out: Atomic<usize>,
waiting_on_futex: AtomicUsize,
waiting_on_ticktimer: AtomicUsize,
}

unsafe impl Send for Condvar {}
Expand All @@ -21,58 +16,23 @@ unsafe impl Sync for Condvar {}
impl Condvar {
#[inline]
pub const fn new() -> Condvar {
Condvar { counter: AtomicUsize::new(0), timed_out: AtomicUsize::new(0) }
Condvar { waiting_on_futex: AtomicUsize::new(0), waiting_on_ticktimer: AtomicUsize::new(0) }
}

fn notify_some(&self, to_notify: usize) {
// Assumption: The Mutex protecting this condvar is locked throughout the
// entirety of this call, preventing calls to `wait` and `wait_timeout`.

// Logic check: Ensure that there aren't any missing waiters. Remove any that
// timed-out, ensuring the counter doesn't underflow.
assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed));
self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed);

// Figure out how many threads to notify. Note that it is impossible for `counter`
// to increase during this operation because Mutex is locked. However, it is
// possible for `counter` to decrease due to a condvar timing out, in which
// case the corresponding `timed_out` will increase accordingly.
let Ok(waiter_count) =
self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| {
if counter == 0 {
return None;
} else {
Some(counter - counter.min(to_notify))
}
})
else {
// No threads are waiting on this condvar
return;
};

let mut remaining_to_wake = waiter_count.min(to_notify);
if remaining_to_wake == 0 {
return;
}
for _wake_tries in 0..NOTIFY_TRIES {
let result = blocking_scalar(
fn notify_some(&self, mut count: usize) {
if self.waiting_on_ticktimer.load(Ordering::SeqCst) > 0 {
let notified = blocking_scalar(
ticktimer_server(),
TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(),
TicktimerScalar::NotifyCondition { cookie: self.index(), count }.into(),
)
.expect("failure to send NotifyCondition command");

// Remove the list of waiters that were notified
remaining_to_wake -= result[0];
.expect("failure to send NotifyCondition command")[0];
count -= notified;
}

// Also remove the number of waiters that timed out. Clamp it to 0 in order to
// ensure we don't wait forever in case the waiter woke up between the time
// we counted the remaining waiters and now.
remaining_to_wake =
remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed));
if remaining_to_wake == 0 {
return;
if count > 0 {
if self.waiting_on_futex.load(Ordering::SeqCst) > 0 {
xous::futex_wake(&self.waiting_on_futex, count).ok();
}
crate::thread::yield_now();
}
}

Expand All @@ -81,68 +41,44 @@ impl Condvar {
}

pub fn notify_all(&self) {
self.notify_some(self.counter.load(Ordering::Relaxed))
self.notify_some(usize::MAX)
}

fn index(&self) -> usize {
core::ptr::from_ref(self).addr()
}

/// Unlock the given Mutex and wait for the notification. Wait at most
/// `ms` milliseconds, or pass `0` to wait forever.
///
/// Returns `true` if the condition was received, `false` if it timed out
fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool {
self.counter.fetch_add(1, Ordering::Relaxed);
pub unsafe fn wait(&self, mutex: &Mutex) {
let prev_futex_waiters = self.waiting_on_futex.fetch_add(1, Ordering::SeqCst);
unsafe { mutex.unlock() };
// If we got preempted and self.waiting_on_futex changed, this will return immediately.
// Then again, spurious returns from `wait` are documented and should be accounted for,
// so let the caller handle this case.
xous::futex_wait(&self.waiting_on_futex, prev_futex_waiters + 1).ok();
mutex.lock();
self.waiting_on_futex.fetch_sub(1, Ordering::SeqCst);
}

// Returns false on timeout
pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
let mut nanos = dur.as_nanos() as u64;
// Ensure we don't wait for 0 ms, which would cause us to wait forever
if nanos == 0 {
nanos = 1;
}
self.waiting_on_ticktimer.fetch_add(1, Ordering::SeqCst);
unsafe { mutex.unlock() };
// Threading concern: There is a chance that the `notify` thread wakes up here before
// we have a chance to wait for the condition. This is fine because we've recorded
// the fact that we're waiting by incrementing the counter.
let result = blocking_scalar(
ticktimer_server(),
TicktimerScalar::WaitForCondition(self.index(), ms).into(),
);
let awoken = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;

// If we awoke due to a timeout, increment the `timed_out` counter so that the
// main loop of `notify` knows there's a timeout.
//
// This is done with the Mutex still unlocked, because the Mutex might still
// be locked by the `notify` process above.
if !awoken {
self.timed_out.fetch_add(1, Ordering::Relaxed);
}

unsafe { mutex.lock() };
awoken
}

pub unsafe fn wait(&self, mutex: &Mutex) {
// Wait for 0 ms, which is a special case to "wait forever"
self.wait_ms(mutex, 0);
}

pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
let mut millis = dur.as_millis() as usize;
// Ensure we don't wait for 0 ms, which would cause us to wait forever
if millis == 0 {
millis = 1;
}
self.wait_ms(mutex, millis)
}
}
TicktimerScalar::WaitForCondition { cookie: self.index(), timeout_ns: nanos }.into(),
)
.expect("Ticktimer: failure to send WaitForCondition command");
mutex.lock();
self.waiting_on_ticktimer.fetch_sub(1, Ordering::SeqCst);

impl Drop for Condvar {
fn drop(&mut self) {
let remaining_count = self.counter.load(Ordering::Relaxed);
let timed_out = self.timed_out.load(Ordering::Relaxed);
assert!(
remaining_count - timed_out == 0,
"counter was {} and timed_out was {} not 0",
remaining_count,
timed_out
);
scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok();
result[0] == 0
}
}
5 changes: 1 addition & 4 deletions library/std/src/sys/sync/mutex/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ cfg_select! {
target_os = "dragonfly",
all(target_family = "wasm", target_feature = "atomics"),
target_os = "hermit",
target_os = "xous",
) => {
mod futex;
pub use futex::Mutex;
Expand Down Expand Up @@ -35,10 +36,6 @@ cfg_select! {
mod itron;
pub use itron::Mutex;
}
target_os = "xous" => {
mod xous;
pub use xous::Mutex;
}
_ => {
mod no_threads;
pub use no_threads::Mutex;
Expand Down
Loading
Loading