From a8ce307426979a967e82a5e9f70f9b709066d16a Mon Sep 17 00:00:00 2001
From: alex
Date: Wed, 10 Dec 2025 15:28:17 +0100
Subject: [PATCH 1/2] Simplify sys::sync implementations

---
 library/std/src/os/xous/services/ticktimer.rs |  28 ++--
 library/std/src/sys/pal/xous/futex.rs         |  24 +++
 library/std/src/sys/pal/xous/mod.rs           |   2 +
 library/std/src/sys/pal/xous/time.rs          |  12 +-
 library/std/src/sys/sync/condvar/xous.rs      | 144 +++++-------------
 library/std/src/sys/sync/mutex/mod.rs         |   5 +-
 library/std/src/sys/sync/mutex/xous.rs        | 110 -------------
 library/std/src/sys/sync/once/mod.rs          |   2 +-
 library/std/src/sys/sync/rwlock/mod.rs        |   2 +-
 .../std/src/sys/sync/thread_parking/xous.rs   | 116 ++++----------
 library/std/src/sys/thread/xous.rs            |  14 +-
 11 files changed, 119 insertions(+), 340 deletions(-)
 create mode 100644 library/std/src/sys/pal/xous/futex.rs
 delete mode 100644 library/std/src/sys/sync/mutex/xous.rs

diff --git a/library/std/src/os/xous/services/ticktimer.rs b/library/std/src/os/xous/services/ticktimer.rs
index 7134c336a7f..90398f22471 100644
--- a/library/std/src/os/xous/services/ticktimer.rs
+++ b/library/std/src/os/xous/services/ticktimer.rs
@@ -3,28 +3,24 @@ use core::sync::atomic::{Atomic, AtomicU32, Ordering};
 use crate::os::xous::ffi::Connection;
 
 pub(crate) enum TicktimerScalar {
-    ElapsedMs,
-    SleepMs(usize),
-    LockMutex(usize /* cookie */),
-    UnlockMutex(usize /* cookie */),
-    WaitForCondition(usize /* cookie */, usize /* timeout (ms) */),
-    NotifyCondition(usize /* cookie */, usize /* count */),
-    FreeMutex(usize /* cookie */),
-    FreeCondition(usize /* cookie */),
+    ElapsedNs,
+    Sleep { nanoseconds: u64 },
+    WaitForCondition { cookie: usize, timeout_ns: u64 },
+    NotifyCondition { cookie: usize, count: usize },
     GetSystemTime,
 }
 
 impl Into<[usize; 5]> for TicktimerScalar {
     fn into(self) -> [usize; 5] {
         match self {
-            TicktimerScalar::ElapsedMs => [0, 0, 0, 0, 0],
-            TicktimerScalar::SleepMs(msecs) => [1, msecs, 0, 0, 0],
-            TicktimerScalar::LockMutex(cookie) => [6, cookie, 0, 0, 0],
-            TicktimerScalar::UnlockMutex(cookie) => [7, cookie, 0, 0, 0],
-            TicktimerScalar::WaitForCondition(cookie, timeout_ms) => [8, cookie, timeout_ms, 0, 0],
-            TicktimerScalar::NotifyCondition(cookie, count) => [9, cookie, count, 0, 0],
-            TicktimerScalar::FreeMutex(cookie) => [10, cookie, 0, 0, 0],
-            TicktimerScalar::FreeCondition(cookie) => [11, cookie, 0, 0, 0],
+            TicktimerScalar::ElapsedNs => [0, 0, 0, 0, 0],
+            TicktimerScalar::Sleep { nanoseconds } => {
+                [1, (nanoseconds & 0xffffffff) as usize, (nanoseconds >> 32) as usize, 0, 0]
+            }
+            TicktimerScalar::WaitForCondition { cookie, timeout_ns } => {
+                [8, cookie, (timeout_ns & 0xffffffff) as usize, (timeout_ns >> 32) as usize, 0]
+            }
+            TicktimerScalar::NotifyCondition { cookie, count } => [9, cookie, count, 0, 0],
             TicktimerScalar::GetSystemTime => [12, 0, 0, 0, 0],
         }
     }
diff --git a/library/std/src/sys/pal/xous/futex.rs b/library/std/src/sys/pal/xous/futex.rs
new file mode 100644
index 00000000000..5893f3f4b81
--- /dev/null
+++ b/library/std/src/sys/pal/xous/futex.rs
@@ -0,0 +1,24 @@
+use core::{sync::atomic::Atomic, time::Duration};
+
+pub type Futex = Atomic<usize>;
+pub type Primitive = usize;
+
+pub type SmallFutex = Atomic<usize>;
+pub type SmallPrimitive = usize;
+
+pub fn futex_wait(futex: &Futex, expected: Primitive, timeout: Option<Duration>) -> bool {
+    assert!(timeout.is_none(), "timeouts on xous futexes are not supported");
+    xous::futex_wait(futex, expected).ok();
+    // The timeout is always `None`, so we never actually time out.
+    false
+}
+
+pub fn futex_wake(futex: &Futex) -> bool {
+    xous::futex_wake(futex, 1).ok();
+    // We don't know whether we actually woke anyone.
+    false
+}
+
+pub fn futex_wake_all(futex: &Futex) {
+    xous::futex_wake(futex, usize::MAX).ok();
+}
diff --git a/library/std/src/sys/pal/xous/mod.rs b/library/std/src/sys/pal/xous/mod.rs
index fffd0ca839e..8cfd19f066b 100644
--- a/library/std/src/sys/pal/xous/mod.rs
+++ b/library/std/src/sys/pal/xous/mod.rs
@@ -2,6 +2,8 @@
 
 use crate::os::xous::ffi::exit;
 
+pub mod futex;
+
 pub mod os;
 #[path = "../unsupported/pipe.rs"]
 pub mod pipe;
diff --git a/library/std/src/sys/pal/xous/time.rs b/library/std/src/sys/pal/xous/time.rs
index d0ac9910fce..67c23b7ff8f 100644
--- a/library/std/src/sys/pal/xous/time.rs
+++ b/library/std/src/sys/pal/xous/time.rs
@@ -1,5 +1,5 @@
 use crate::os::xous::ffi::blocking_scalar;
-use crate::os::xous::services::TicktimerScalar::{ElapsedMs, GetSystemTime};
+use crate::os::xous::services::TicktimerScalar::{ElapsedNs, GetSystemTime};
 use crate::os::xous::services::ticktimer_server;
 use crate::time::Duration;
 
@@ -13,11 +13,11 @@ pub const UNIX_EPOCH: SystemTime = SystemTime(Duration::from_secs(0));
 
 impl Instant {
     pub fn now() -> Instant {
-        let result = blocking_scalar(ticktimer_server(), ElapsedMs.into())
+        let result = blocking_scalar(ticktimer_server(), ElapsedNs.into())
             .expect("failed to request elapsed_ms");
         let lower = result[0];
         let upper = result[1];
-        Instant { 0: Duration::from_millis(lower as u64 | (upper as u64) << 32) }
+        Instant { 0: Duration::from_nanos(lower as u64 | (upper as u64) << 32) }
     }
 
     pub fn checked_sub_instant(&self, other: &Instant) -> Option<Duration> {
@@ -36,8 +36,10 @@ impl Instant {
 impl SystemTime {
     pub fn now() -> SystemTime {
         let result = blocking_scalar(ticktimer_server(), GetSystemTime.into())
-            .expect("failed to request utc time in seconds");
-        SystemTime { 0: Duration::from_secs(result[0] as u64) }
+            .expect("failed to request utc time in nanoseconds");
+        let lower = result[0];
+        let upper = result[1];
+        SystemTime { 0: Duration::from_nanos(lower as u64 | (upper as u64) << 32) }
     }
 
     pub fn sub_time(&self, other: &SystemTime) -> Result<Duration, Duration> {
diff --git a/library/std/src/sys/sync/condvar/xous.rs b/library/std/src/sys/sync/condvar/xous.rs
index 21a1587214a..5de6d4b865c 100644
--- a/library/std/src/sys/sync/condvar/xous.rs
+++ b/library/std/src/sys/sync/condvar/xous.rs
@@ -1,18 +1,13 @@
-use core::sync::atomic::{Atomic, AtomicUsize, Ordering};
+use core::sync::atomic::{AtomicUsize, Ordering};
 
-use crate::os::xous::ffi::{blocking_scalar, scalar};
+use crate::os::xous::ffi::blocking_scalar;
 use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
 use crate::sys::sync::Mutex;
 use crate::time::Duration;
 
-// The implementation is inspired by Andrew D. Birrell's paper
-// "Implementing Condition Variables with Semaphores"
-
-const NOTIFY_TRIES: usize = 3;
-
 pub struct Condvar {
-    counter: Atomic<usize>,
-    timed_out: Atomic<usize>,
+    waiting_on_futex: AtomicUsize,
+    waiting_on_ticktimer: AtomicUsize,
 }
 
 unsafe impl Send for Condvar {}
@@ -21,58 +16,23 @@ unsafe impl Sync for Condvar {}
 
 impl Condvar {
     #[inline]
     pub const fn new() -> Condvar {
-        Condvar { counter: AtomicUsize::new(0), timed_out: AtomicUsize::new(0) }
+        Condvar { waiting_on_futex: AtomicUsize::new(0), waiting_on_ticktimer: AtomicUsize::new(0) }
     }
 
-    fn notify_some(&self, to_notify: usize) {
-        // Assumption: The Mutex protecting this condvar is locked throughout the
-        // entirety of this call, preventing calls to `wait` and `wait_timeout`.
- - // Logic check: Ensure that there aren't any missing waiters. Remove any that - // timed-out, ensuring the counter doesn't underflow. - assert!(self.timed_out.load(Ordering::Relaxed) <= self.counter.load(Ordering::Relaxed)); - self.counter.fetch_sub(self.timed_out.swap(0, Ordering::Relaxed), Ordering::Relaxed); - - // Figure out how many threads to notify. Note that it is impossible for `counter` - // to increase during this operation because Mutex is locked. However, it is - // possible for `counter` to decrease due to a condvar timing out, in which - // case the corresponding `timed_out` will increase accordingly. - let Ok(waiter_count) = - self.counter.fetch_update(Ordering::Relaxed, Ordering::Relaxed, |counter| { - if counter == 0 { - return None; - } else { - Some(counter - counter.min(to_notify)) - } - }) - else { - // No threads are waiting on this condvar - return; - }; - - let mut remaining_to_wake = waiter_count.min(to_notify); - if remaining_to_wake == 0 { - return; - } - for _wake_tries in 0..NOTIFY_TRIES { - let result = blocking_scalar( + fn notify_some(&self, mut count: usize) { + if self.waiting_on_ticktimer.load(Ordering::SeqCst) > 0 { + let notified = blocking_scalar( ticktimer_server(), - TicktimerScalar::NotifyCondition(self.index(), remaining_to_wake).into(), + TicktimerScalar::NotifyCondition { cookie: self.index(), count }.into(), ) - .expect("failure to send NotifyCondition command"); - - // Remove the list of waiters that were notified - remaining_to_wake -= result[0]; + .expect("failure to send NotifyCondition command")[0]; + count -= notified; + } - // Also remove the number of waiters that timed out. Clamp it to 0 in order to - // ensure we don't wait forever in case the waiter woke up between the time - // we counted the remaining waiters and now. - remaining_to_wake = - remaining_to_wake.saturating_sub(self.timed_out.swap(0, Ordering::Relaxed)); - if remaining_to_wake == 0 { - return; + if count > 0 { + if self.waiting_on_futex.load(Ordering::SeqCst) > 0 { + xous::futex_wake(&self.waiting_on_futex, count).ok(); } - crate::thread::yield_now(); } } @@ -81,68 +41,44 @@ impl Condvar { } pub fn notify_all(&self) { - self.notify_some(self.counter.load(Ordering::Relaxed)) + self.notify_some(usize::MAX) } fn index(&self) -> usize { core::ptr::from_ref(self).addr() } - /// Unlock the given Mutex and wait for the notification. Wait at most - /// `ms` milliseconds, or pass `0` to wait forever. - /// - /// Returns `true` if the condition was received, `false` if it timed out - fn wait_ms(&self, mutex: &Mutex, ms: usize) -> bool { - self.counter.fetch_add(1, Ordering::Relaxed); + pub unsafe fn wait(&self, mutex: &Mutex) { + let prev_futex_waiters = self.waiting_on_futex.fetch_add(1, Ordering::SeqCst); unsafe { mutex.unlock() }; + // If we got preempted and self.waiting_on_futex changed, this will return immediately. + // Then again, spurious returns from `wait` are documented and should be accounted for, + // so let the caller handle this case. 
+        xous::futex_wait(&self.waiting_on_futex, prev_futex_waiters + 1).ok();
+        mutex.lock();
+        self.waiting_on_futex.fetch_sub(1, Ordering::SeqCst);
+    }
+
+    // Returns `false` if the wait timed out.
+    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
+        let mut nanos: u64 = dur.as_nanos().try_into().unwrap_or(u64::MAX);
+        // Ensure we don't wait for 0 ns, which would cause us to wait forever
+        if nanos == 0 {
+            nanos = 1;
+        }
+        self.waiting_on_ticktimer.fetch_add(1, Ordering::SeqCst);
+        unsafe { mutex.unlock() };
         // Threading concern: There is a chance that the `notify` thread wakes up here before
         // we have a chance to wait for the condition. This is fine because we've recorded
         // the fact that we're waiting by incrementing the counter.
         let result = blocking_scalar(
             ticktimer_server(),
-            TicktimerScalar::WaitForCondition(self.index(), ms).into(),
-        );
-        let awoken = result.expect("Ticktimer: failure to send WaitForCondition command")[0] == 0;
-
-        // If we awoke due to a timeout, increment the `timed_out` counter so that the
-        // main loop of `notify` knows there's a timeout.
-        //
-        // This is done with the Mutex still unlocked, because the Mutex might still
-        // be locked by the `notify` process above.
-        if !awoken {
-            self.timed_out.fetch_add(1, Ordering::Relaxed);
-        }
-
-        unsafe { mutex.lock() };
-        awoken
-    }
-
-    pub unsafe fn wait(&self, mutex: &Mutex) {
-        // Wait for 0 ms, which is a special case to "wait forever"
-        self.wait_ms(mutex, 0);
-    }
-
-    pub unsafe fn wait_timeout(&self, mutex: &Mutex, dur: Duration) -> bool {
-        let mut millis = dur.as_millis() as usize;
-        // Ensure we don't wait for 0 ms, which would cause us to wait forever
-        if millis == 0 {
-            millis = 1;
-        }
-        self.wait_ms(mutex, millis)
-    }
-}
+            TicktimerScalar::WaitForCondition { cookie: self.index(), timeout_ns: nanos }.into(),
+        )
+        .expect("Ticktimer: failure to send WaitForCondition command");
+        mutex.lock();
+        self.waiting_on_ticktimer.fetch_sub(1, Ordering::SeqCst);
 
-impl Drop for Condvar {
-    fn drop(&mut self) {
-        let remaining_count = self.counter.load(Ordering::Relaxed);
-        let timed_out = self.timed_out.load(Ordering::Relaxed);
-        assert!(
-            remaining_count - timed_out == 0,
-            "counter was {} and timed_out was {} not 0",
-            remaining_count,
-            timed_out
-        );
-        scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok();
+        result[0] == 0
     }
 }
diff --git a/library/std/src/sys/sync/mutex/mod.rs b/library/std/src/sys/sync/mutex/mod.rs
index c885b0eabae..398914c5094 100644
--- a/library/std/src/sys/sync/mutex/mod.rs
+++ b/library/std/src/sys/sync/mutex/mod.rs
@@ -8,6 +8,7 @@ cfg_select! {
         target_os = "dragonfly",
         all(target_family = "wasm", target_feature = "atomics"),
         target_os = "hermit",
+        target_os = "xous",
     ) => {
         mod futex;
         pub use futex::Mutex;
@@ -35,10 +36,6 @@ cfg_select! {
         mod itron;
         pub use itron::Mutex;
     }
-    target_os = "xous" => {
-        mod xous;
-        pub use xous::Mutex;
-    }
     _ => {
         mod no_threads;
         pub use no_threads::Mutex;
diff --git a/library/std/src/sys/sync/mutex/xous.rs b/library/std/src/sys/sync/mutex/xous.rs
deleted file mode 100644
index d16faa5aea3..00000000000
--- a/library/std/src/sys/sync/mutex/xous.rs
+++ /dev/null
@@ -1,110 +0,0 @@
-use crate::os::xous::ffi::{blocking_scalar, do_yield};
-use crate::os::xous::services::{TicktimerScalar, ticktimer_server};
-use crate::sync::atomic::Ordering::{Acquire, Relaxed, Release};
-use crate::sync::atomic::{Atomic, AtomicBool, AtomicUsize};
-
-pub struct Mutex {
-    /// The "locked" value indicates how many threads are waiting on this
-    /// Mutex.
Possible values are: - /// 0: The lock is unlocked - /// 1: The lock is locked and uncontended - /// >=2: The lock is locked and contended - /// - /// A lock is "contended" when there is more than one thread waiting - /// for a lock, or it is locked for long periods of time. Rather than - /// spinning, these locks send a Message to the ticktimer server - /// requesting that they be woken up when a lock is unlocked. - locked: Atomic, - - /// Whether this Mutex ever was contended, and therefore made a trip - /// to the ticktimer server. If this was never set, then we were never - /// on the slow path and can skip deregistering the mutex. - contended: Atomic, -} - -impl Mutex { - #[inline] - pub const fn new() -> Mutex { - Mutex { locked: AtomicUsize::new(0), contended: AtomicBool::new(false) } - } - - fn index(&self) -> usize { - core::ptr::from_ref(self).addr() - } - - #[inline] - pub unsafe fn lock(&self) { - // Try multiple times to acquire the lock without resorting to the ticktimer - // server. For locks that are held for a short amount of time, this will - // result in the ticktimer server never getting invoked. The `locked` value - // will be either 0 or 1. - for _attempts in 0..3 { - if unsafe { self.try_lock() } { - return; - } - do_yield(); - } - - // Try one more time to lock. If the lock is released between the previous code and - // here, then the inner `locked` value will be 1 at the end of this. If it was not - // locked, then the value will be more than 1, for example if there are multiple other - // threads waiting on this lock. - if unsafe { self.try_lock_or_poison() } { - return; - } - - // When this mutex is dropped, we will need to deregister it with the server. - self.contended.store(true, Relaxed); - - // The lock is now "contended". When the lock is released, a Message will get sent to the - // ticktimer server to wake it up. Note that this may already have happened, so the actual - // value of `lock` may be anything (0, 1, 2, ...). - blocking_scalar( - ticktimer_server(), - crate::os::xous::services::TicktimerScalar::LockMutex(self.index()).into(), - ) - .expect("failure to send LockMutex command"); - } - - #[inline] - pub unsafe fn unlock(&self) { - let prev = self.locked.fetch_sub(1, Release); - - // If the previous value was 1, then this was a "fast path" unlock, so no - // need to involve the Ticktimer server - if prev == 1 { - return; - } - - // If it was 0, then something has gone seriously wrong and the counter - // has just wrapped around. - if prev == 0 { - panic!("mutex lock count underflowed"); - } - - // Unblock one thread that is waiting on this message. - blocking_scalar(ticktimer_server(), TicktimerScalar::UnlockMutex(self.index()).into()) - .expect("failure to send UnlockMutex command"); - } - - #[inline] - pub unsafe fn try_lock(&self) -> bool { - self.locked.compare_exchange(0, 1, Acquire, Relaxed).is_ok() - } - - #[inline] - pub unsafe fn try_lock_or_poison(&self) -> bool { - self.locked.fetch_add(1, Acquire) == 0 - } -} - -impl Drop for Mutex { - fn drop(&mut self) { - // If there was Mutex contention, then we involved the ticktimer. Free - // the resources associated with this Mutex as it is deallocated. 
- if self.contended.load(Relaxed) { - blocking_scalar(ticktimer_server(), TicktimerScalar::FreeMutex(self.index()).into()) - .ok(); - } - } -} diff --git a/library/std/src/sys/sync/once/mod.rs b/library/std/src/sys/sync/once/mod.rs index 8adeb1f259d..3c5f9da00c4 100644 --- a/library/std/src/sys/sync/once/mod.rs +++ b/library/std/src/sys/sync/once/mod.rs @@ -18,6 +18,7 @@ cfg_select! { target_os = "dragonfly", target_os = "fuchsia", target_os = "hermit", + target_os = "xous", ) => { mod futex; pub use futex::{Once, OnceState}; @@ -27,7 +28,6 @@ cfg_select! { target_family = "unix", all(target_vendor = "fortanix", target_env = "sgx"), target_os = "solid_asp3", - target_os = "xous", ) => { mod queue; pub use queue::{Once, OnceState}; diff --git a/library/std/src/sys/sync/rwlock/mod.rs b/library/std/src/sys/sync/rwlock/mod.rs index 82f1dd18dee..867ee75eab8 100644 --- a/library/std/src/sys/sync/rwlock/mod.rs +++ b/library/std/src/sys/sync/rwlock/mod.rs @@ -9,6 +9,7 @@ cfg_select! { target_os = "fuchsia", all(target_family = "wasm", target_feature = "atomics"), target_os = "hermit", + target_os = "xous", ) => { mod futex; pub use futex::RwLock; @@ -17,7 +18,6 @@ cfg_select! { target_family = "unix", all(target_os = "windows", target_vendor = "win7"), all(target_vendor = "fortanix", target_env = "sgx"), - target_os = "xous", ) => { mod queue; pub use queue::RwLock; diff --git a/library/std/src/sys/sync/thread_parking/xous.rs b/library/std/src/sys/sync/thread_parking/xous.rs index 0f451c0ac29..267bda644d1 100644 --- a/library/std/src/sys/sync/thread_parking/xous.rs +++ b/library/std/src/sys/sync/thread_parking/xous.rs @@ -1,110 +1,48 @@ -use crate::os::xous::ffi::{blocking_scalar, scalar}; -use crate::os::xous::services::{TicktimerScalar, ticktimer_server}; +use core::sync::atomic::{AtomicBool, Ordering}; + use crate::pin::Pin; -use crate::ptr; -use crate::sync::atomic::Ordering::{Acquire, Release}; -use crate::sync::atomic::{Atomic, AtomicI8}; +use crate::sys::sync::{Condvar, Mutex}; use crate::time::Duration; -const NOTIFIED: i8 = 1; -const EMPTY: i8 = 0; -const PARKED: i8 = -1; - pub struct Parker { - state: Atomic, + token: AtomicBool, + mtx: Mutex, + condvar: Condvar, } impl Parker { pub unsafe fn new_in_place(parker: *mut Parker) { - unsafe { parker.write(Parker { state: AtomicI8::new(EMPTY) }) } - } - - fn index(&self) -> usize { - ptr::from_ref(self).addr() + unsafe { + parker.write(Parker { + token: AtomicBool::new(false), + mtx: Mutex::new(), + condvar: Condvar::new(), + }) + } } pub unsafe fn park(self: Pin<&Self>) { - // Change NOTIFIED to EMPTY and EMPTY to PARKED. - let state = self.state.fetch_sub(1, Acquire); - if state == NOTIFIED { - // The state has gone from NOTIFIED (1) to EMPTY (0) - return; + self.mtx.lock(); + while !self.token.load(Ordering::SeqCst) { + self.condvar.wait(&self.mtx); } - // The state has gone from EMPTY (0) to PARKED (-1) - assert!(state == EMPTY); - - // The state is now PARKED (-1). Wait until the `unpark` wakes us up. - blocking_scalar( - ticktimer_server(), - TicktimerScalar::WaitForCondition(self.index(), 0).into(), - ) - .expect("failed to send WaitForCondition command"); - - let state = self.state.swap(EMPTY, Acquire); - assert!(state == NOTIFIED || state == PARKED); + self.token.store(false, Ordering::SeqCst); + self.mtx.unlock(); } pub unsafe fn park_timeout(self: Pin<&Self>, timeout: Duration) { - // Change NOTIFIED to EMPTY and EMPTY to PARKED. 
-        let state = self.state.fetch_sub(1, Acquire);
-        if state == NOTIFIED {
-            // The state has gone from NOTIFIED (1) to EMPTY (0)
-            return;
+        self.mtx.lock();
+        // A single wait suffices: `park_timeout` is allowed to return early or spuriously.
+        if !self.token.load(Ordering::SeqCst) {
+            self.condvar.wait_timeout(&self.mtx, timeout);
         }
-        // The state has gone from EMPTY (0) to PARKED (-1)
-        assert!(state == EMPTY);
-
-        // A value of zero indicates an indefinite wait. Clamp the number of
-        // milliseconds to the allowed range.
-        let millis = usize::max(timeout.as_millis().try_into().unwrap_or(usize::MAX), 1);
-
-        // The state is now PARKED (-1). Wait until the `unpark` wakes us up,
-        // or things time out.
-        let _was_timeout = blocking_scalar(
-            ticktimer_server(),
-            TicktimerScalar::WaitForCondition(self.index(), millis).into(),
-        )
-        .expect("failed to send WaitForCondition command")[0]
-            != 0;
-
-        let state = self.state.swap(EMPTY, Acquire);
-        assert!(state == PARKED || state == NOTIFIED);
+        self.token.store(false, Ordering::SeqCst);
+        self.mtx.unlock();
     }
 
     pub fn unpark(self: Pin<&Self>) {
-        // If the state is already `NOTIFIED`, then another thread has
-        // indicated it wants to wake up the target thread.
-        //
-        // If the state is `EMPTY` then there is nothing to wake up, and
-        // the target thread will immediately exit from `park()` the
-        // next time that function is called.
-        if self.state.swap(NOTIFIED, Release) != PARKED {
-            return;
-        }
-
-        // The thread is parked, wake it up. Keep trying until we wake something up.
-        // This will happen when the `NotifyCondition` call returns the fact that
-        // 1 condition was notified.
-        // Alternately, keep going until the state is seen as `EMPTY`, indicating
-        // the thread woke up and kept going. This can happen when the Park
-        // times out before we can send the NotifyCondition message.
-        while blocking_scalar(
-            ticktimer_server(),
-            TicktimerScalar::NotifyCondition(self.index(), 1).into(),
-        )
-        .expect("failed to send NotifyCondition command")[0]
-            == 0
-            && self.state.load(Acquire) != EMPTY
-        {
-            // The target thread hasn't yet hit the `WaitForCondition` call.
-            // Yield to let the target thread run some more.
-            crate::thread::yield_now();
-        }
-    }
-}
-
-impl Drop for Parker {
-    fn drop(&mut self) {
-        scalar(ticktimer_server(), TicktimerScalar::FreeCondition(self.index()).into()).ok();
+        self.mtx.lock();
+        self.token.store(true, Ordering::SeqCst);
+        self.condvar.notify_one();
+        unsafe { self.mtx.unlock() };
     }
 }
diff --git a/library/std/src/sys/thread/xous.rs b/library/std/src/sys/thread/xous.rs
index 6a20fe78f5f..e418311a7a7 100644
--- a/library/std/src/sys/thread/xous.rs
+++ b/library/std/src/sys/thread/xous.rs
@@ -64,14 +64,8 @@ pub fn yield_now() {
 }
 
 pub fn sleep(dur: Duration) {
-    // Because the sleep server works on units of `usized milliseconds`, split
-    // the messages up into these chunks. This means we may run into issues
-    // if you try to sleep a thread for more than 49 days on a 32-bit system.
-    let mut millis = dur.as_millis();
-    while millis > 0 {
-        let sleep_duration = if millis > (usize::MAX as _) { usize::MAX } else { millis as usize };
-        blocking_scalar(ticktimer_server(), TicktimerScalar::SleepMs(sleep_duration).into())
-            .expect("failed to send message to ticktimer server");
-        millis -= sleep_duration as u128;
-    }
+    // u64::MAX nanoseconds is about 584 years, so saturating here is fine.
+    let nanoseconds: u64 = dur.as_nanos().try_into().unwrap_or(u64::MAX).max(1);
+    blocking_scalar(ticktimer_server(), TicktimerScalar::Sleep { nanoseconds }.into())
+        .expect("failed to send message to ticktimer server");
 }
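The ticktimer protocol in the first patch passes 64-bit nanosecond values through 32-bit-safe scalar words: `Sleep` and `WaitForCondition` split the count into a low and a high word, and `Instant::now`/`SystemTime::now` reassemble the two words returned by `ElapsedNs`/`GetSystemTime`. A minimal sketch of that round trip (helper names are illustrative, not part of the patch):

/// Split a 64-bit nanosecond count into the two scalar words sent to the
/// ticktimer server, low word first, mirroring the `Sleep` encoding above.
fn split_ns(ns: u64) -> (usize, usize) {
    ((ns & 0xffff_ffff) as usize, (ns >> 32) as usize)
}

/// Reassemble the two words returned by `ElapsedNs`/`GetSystemTime`,
/// mirroring `lower as u64 | (upper as u64) << 32` in time.rs.
fn join_ns(lower: usize, upper: usize) -> u64 {
    lower as u64 | (upper as u64) << 32
}

fn main() {
    let ns: u64 = 0x1_2345_6789; // wider than 32 bits, so both words are used
    let (lower, upper) = split_ns(ns);
    assert_eq!(join_ns(lower, upper), ns);
}

Both halves fit in a `usize` even on 32-bit targets, which is why the mask and shift are by 32 rather than by the platform word size.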
From 0e910652ea40ef1492709f5002015d54b8ce5327 Mon Sep 17 00:00:00 2001
From: alex
Date: Wed, 10 Dec 2025 21:02:04 +0100
Subject: [PATCH 2/2] Put a mutex around sys::alloc

---
 library/std/src/sys/alloc/xous.rs | 56 ++++-----------------------------
 1 file changed, 6 insertions(+), 50 deletions(-)

diff --git a/library/std/src/sys/alloc/xous.rs b/library/std/src/sys/alloc/xous.rs
index c7f973b8027..e3b39ee95f4 100644
--- a/library/std/src/sys/alloc/xous.rs
+++ b/library/std/src/sys/alloc/xous.rs
@@ -3,72 +3,28 @@
 use crate::alloc::{GlobalAlloc, Layout, System};
 
-#[cfg(not(test))]
-#[unsafe(export_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE")]
-static mut DLMALLOC: dlmalloc::Dlmalloc = dlmalloc::Dlmalloc::new();
-
-#[cfg(test)]
-unsafe extern "Rust" {
-    #[link_name = "_ZN16__rust_internals3std3sys4xous5alloc8DLMALLOCE"]
-    static mut DLMALLOC: dlmalloc::Dlmalloc;
-}
+static DLMALLOC: crate::sync::Mutex<dlmalloc::Dlmalloc> =
+    crate::sync::Mutex::new(dlmalloc::Dlmalloc::new());
 
 #[stable(feature = "alloc_system_type", since = "1.28.0")]
 unsafe impl GlobalAlloc for System {
     #[inline]
     unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
-        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
-        // Calling malloc() is safe because preconditions on this function match the trait method preconditions.
-        let _lock = lock::lock();
-        unsafe { DLMALLOC.malloc(layout.size(), layout.align()) }
+        unsafe { DLMALLOC.lock().unwrap().malloc(layout.size(), layout.align()) }
     }
 
     #[inline]
     unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
-        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
-        // Calling calloc() is safe because preconditions on this function match the trait method preconditions.
-        let _lock = lock::lock();
-        unsafe { DLMALLOC.calloc(layout.size(), layout.align()) }
+        unsafe { DLMALLOC.lock().unwrap().calloc(layout.size(), layout.align()) }
     }
 
     #[inline]
     unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
-        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
-        // Calling free() is safe because preconditions on this function match the trait method preconditions.
-        let _lock = lock::lock();
-        unsafe { DLMALLOC.free(ptr, layout.size(), layout.align()) }
+        unsafe { DLMALLOC.lock().unwrap().free(ptr, layout.size(), layout.align()) }
     }
 
     #[inline]
     unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-        // SAFETY: DLMALLOC access is guaranteed to be safe because the lock gives us unique and non-reentrant access.
-        // Calling realloc() is safe because preconditions on this function match the trait method preconditions.
- let _lock = lock::lock(); - unsafe { DLMALLOC.realloc(ptr, layout.size(), layout.align(), new_size) } - } -} - -mod lock { - use crate::sync::atomic::Ordering::{Acquire, Release}; - use crate::sync::atomic::{Atomic, AtomicI32}; - - static LOCKED: Atomic = AtomicI32::new(0); - - pub struct DropLock; - - pub fn lock() -> DropLock { - loop { - if LOCKED.swap(1, Acquire) == 0 { - return DropLock; - } - crate::os::xous::ffi::do_yield(); - } - } - - impl Drop for DropLock { - fn drop(&mut self) { - let r = LOCKED.swap(0, Release); - debug_assert_eq!(r, 1); - } + unsafe { DLMALLOC.lock().unwrap().realloc(ptr, layout.size(), layout.align(), new_size) } } }
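A closing note on the futex contract that the first patch leans on: `xous::futex_wait` may return spuriously and `xous::futex_wake` does not report whether anything was actually woken, which is why the new `futex_wait`/`futex_wake` wrappers simply return `false` and every waiter re-checks its predicate. A minimal sketch of that caller-side pattern, using a plain atomic and stub functions as stand-ins for the real xous calls (the stubs are illustrative assumptions, not xous APIs):

use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative stand-ins for xous::futex_wait / xous::futex_wake; a real futex
// may wake spuriously, so correctness never depends on either call.
fn futex_wait_stub(_futex: &AtomicUsize, _expected: usize) {}
fn futex_wake_stub(_futex: &AtomicUsize, _count: usize) {}

/// Block until `state` becomes non-zero, re-checking the predicate after every
/// wakeup. This is the shape of the waiting side in the futex-based Mutex,
/// Condvar, and Parker introduced by the first patch.
fn wait_until_nonzero(state: &AtomicUsize) {
    loop {
        let observed = state.load(Ordering::Acquire);
        if observed != 0 {
            return;
        }
        // Sleep only while the value still matches what we observed;
        // a spurious return simply loops and re-checks.
        futex_wait_stub(state, observed);
    }
}

/// The waking side: publish the new state first, then wake waiters.
/// It does not matter whether anyone was actually asleep.
fn signal(state: &AtomicUsize) {
    state.store(1, Ordering::Release);
    futex_wake_stub(state, usize::MAX);
}

fn main() {
    let state = AtomicUsize::new(0);
    signal(&state);
    wait_until_nonzero(&state); // returns immediately because `state` is non-zero
}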