Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
38 changes: 24 additions & 14 deletions adapter/cmsis/src/os2/thread.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,20 @@ os_adapter! {
}
}

/// Kernel-side SIGTERM handler for CMSIS threads: runs in the signal context
/// of the thread being terminated, performs OS2-level cleanup, then retires
/// the thread via the scheduler.
extern "C" fn cmsis_sigterm_handler(_signum: i32) {
    let me = scheduler::current_thread();
    if let Some(alien) = me.get_alien_ptr() {
        // SAFETY: the alien pointer is installed when the CMSIS thread is
        // created and is assumed to hold a valid `*mut Os2Thread` for this
        // live thread — TODO(review) confirm at the creation site.
        let os2 = unsafe { &mut *(alien.as_ptr() as *mut Os2Thread) };
        exit_os2_thread(os2);
        scheduler::retire_me();
    }
}

/// Signal handler used by `osThreadSuspend`: parks the current thread for an
/// effectively unbounded duration (until it is explicitly resumed).
extern "C" fn cmsis_suspend_handler(_signum: i32) {
    scheduler::suspend_me_for(usize::MAX);
}

impl Os2Thread {
delegate! {
to self.inner() {
Expand Down Expand Up @@ -249,15 +263,9 @@ pub extern "C" fn osThreadNew(
.build();
{
let mut l = t.lock();
l.register_once_signal_handler(libc::SIGTERM, move || {
let current = scheduler::current_thread();
let Some(alien_ptr) = current.get_alien_ptr() else {
return;
};
let t = unsafe { &mut *(alien_ptr.as_ptr() as *mut Os2Thread) };
exit_os2_thread(t);
scheduler::retire_me();
});
// CMSIS threads use SIGTERM as the termination request.
// Bind a persistent kernel-side handler to perform cleanup + retire.
l.install_signal_handler(libc::SIGTERM, cmsis_sigterm_handler);
if merge_attr.stack_mem.is_null() {
let stack_base = t.stack_base();
l.set_cleanup(Entry::Closure(Box::new(move || {
Expand Down Expand Up @@ -462,12 +470,14 @@ pub extern "C" fn osThreadSuspend(thread_id: osThreadId_t) -> osStatus_t {
scheduler::suspend_me_for(usize::MAX);
return osStatus_t_osOK;
}
// FIXME: We should use SIGUSR1 here, however it's not defined yet.
if !t
.lock()
.kill_with_once_handler(libc::SIGHUP, move || scheduler::suspend_me_for(usize::MAX))
// Use SIGUSR1 signal to ask the target thread to self-suspend.
{
return osStatus_t_osErrorResource;
let mut guard = t.lock();
guard.install_signal_handler(libc::SIGUSR1, cmsis_suspend_handler);
// If the signal is already pending, treat it as resource-busy.
if !guard.kill(libc::SIGUSR1) {
return osStatus_t_osErrorResource;
}
}
osStatus_t_osOK
}
Expand Down
5 changes: 5 additions & 0 deletions header/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ pub mod syscalls {
RtSigQueueInfo,
RtSigSuspend,
RtSigTimedWait,
Kill,
Tgkill,
Tkill,
Socket,
Bind,
Connect,
Expand All @@ -83,6 +86,8 @@ pub mod syscalls {
MqTimedSend,
MqTimedReceive,
MqGetSetAttr,
GetPid,
PthreadToTid,
LastNR,
}
}
Expand Down
3 changes: 3 additions & 0 deletions kernel/src/arch/aarch64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -560,3 +560,6 @@ pub(crate) extern "C" fn switch_stack(
)
}
}

// Stub: inter-processor interrupts are not implemented on aarch64 yet, so a
// cross-core reschedule request is silently a no-op on this architecture.
pub fn send_reschedule_ipi_all() {}
3 changes: 3 additions & 0 deletions kernel/src/arch/arm/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -619,6 +619,9 @@ pub extern "C" fn pend_switch_context() {
post_pendsv();
}

// Stub: this port currently runs uniprocessor (UP), so there are no other
// cores to notify — a cross-core reschedule request is a no-op.
pub fn send_reschedule_ipi_all() {}

#[inline(always)]
pub extern "C" fn switch_context(saved_sp_mut: *mut u8, to_sp: usize) {
switch_context_with_hook(saved_sp_mut, to_sp, core::ptr::null_mut());
Expand Down
42 changes: 42 additions & 0 deletions kernel/src/arch/riscv/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,48 @@ pub(crate) const MIE_MEIE: usize = 1 << 11;
static PENDING_SWITCH_CONTEXT: [AtomicBool; NUM_CORES] =
[const { AtomicBool::new(false) }; NUM_CORES];

// QEMU `virt` machines place the CLINT at 0x0200_0000; the per-hart MSIP
// (machine software interrupt pending) registers are 32-bit words at its base.
#[cfg(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32"))]
const CLINT_BASE: usize = 0x0200_0000;

/// MMIO address of the MSIP register for `hart` (one `u32` per hart,
/// starting at `CLINT_BASE`).
#[cfg(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32"))]
#[inline]
fn msip_ptr(hart: usize) -> *mut u32 {
    let offset = hart * core::mem::size_of::<u32>();
    (CLINT_BASE + offset) as *mut u32
}

/// Boards without a known CLINT: signal "no MSIP register" via a null pointer.
#[cfg(not(any(target_board = "qemu_riscv64", target_board = "qemu_riscv32")))]
#[inline]
fn msip_ptr(_hart: usize) -> *mut u32 {
    core::ptr::null_mut()
}

/// Ring the machine software interrupt (MSIP) of every *other* hart so each
/// re-runs its scheduler. No-op on uniprocessor builds, and per-hart no-op
/// when the board has no known MSIP register (`msip_ptr` returns null).
pub fn send_reschedule_ipi_all() {
    if NUM_CORES <= 1 {
        return;
    }
    let me = current_cpu_id();
    for target in (0..NUM_CORES).filter(|&h| h != me) {
        let msip = msip_ptr(target);
        if !msip.is_null() {
            // SAFETY: a non-null `msip_ptr` result is the MMIO address of the
            // target hart's MSIP word on the QEMU virt CLINT; writing 1 pends
            // a machine software interrupt on that hart.
            unsafe { msip.write_volatile(1) };
        }
    }
}

/// Acknowledge a pending software interrupt on the current hart by clearing
/// its MSIP bit; no-op when the board exposes no MSIP register.
pub(crate) fn clear_reschedule_ipi() {
    let msip = msip_ptr(current_cpu_id());
    if !msip.is_null() {
        // SAFETY: a non-null `msip_ptr` result addresses this hart's MSIP
        // MMIO word; writing 0 clears the pending software interrupt.
        unsafe { msip.write_volatile(0) };
    }
}

#[inline]
pub(crate) extern "C" fn pend_switch_context() {
if !sysirq::is_in_irq() {
Expand Down
21 changes: 20 additions & 1 deletion kernel/src/arch/riscv/trap.rs
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,8 @@
// limitations under the License.

use super::{
claim_switch_context, disable_local_irq, enable_local_irq, Context, IsrContext, NR_SWITCH,
claim_switch_context, clear_reschedule_ipi, disable_local_irq, enable_local_irq,
pend_switch_context, Context, IsrContext, NR_SWITCH,
};
use crate::{
boards::handle_plic_irq,
Expand All @@ -34,6 +35,7 @@ use core::{

pub(crate) const INTERRUPT_MASK: usize = 1usize << (usize::BITS - 1);
pub(crate) const TIMER_INT: usize = INTERRUPT_MASK | 0x7;
pub(crate) const SOFT_INT: usize = INTERRUPT_MASK | 0x3;
pub(crate) const ECALL: usize = 0xB;
pub(crate) const EXTERN_INT: usize = INTERRUPT_MASK | 0xB;

Expand Down Expand Up @@ -224,6 +226,17 @@ fn might_switch_context(from: &Context, ra: usize) -> usize {
}
let this_thread = scheduler::current_thread_ref();
let Some(next) = scheduler::next_preferred_thread(this_thread.priority()) else {
let current = scheduler::current_thread();
if current.lock().has_pending_signals()
&& current.state() == thread::RUNNING
&& Thread::id(&current)
!= Thread::id(&scheduler::get_idle_thread(super::current_cpu_id()))
{
return scheduler::switch_current_thread(
old_sp,
scheduler::get_idle_thread(super::current_cpu_id()),
);
}
return old_sp;
};
this_thread.lock().set_saved_sp(old_sp);
Expand All @@ -236,6 +249,12 @@ fn might_switch_context(from: &Context, ra: usize) -> usize {
extern "C" fn handle_trap(ctx: &mut Context, mcause: usize, mtval: usize, cont: usize) -> usize {
let sp = ctx as *const _ as usize;
match mcause {
SOFT_INT => {
clear_reschedule_ipi();
// Ensure we attempt a context switch when leaving the interrupt.
pend_switch_context();
might_switch_context(ctx, cont)
}
EXTERN_INT => {
handle_plic_irq(ctx, mcause, mtval);
sp
Expand Down
4 changes: 4 additions & 0 deletions kernel/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -536,6 +536,10 @@ mod tests {

// Should not hang.
#[test]
#[cfg_attr(
any(target_board = "qemu_virt64_aarch64"),
ignore = "IPI hasn't been implemented yet"
)]
fn test_simple_signal() {
let a = Arc::new(ConstBarrier::<{ 2 }>::new());
let a_cloned = a.clone();
Expand Down
61 changes: 59 additions & 2 deletions kernel/src/scheduler/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -143,11 +143,33 @@ fn prepare_signal_handling(t: &ThreadNode) {
if !l.activate_signal_context() {
return;
};

// Pick one deliverable signal so we can choose the correct stack for this delivery
let pending = l.pending_signals();
let mut deliverable: i32 = 0;
for signum in 1..32 {
if pending & (1 << signum) == 0 {
continue;
}
if l.is_signal_blocked(signum) {
continue;
}
deliverable = signum;
break;
}

let ctx = l.saved_sp() as *mut arch::Context;
let ctx = unsafe { &mut *ctx };
// Update ctx so that signal context will be restored.
ctx.set_return_address(arch::switch_stack as usize)
.set_arg(0, l.signal_handler_sp())
.set_arg(
0,
if deliverable != 0 {
l.signal_delivery_sp(deliverable)
} else {
l.signal_handler_sp()
},
)
.set_arg(1, signal::handler_entry as usize);
}

Expand Down Expand Up @@ -247,7 +269,7 @@ pub(crate) extern "C" fn save_context_finish_hook(hook: Option<&mut ContextSwitc
}
}

fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize {
pub(crate) fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize {
let to_sp = next.saved_sp();
let ok = next.transfer_state(thread::READY, thread::RUNNING);
debug_assert!(ok);
Expand All @@ -259,6 +281,11 @@ fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize {
{
next.lock().set_start_cycles(cycles);
}
// Prepare signal handling before switching into the next thread.
// This keeps behavior consistent across switch paths (with/without hooks).
if next.lock().has_pending_signals() {
prepare_signal_handling(&next);
}
let old = set_current_thread(next);
#[cfg(debugging_scheduler)]
crate::trace!(
Expand Down Expand Up @@ -289,6 +316,36 @@ fn switch_current_thread(old_sp: usize, next: ThreadNode) -> usize {
to_sp
}

// Added for signal processing: there is *no* single code point that returns
// to "user space", because both dsc (direct syscall) and swi (svc syscall)
// paths must be supported. This is a workaround used when a guaranteed
// context switch is needed to process per-thread work such as pending
// signals.
pub fn yield_me_definitely() {
    // Bail out quietly if the scheduler has not been brought up yet.
    if unlikely(!is_schedule_ready()) {
        return;
    }
    debug_assert!(arch::local_irq_enabled());
    let pg = thread::Thread::try_preempt_me();
    if !pg.preemptable() {
        // Cannot switch away right now; wait for an interrupt instead so the
        // pending work is picked up on the next scheduling opportunity.
        arch::idle();
        return;
    }
    // Release the preempt guard before switching — the switch path must not
    // run with preemption held.
    drop(pg);
    yield_to_next_unconditionally();
}

/// Switch away from the current thread unconditionally: pick the next ready
/// thread (falling back to the idle thread when the ready queue is empty) and
/// perform the context switch, marking the outgoing thread READY via the hook.
fn yield_to_next_unconditionally() {
    debug_assert!(arch::local_irq_enabled());
    let next = next_ready_thread().unwrap_or_else(idle::current_idle_thread);
    let next_sp = next.saved_sp();
    let prev = current_thread_ref();
    let prev_sp_slot = prev.saved_sp_ptr();
    let mut hooks = ContextSwitchHookHolder::new(next);
    hooks.set_prev_thread_target_state(thread::READY);
    arch::switch_context_with_hook(prev_sp_slot as *mut u8, next_sp, &mut hooks as *mut _);
    debug_assert!(arch::local_irq_enabled());
}

pub(crate) extern "C" fn relinquish_me_and_return_next_sp(old_sp: usize) -> usize {
debug_assert!(!arch::local_irq_enabled());
debug_assert!(!crate::irq::is_in_irq());
Expand Down
50 changes: 34 additions & 16 deletions kernel/src/signal/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@

use crate::{arch, scheduler, thread, thread::ThreadNode};

pub mod syscall;

fn handle_signal_fallback(signum: i32) {
if signum != libc::SIGTERM {
return;
Expand All @@ -22,35 +24,51 @@ fn handle_signal_fallback(signum: i32) {
}

fn handle_signal(t: &ThreadNode, signum: i32) {
let mut l = t.lock();
let Some(handler) = l.take_signal_handler(signum) else {
drop(l);
// Don't use once handler now
// POSIX-ish: consult installed sigaction.
let sa = {
let l = t.lock();
l.get_sigaction(signum)
};

// None => SIG_DFL in our kernel representation.
let Some(handler) = sa.sa_handler else {
// almost all non realtime signals' default action is terminate the process
return handle_signal_fallback(signum);
};
drop(l);
handler();

handler(signum);
}

// This routine is supposed to be executed in THREAD mode.
#[inline(never)]
pub(crate) unsafe extern "C" fn handler_entry(_sp: usize, _old_sp: usize) {
let current = scheduler::current_thread();
let sigset = current.lock().pending_signals();
for i in 0..32 {
if sigset & (1 << i) == 0 {
continue;
for signum in 1..32 {
// Deliver only unblocked signals.
// NOTE: pending uses kernel numbering (bit = 1<<signum).
// blocked uses POSIX numbering (bit = 1<<(signum-1)).
loop {
let should_deliver = {
let mut l = current.lock();
(l.pending_signals() & (1 << signum)) != 0 && !l.is_signal_blocked(signum)
};
if !should_deliver {
break;
}

handle_signal(&current, signum);
// Consume one pending instance.
current.lock().clear_signal(signum);
}
handle_signal(&current, i);
current.lock().clear_signal(i);
}
{
let mut l = current.lock();
l.deactivate_signal_context();
// after handling signals, the saved_sp should be restored to thread context,
// add an assert here to make sure?
}
let saved_sp = current.saved_sp();
current.transfer_state(thread::RUNNING, thread::READY);
let mut hook_holder = scheduler::ContextSwitchHookHolder::new(current);
// We are switching from current thread's signal context to its thread
// context.
arch::restore_context_with_hook(saved_sp, &mut hook_holder as *mut _);
// Restore the original thread context directly, return to same thread.
arch::restore_context_with_hook(saved_sp, core::ptr::null_mut());
}
Loading