From 3b07d28dfd5714596a672980d7b0f4535085e6da Mon Sep 17 00:00:00 2001 From: ZhiyuanSue <2262387848@qq.com> Date: Tue, 3 Dec 2024 16:11:24 +0800 Subject: [PATCH] finish the functions of reply --- sel4_task/src/reply.rs | 108 +++++++++++++++++++++++++++++++++++++++-- sel4_task/src/tcb.rs | 25 ++++++++++ 2 files changed, 128 insertions(+), 5 deletions(-) diff --git a/sel4_task/src/reply.rs b/sel4_task/src/reply.rs index 218e473..feb9214 100644 --- a/sel4_task/src/reply.rs +++ b/sel4_task/src/reply.rs @@ -1,6 +1,8 @@ -use sel4_common::structures_gen::call_stack; +use core::intrinsics::likely; -use crate::{set_thread_state, tcb_t, ThreadState}; +use sel4_common::{structures_gen::call_stack, utils::convert_to_mut_type_ref}; + +use crate::{sched_context::sched_context_t, set_thread_state, tcb_t, ThreadState}; pub type reply_t = reply; #[repr(C)] @@ -25,12 +27,108 @@ impl reply { set_thread_state(tcb, ThreadState::ThreadStateInactive); } pub fn push(&mut self, tcb_caller: &mut tcb_t, tcb_callee: &mut tcb_t, canDonate: bool) { - // TODO: MCS + let sc_donated = convert_to_mut_type_ref::<sched_context_t>(tcb_caller.tcbSchedContext); + + assert!(tcb_caller.get_ptr() != 0); + assert!(self.get_ptr() != 0); + assert!(self.replyTCB == 0); + + assert!(self.replyPrev.get_callStackPtr() == 0); + assert!(self.replyNext.get_callStackPtr() == 0); + + /* tcb caller should not be in an existing call stack */ + assert!(tcb_caller.tcbState.get_replyObject() == 0); + + /* unlink callee and reply - they may not have been linked already, + * if this rendezvous is occurring when seL4_Recv is called, + * however, no harm in overwriting 0 with 0 */ + tcb_callee.tcbState.set_replyObject(0); + + /* link caller and reply */ + self.replyTCB = tcb_caller.get_ptr(); + tcb_caller.tcbState.set_replyObject(self.get_ptr() as u64); + set_thread_state(tcb_caller, ThreadState::ThreadStateBlockedOnReply); + + if sc_donated.get_ptr() != 0 && tcb_callee.tcbSchedContext == 0 && canDonate { + let old_caller = 
convert_to_mut_type_ref::<reply_t>(sc_donated.scReply); + + /* check stack integrity */ + assert!( + old_caller.get_ptr() == 0 + || old_caller.replyNext.get_callStackPtr() == sc_donated.get_ptr() as u64 + ); + + /* push on to stack */ + self.replyPrev = call_stack::new(old_caller.get_ptr() as u64, 0); + if old_caller.get_ptr() != 0 { + old_caller.replyNext = call_stack::new(self.get_ptr() as u64, 0); + } + self.replyNext = call_stack::new(sc_donated.get_ptr() as u64, 1); + sc_donated.scReply = self.get_ptr(); + + /* now do the actual donation */ + sc_donated.schedContext_donate(tcb_callee); + } } pub fn pop(&mut self, tcb: &mut tcb_t) { - // TODO: MCS + assert!(self.get_ptr() != 0); + assert!(self.replyTCB == tcb.get_ptr()); + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + assert!(tcb.tcbState.get_replyObject() as usize == self.get_ptr()); + + let next_ptr = self.replyNext.get_callStackPtr() as usize; + let prev_ptr = self.replyPrev.get_callStackPtr() as usize; + + if likely(next_ptr != 0) { + assert!(self.replyNext.get_isHead() != 0); + + convert_to_mut_type_ref::<sched_context_t>(next_ptr).scReply = prev_ptr; + if prev_ptr != 0 { + convert_to_mut_type_ref::<reply_t>(prev_ptr).replyNext = self.replyNext.clone(); + assert!( + convert_to_mut_type_ref::<reply_t>(prev_ptr) + .replyNext + .get_isHead() + != 0 + ); + } + + /* give it back */ + if tcb.tcbSchedContext == 0 { + /* only give the SC back if our SC is NULL. This prevents + * strange behaviour when a thread is bound to an sc while it is + * in the BlockedOnReply state. 
The semantics in this case are that the + * SC cannot go back to the caller if the caller has received another one */ + convert_to_mut_type_ref::<sched_context_t>(next_ptr).schedContext_donate(tcb); + } + } + + self.replyPrev = call_stack::new(0, 0); + self.replyNext = call_stack::new(0, 0); + self.unlink(tcb); } pub fn remove(&mut self, tcb: &mut tcb_t) { - // TODO: MCS + assert!(self.replyTCB == tcb.get_ptr()); + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + assert!(tcb.tcbState.get_replyObject() == self.get_ptr() as u64); + + let next_ptr = self.replyNext.get_callStackPtr() as usize; + let prev_ptr = self.replyPrev.get_callStackPtr() as usize; + + if likely(next_ptr != 0 && self.replyNext.get_isHead() != 0) { + /* head of the call stack -> just pop */ + self.pop(tcb); + } else { + if next_ptr != 0 { + /* not the head, remove from middle - break the chain */ + convert_to_mut_type_ref::<reply_t>(next_ptr).replyPrev = call_stack::new(0, 0); + } + if prev_ptr != 0 { + convert_to_mut_type_ref::<reply_t>(prev_ptr).replyNext = call_stack::new(0, 0); + } + self.replyPrev = call_stack::new(0, 0); + self.replyNext = call_stack::new(0, 0); + self.unlink(tcb); + } } } diff --git a/sel4_task/src/tcb.rs b/sel4_task/src/tcb.rs index 9a48db0..ad6bd9b 100644 --- a/sel4_task/src/tcb.rs +++ b/sel4_task/src/tcb.rs @@ -1024,4 +1024,29 @@ pub fn tcb_Release_Dequeue() -> *mut tcb_t { #[cfg(feature = "KERNEL_MCS")] pub fn reply_remove_tcb(tcb: &mut tcb_t) { // TODO: MCS + + use sel4_common::structures_gen::call_stack; + + use crate::reply::reply_t; + assert!(tcb.tcbState.get_tsType() == ThreadState::ThreadStateBlockedOnReply as u64); + let reply = convert_to_mut_type_ref::<reply_t>(tcb.tcbState.get_replyObject() as usize); + + let next_ptr = reply.replyNext.get_callStackPtr() as usize; + let prev_ptr = reply.replyPrev.get_callStackPtr() as usize; + + if next_ptr != 0 { + if reply.replyNext.get_isHead() != 0 { + convert_to_mut_type_ref::<sched_context_t>(next_ptr).scReply = 0; + } else { 
convert_to_mut_type_ref::<reply_t>(next_ptr).replyPrev = call_stack::new(0, 0); + } + } + + if prev_ptr != 0 { + convert_to_mut_type_ref::<reply_t>(prev_ptr).replyNext = call_stack::new(0, 0); + } + + reply.replyPrev = call_stack::new(0, 0); + reply.replyNext = call_stack::new(0, 0); + reply.unlink(tcb); }