fully implement scheduler; keyboard driver still needs a rewrite

Husky Susbaka 2023-03-14 09:17:29 -07:00
parent a4e1169a89
commit 0f900e05c8
GPG key ID: 6B3D8CB511646891
10 changed files with 163 additions and 131 deletions

View file

@@ -63,7 +63,7 @@
mtsprg2 sp
// move stack pointer to the exception stack
lis sp, _exception_stack@h
lis sp, _exception_stack@ha
addi sp, sp, _exception_stack@l
subi sp, sp, ALIGN_UP(ISTATE_SIZE, 16)
@@ -133,31 +133,30 @@
// called when the decrementer exception occurs (i.e. when it reaches 0)
.global _decrementer_exception_wrapper
.extern _test_decrementer
.extern VAP_KERN_BASE
_decrementer_exception_wrapper:
// save r12 real quick
// temporarily save r12
mtsprg2 r12
// disable interrupts, enable instruction relocation and data relocation
mfmsr r12
ori r12, r12, ((1 << 5) | (1 << 4)) // enable instruction relocation and data relocation
andi. r12, r12, ~(1 << 15)@l // disable interrupts
mtmsr r12
// load r12
mfsprg2 r12
isync
CTX_SAVE
lis r12, _decrementer_exception_definition@h
lis r12, _decrementer_exception_definition@ha
addi r12, r12, _decrementer_exception_definition@l
mtsrr0 r12
mfmsr r12
mtsrr1 r12
rfi
// end of exceptions
.global VAP_VECTOR_END
VAP_VECTOR_END:
.section .text
// exception stack
.global _vap_exception_stack
.org 0x8000 // grows down
@@ -171,15 +170,14 @@ _decrementer_exception_definition:
ori r12, r12, ((1 << 5) | (1 << 4)) // enable instruction relocation and data relocation
andi. r12, r12, ~(1 << 15)@l // disable interrupts
mtsrr1 r12
mtmsr r12
isync
// copy sp to r3
mr r3, sp
// load the address
lis r12, scheduler_callback@h
addi r12, r12, scheduler_callback@l
mtsrr0 r12
lis r12, scheduler_callback@ha
addi r12, r12, scheduler_callback@l
mtsrr0 r12
rfi
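
For reference on the @h → @ha swaps above: `addi` sign-extends its 16-bit
operand, so when bit 15 of an address's low half is set, the high half loaded
by `lis` must be pre-incremented; @ha applies that adjustment, @h does not.
A minimal sketch (the address is illustrative, not from this codebase):

    let addr: u32 = 0x1234_8000;
    let lo = (addr & 0xFFFF) as i16 as i32;     // what addi adds: -0x8000
    let h = addr >> 16;                         // @h  -> 0x1234
    let ha = addr.wrapping_add(0x8000) >> 16;   // @ha -> 0x1235
    assert_eq!(((h << 16) as i32).wrapping_add(lo) as u32, 0x1233_8000); // off by 0x10000
    assert_eq!(((ha << 16) as i32).wrapping_add(lo) as u32, addr);       // correct

The MSR bits juggled by the `ori`/`andi.` pairs correspond, assuming the
standard 32-bit PowerPC MSR layout, to:

    const MSR_EE: u32 = 1 << 15; // external-interrupt enable (cleared here)
    const MSR_IR: u32 = 1 << 5;  // instruction address translation (set here)
    const MSR_DR: u32 = 1 << 4;  // data address translation (set here)

One thing worth knowing in passing: `andi.` zero-extends its immediate, so the
~(1 << 15)@l mask also clears the upper half of the MSR (POW, ILE), which is
harmless only if those bits are already zero.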

View file

@@ -4,16 +4,16 @@ OUTPUT_ARCH("powerpc:common")
ENTRY(_start)
SECTIONS {
VAP_KERN_START = .;
VAP_KERN_BASE = 0x10000;
.text : {
VAP_KERN_BASE = .;
*(.text .text.*)
}
.vector_container : {
.vector_container VAP_KERN_BASE : {
VAP_VECTOR_LOCATION = .;
*(V_TEXT_GENESIS)
*(V_VECTOR_DEFS)
}
.text : {
*(.text .text.*)
}
.rodata : {
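
Net effect of the section reshuffle: the exception-vector code now sits at the
fixed kernel base (0x10000) ahead of .text, so VAP_VECTOR_LOCATION and
VAP_KERN_BASE coincide. A hypothetical sanity check on the Rust side, using
the symbols declared as extern functions later in this commit:

    // both addresses are filled in by the linker; equal under the new script
    debug_assert_eq!(VAP_VECTOR_LOCATION as usize, VAP_KERN_BASE as usize);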

View file

@@ -4,7 +4,7 @@ use core::arch::asm;
use core::sync::atomic::AtomicBool;
use ieee1275::IHandle;
use ieee1275::services::{Args, CallMethodArgs};
use crate::{debug, kernel_main, memory, ofhelpers, println, scheduling};
use crate::{debug, kernel_main, kernel_callback, macros, memory, ofhelpers, println, scheduling};
use crate::hacks::patch_interrupt_vectors;
use crate::memory::align_up;
use crate::prom::PROMHNDL;
@@ -27,6 +27,7 @@ extern "C" {
// the following aren't functions, but rather pointers to the locations
// unfortunately, rust doesn't let me define extern variables, so i have to
// define these as functions
fn VAP_KERN_BASE();
fn VAP_KERN_END();
fn VAP_VECTOR_LOCATION();
fn VAP_VECTOR_END();
@@ -35,6 +36,12 @@ extern "C" {
static FIRED: AtomicBool = AtomicBool::new(false);
pub fn bootstrap() {
let freq = get_primary_cpu_frequency();
debug!("primary cpu frequency: {} Hz", freq);
unsafe {
DECREMENTER_VALUE = freq;
}
// create translation table
let (translation_table, page_count) = create_translation_table();
let phys_translation_table = translate_to_phys(translation_table as usize);
@@ -59,8 +66,13 @@ pub extern "C" fn so_fake() -> ! {
let available = get_available_memory();
debug!("available memory: {:?}", available);
memory::VapAllocator::init(available);
debug!("initialized vap allocator");
loop {}
debug!("initialised vap allocator");
debug!("patching interrupt vectors");
patch_interrupt_vectors();
debug!("patched interrupt vectors");
debug!("kernel_main: {:x}", kernel_main as usize);
debug!("kernel_call: {:x}", kernel_callback as usize);
kernel_main()
}
fn create_translation_table() -> (usize, usize) {
@@ -72,6 +84,7 @@ fn create_translation_table() -> (usize, usize) {
for i in 0..pages {
let offset = i << PAGE_WIDTH;
let virt_addr = translate_to_phys(offset);
//debug!("phys_addr: {:x}, virt_addr: {:x}", virt_addr, virt_addr + offset);
unsafe {
translation_table.add(i).write_volatile(virt_addr as u32);
}
@@ -80,13 +93,6 @@ fn create_translation_table() -> (usize, usize) {
(translation_table as usize, pages)
}
pub fn test_scheduler() -> ! {
debug!("test_scheduler");
loop {
debug!("value of dec: {}", unsafe { read_decrementer() });
}
}
pub fn translate_to_phys(addr: usize) -> usize {
let prom = unsafe { &mut PROMHNDL };
let chosen = prom.get().find_device("/chosen\0").unwrap();
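
On `DECREMENTER_VALUE = freq` above: the decrementer raises its exception when
it counts down past zero, so preloading it with the reported frequency yields
a scheduling quantum of roughly one second, assuming that frequency matches
the decrementer's actual tick rate. A hypothetical variant for a shorter
timeslice:

    // sketch: ~10 ms quantum instead of ~1 s (freq is ticks per second)
    unsafe { DECREMENTER_VALUE = freq / 100 };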

View file

@@ -25,6 +25,8 @@ pub fn sleep() {
extern {
fn _decrementer_exception_wrapper(); // defined in assembly, will be sent to 0x900
fn _decrementer_exception_definition();
fn scheduler_callback();
fn VAP_VECTOR_END();
}
@@ -45,4 +47,7 @@ pub fn patch_interrupt_vectors() {
}
debug!("patched interrupt vectors");
debug!("_decrementer_exception_definition: {:x}", _decrementer_exception_definition as usize);
debug!("scheduler_callback: {:x}", scheduler_callback as usize);
}
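
For orientation, patch_interrupt_vectors presumably boils down to copying the
wrapper's bytes over the architectural decrementer vector at 0x900, as the
comment above says. A sketch of that shape, assuming the wrapper is
position-independent, that WRAPPER_LEN is known, and eliding the dcbst/icbi
cache flush a real patch needs:

    const WRAPPER_LEN: usize = 0x100; // assumption: one 256-byte vector slot
    let src = _decrementer_exception_wrapper as usize as *const u8;
    unsafe { core::ptr::copy_nonoverlapping(src, 0x900 as *mut u8, WRAPPER_LEN) };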

View file

@@ -109,6 +109,7 @@ use core::panic::PanicInfo;
use core::sync::atomic::{AtomicBool, AtomicU8, Ordering};
use ieee1275::prom_init;
use ieee1275::services::Args;
use crate::bootstrap::disable_decrementer;
use crate::colourdefs::{COMMUNISM_RED, VAPOREON_BLACK, VAPOREON_BLUE, VAPOREON_CUM, VAPOREON_DARK};
use crate::framebuffer::{OFColour, OFFramebuffer};
use crate::prom::PROMHNDL;
@@ -154,36 +155,38 @@ pub fn pls_redraw() {
NEED_REDRAW.store(true, core::sync::atomic::Ordering::Relaxed);
}
pub fn kernel_callback() {
if NEED_REDRAW.load(core::sync::atomic::Ordering::Relaxed) {
redraw_all();
}
let wm_redraw_status = unsafe {
let wm = &mut WINDOW_MANAGER;
wm.handle_requests();
wm.need_redraw()
};
match wm_redraw_status {
NeedRedrawResult::DontNeedRedraw => {}
NeedRedrawResult::OptimizeRedraw => {
unsafe { (&mut WINDOW_MANAGER).draw_optimised() }
pub fn kernel_callback() -> ! {
loop {
if NEED_REDRAW.load(core::sync::atomic::Ordering::Relaxed) {
redraw_all();
}
NeedRedrawResult::AllRedraw => {
pls_redraw();
}
}
let try_read = keyboard::read(1);
if let Ok(read) = try_read {
if !read.is_empty() {
for c in read {
unsafe { (&mut WINDOW_MANAGER).send_input(c) }
let wm_redraw_status = unsafe {
let wm = &mut WINDOW_MANAGER;
wm.handle_requests();
wm.need_redraw()
};
match wm_redraw_status {
NeedRedrawResult::DontNeedRedraw => {}
NeedRedrawResult::OptimizeRedraw => {
unsafe { (&mut WINDOW_MANAGER).draw_optimised() }
}
NeedRedrawResult::AllRedraw => {
pls_redraw();
}
}
} else {
panic!("read error: {:?}", try_read);
//let try_read = keyboard::read(1);
//if let Ok(read) = try_read {
// if !read.is_empty() {
// for c in read {
// unsafe { (&mut WINDOW_MANAGER).send_input(c) }
// }
// }
//} else {
// panic!("read error: {:?}", try_read);
//}
}
}
@@ -222,13 +225,12 @@ pub fn kernel_main() -> ! {
debug!("setting default background");
default_bg();
println!("initializing scheduler");
scheduling::add_task(None, kernel_callback).expect("failed to add kernel callback task");
debug!("constructing terminal window");
terminal::construct_terminal_window();
println!("ready!");
println!();
loop {
kernel_callback();
}
//macros::USE_RAW.store(false, core::sync::atomic::Ordering::SeqCst);
unsafe { scheduling::init_scheduler() };
loop {}
}
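
Since kernel_callback is now a scheduled task (note the new `-> !`), tasks
never return; anything that finishes its work must deschedule itself
explicitly. A sketch of the expected shape, with the payload left
hypothetical:

    fn example_task() -> ! {
        // ... some finite amount of work ...
        scheduling::die() // push our uuid to the descheduler queue and spin
    }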

View file

@@ -56,6 +56,16 @@ unsafe impl GlobalAlloc for DynamicAllocator {
}
}
pub fn is_allocator_unlocked() -> bool {
match ALLOCATOR_STAGE.load(Ordering::Relaxed) {
0 => false,
1 => {
unsafe { VAP_ALLOCATOR.try_lock().is_some() } // todo: check if this is correct
}
_ => false,
}
}
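// stage meanings, inferred from how scheduler_callback toggles them:
//   0 = allocator not initialised yet
//   1 = allocator live; "unlocked" additionally requires winning try_lock
//   2 = allocations forbidden (forced around the scheduler's critical section)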
const BLOCK_SIZES: &[usize] = &[8, 16, 32, 64, 128, 256, 512, 1024, 2048];
struct FixedSizeBlockNode {

View file

@@ -1,5 +1,6 @@
use alloc::vec;
use alloc::vec::Vec;
use core::arch::asm;
use core::sync::atomic::{AtomicBool, AtomicUsize};
use crate::debug;
@@ -26,9 +27,10 @@ impl<T: Clone> MessageQueue<T> {
}
pub fn push(&mut self, item: T) -> Result<(),()> {
let head = self.head.fetch_add(1, core::sync::atomic::Ordering::AcqRel);
let mut head = self.head.fetch_add(1, core::sync::atomic::Ordering::AcqRel);
if head >= self.queue.len() {
self.head.store(0, core::sync::atomic::Ordering::Release);
head = 0;
}
let (item_out, writing) = &mut self.queue[head];
if writing.load(core::sync::atomic::Ordering::Acquire) {
@@ -36,6 +38,7 @@ impl<T: Clone> MessageQueue<T> {
return Err(());
}
*item_out = Some(item);
writing.store(true, core::sync::atomic::Ordering::Release);
Ok(())
}
@@ -55,7 +58,6 @@ impl<T: Clone> MessageQueue<T> {
pub fn is_empty(&self) -> bool {
let head = self.head.load(core::sync::atomic::Ordering::SeqCst);
let tail = self.tail;
debug!("head: {}, tail: {}", head, tail);
head == tail
}
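
The push fix matters on wrap-around: with a capacity of 4 and head == 4, the
old code reset the shared index but still wrote through the stale local one,
indexing queue[4] out of bounds; reusing the wrapped value closes that hole.
The added writing.store(true, Release) marks a slot occupied only after the
item is in place, pairing with the consumer's Acquire load. A small usage
sketch:

    let mut q: MessageQueue<u32> = MessageQueue::new(4);
    q.push(7).expect("slot free"); // the Release store publishes the item
    assert!(!q.is_empty());

One caveat to keep in mind: fetch_add followed by a separate store(0) is not
one atomic wrap, so this only stays sound while each queue has a single
producer at a time.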

View file

@@ -1,6 +1,6 @@
use alloc::format;
use alloc::string::{String, ToString};
use crate::{println, scheduling};
use crate::{debug, println, scheduling};
use crate::scheduling::die;
//mod huskyforth;
@@ -18,6 +18,7 @@ pub fn run(input: &str) -> Result<(), String> {
Err("huskyforth is temporarily disabled ):".to_string())
},
"testwrite" => {
debug!("testwrite: {:x}", testwrite as usize);
println!("adding task");
scheduling::add_task(None, testwrite).expect("failed to add task");
println!("added task");

View file

@@ -4,8 +4,10 @@ use alloc::vec::Vec;
use core::alloc::Layout;
use core::arch::asm;
use core::cell::{LazyCell, UnsafeCell};
use core::sync::atomic::Ordering;
use spin::Mutex;
use crate::bootstrap::{disable_decrementer, enable_decrementer, ofw_alloc, ofw_free, read_msr, set_decrementer, translate_to_phys, write_msr};
use crate::{debug, println};
use crate::{debug, memory, println};
use crate::message_queue::MessageQueue;
#[derive(Copy, Clone, Debug, Default, PartialEq, Eq)]
@@ -76,8 +78,8 @@ static mut SCHEDULER: Scheduler = Scheduler {
next_task: 0,
};
pub static mut SCHEDULER_QUEUE: LazyCell<UnsafeCell<MessageQueue<Task>>> = LazyCell::new(|| UnsafeCell::new(MessageQueue::new(128)));
pub static mut DESCHEDULER_QUEUE: LazyCell<UnsafeCell<MessageQueue<u64>>> = LazyCell::new(|| UnsafeCell::new(MessageQueue::new(128)));
pub static mut SCHEDULER_QUEUE: LazyCell<Mutex<MessageQueue<Task>>> = LazyCell::new(|| Mutex::new(MessageQueue::new(128)));
pub static mut DESCHEDULER_QUEUE: LazyCell<Mutex<MessageQueue<u64>>> = LazyCell::new(|| Mutex::new(MessageQueue::new(128)));
pub static mut DECREMENTER_VALUE: u32 = 0; // will be set to the proper value at runtime
@@ -96,7 +98,9 @@ pub unsafe fn init_scheduler() {
enable_decrementer();
}
pub unsafe extern "C" fn finalize_scheduler(savestate: usize) -> ! {
pub unsafe extern "C" fn finalize_scheduler(savestate: usize, prev_allocator_stage: u8) -> ! {
// restore the previous allocator stage
memory::ALLOCATOR_STAGE.store(prev_allocator_stage, Ordering::SeqCst);
set_decrementer(DECREMENTER_VALUE);
_vap_return_from_exception(savestate);
}
@@ -105,9 +109,6 @@ pub unsafe extern "C" fn finalize_scheduler(savestate: usize) -> ! {
/// so we should be careful with what we do here
#[no_mangle]
pub extern "C" fn scheduler_callback(savestate: *const SaveState) -> ! {
debug!("value of msr: {:x}", unsafe { read_msr() });
debug!("value of savestate msr: {:x}", unsafe { (*savestate).srr1 });
debug!("value of savestate srr0: {:x}", unsafe { (*savestate).srr0 });
// ensure that everything's been written to memory
unsafe {
asm!("sync");
@@ -116,48 +117,51 @@ pub extern "C" fn scheduler_callback(savestate: *const SaveState) -> ! {
let scheduler = unsafe { &mut SCHEDULER };
// add tasks from the queue
while let Some(task) = unsafe { (*SCHEDULER_QUEUE.get()).pop() } {
let mut task = (*task).clone();
task.uuid = scheduler.tasks.len() as u64 + 1;
// allocate 1MiB of memory for the task's stack
let stack = ofw_alloc(Layout::from_size_align(0x100000, 0x100000).unwrap());
// set the stack pointer to the end of the stack
task.savestate.sp = stack as u32 + 0x100000;
task.stack_base = stack as usize;
// these tasks require allocations, so only perform them if the allocator is unlocked
if memory::is_allocator_unlocked() {
if unsafe { &SCHEDULER_QUEUE }.try_lock().is_some() {
// add tasks from the queue
while let Some(task) = unsafe { &SCHEDULER_QUEUE }.lock().pop() {
let mut task = (*task).clone();
task.uuid = scheduler.tasks.len() as u64 + 1;
// allocate 1MiB of memory for the task's stack
let stack = unsafe { alloc::alloc::alloc(Layout::from_size_align_unchecked(0x100000, 0x100000)) };
// set the stack pointer to the end of the stack
task.savestate.sp = stack as u32 + 0x100000;
task.stack_base = stack as usize;
debug!("added task {} to scheduler", task.uuid);
debug!("new task: {:?}", task);
debug!("current task: {:?}", scheduler.current_task);
debug!("new task's srr0: {:x}", task.savestate.srr0);
debug!("next task: {}", scheduler.next_task);
debug!("tasks.len(): {}", scheduler.tasks.len());
// if the next task is the same as tasks.len(), then we need to push the new task to the end of the queue
// otherwise, we need to insert it at the next task index and set the next task to the next empty index or the end of the queue
if scheduler.next_task == scheduler.tasks.len() as u64 {
scheduler.tasks.push(Some(task));
scheduler.next_task += 1;
} else {
scheduler.tasks.insert(scheduler.next_task as usize, Some(task));
// find the next empty index
let mut next_empty_index = scheduler.next_task as usize;
while next_empty_index < scheduler.tasks.len() && scheduler.tasks[next_empty_index].is_some() {
next_empty_index += 1;
// if the next task is the same as tasks.len(), then we need to push the new task to the end of the queue
// otherwise, we need to insert it at the next task index and set the next task to the next empty index or the end of the queue
if scheduler.next_task == scheduler.tasks.len() as u64 {
scheduler.tasks.push(Some(task));
scheduler.next_task += 1;
} else {
scheduler.tasks.insert(scheduler.next_task as usize, Some(task));
// find the next empty index
let mut next_empty_index = scheduler.next_task as usize;
while next_empty_index < scheduler.tasks.len() && scheduler.tasks[next_empty_index].is_some() {
next_empty_index += 1;
}
scheduler.next_task = next_empty_index as u64;
}
}
}
if unsafe { &DESCHEDULER_QUEUE }.try_lock().is_some() {
// for each task in the descheduler queue, remove it from the scheduler
// and free its stack
while let Some(uuid) = unsafe { &DESCHEDULER_QUEUE }.lock().pop() {
let index = *uuid as usize - 1;
let task = scheduler.tasks[index].take().unwrap();
// free the stack
unsafe { alloc::alloc::dealloc(task.stack_base as *mut u8, Layout::from_size_align_unchecked(0x100000, 0x100000)) };
}
scheduler.next_task = next_empty_index as u64;
}
}
// for each task in the descheduler queue, remove it from the scheduler
// and free its stack
while let Some(uuid) = unsafe { (*DESCHEDULER_QUEUE.get()).pop() } {
println!("descheduling task {}", *uuid);
let index = *uuid as usize - 1;
let task = scheduler.tasks[index].take().unwrap();
// free the stack
ofw_free(task.stack_base as *mut u8, Layout::from_size_align(0x100000, 0x100000).unwrap());
println!("freed stack for task {}", task.uuid);
}
let prev_allocator_stage = memory::ALLOCATOR_STAGE.load(Ordering::SeqCst);
memory::ALLOCATOR_STAGE.store(2, Ordering::SeqCst); // DO NOT DO ANY ALLOCATIONS AFTER THIS LINE
// if current task is 0, don't save it
if scheduler.current_task != 0 {
@@ -165,15 +169,14 @@ pub extern "C" fn scheduler_callback(savestate: *const SaveState) -> ! {
let index = scheduler.current_task as usize - 1;
// clone the savestate into the task's savestate
scheduler.tasks[index].as_mut().unwrap().savestate = unsafe { (*savestate).clone() };
debug!("saved savestate for task {}", scheduler.current_task);
// copy the savestate into the task's savestate
scheduler.tasks[index].as_mut().unwrap().savestate = unsafe { *savestate };
}
// if we don't have any tasks to run, return to the current task
if scheduler.tasks.is_empty() {
unsafe {
finalize_scheduler(savestate as usize);
finalize_scheduler(savestate as usize, prev_allocator_stage);
}
}
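// recap of the allocator-stage dance: the stage snapshotted above is
// forced to 2 (allocations forbidden) while task state is in flux, and
// finalize_scheduler restores the snapshot just before rfi-ing back out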
@@ -202,18 +205,17 @@ pub extern "C" fn scheduler_callback(savestate: *const SaveState) -> ! {
// set the current task to the task with the highest priority
scheduler.current_task = scheduler.tasks[index_with_highest_priority].as_mut().unwrap().uuid;
debug!("now running task {} with srr0 {:x}", scheduler.current_task, scheduler.tasks[index_with_highest_priority].as_mut().unwrap().savestate.srr0);
// ensure that everything's been written to memory
unsafe {
asm!("sync");
asm!("isync");
}
debug!("next task: {:x}", scheduler.tasks[index_with_highest_priority].as_mut().unwrap().savestate.srr0);
debug!("next task index: {}", index_with_highest_priority);
// return to the task with the highest priority
unsafe {
finalize_scheduler(&mut scheduler.tasks[index_with_highest_priority].as_mut().unwrap().savestate as *mut SaveState as usize);
finalize_scheduler(&mut scheduler.tasks[index_with_highest_priority].as_mut().unwrap().savestate as *mut SaveState as usize, prev_allocator_stage);
}
}
@@ -235,26 +237,27 @@ pub fn add_task(priority: Option<u8>, fn_addr: fn() -> !) -> Result<(), ()> {
stack_base: 0,
};
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("");
debug!("new task addr: {:x}", fn_addr as u32);
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("");
//debug!("new task addr: {:x}", fn_addr as u32);
task.savestate.srr0 = fn_addr as u32;
// make sure interrupts, instruction relocation, and data relocation are enabled
let msr = unsafe { read_msr() };
task.savestate.srr1 = msr;
task.savestate.srr1 |= ((1 << 15) | (1 << 4) | (1 << 5)) as u32;
//debug!("new task msr: {:x}", task.savestate.srr1);
// send to the scheduler queue
unsafe {
(*SCHEDULER_QUEUE.get()).push(task).expect("scheduler queue is full");
(&SCHEDULER_QUEUE).lock().push(task).expect("scheduler queue is full");
}
Ok(())
@@ -264,7 +267,7 @@ pub fn add_task(priority: Option<u8>, fn_addr: fn() -> !) -> Result<(), ()> {
pub fn die() -> ! {
unsafe {
let current_task = SCHEDULER.current_task;
(*DESCHEDULER_QUEUE.get()).push(current_task);
(&DESCHEDULER_QUEUE).lock().push(current_task).expect("descheduler queue is full");
}
loop {}
}
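
A worked example of the next_task bookkeeping, assuming None slots mark
descheduled tasks:

    // tasks = [Some(a), None, Some(c)], next_task = 1
    // insert at 1 shifts the rest: [Some(a), Some(new), None, Some(c)]
    // scan from index 1 for the next empty slot -> next_task = 2

One subtlety worth noting: Vec::insert shifts every later task up by one
index, while descheduling looks tasks up as uuid - 1, so that mapping only
holds while tasks are appended at the tail; filling holes may eventually want
a lookup by uuid instead.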

View file

@@ -142,7 +142,12 @@ pub unsafe fn request_terminal_redraw() {
let terminal = &mut UNSAFE_TERMINAL_BUFFER;
if let Some(window_ptr) = terminal.window_ptr {
let window = unsafe { &mut *window_ptr };
window.request_queue.push(WindowRequest::Redraw).expect("failed to request redraw!");
loop {
let res = window.request_queue.push(WindowRequest::Redraw);
if res.is_ok() {
break;
}
}
}
}
}
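
The retry loop above spins until the consumer side of the request queue drains
a slot, which relies on the window manager task actually getting timeslices.
If starvation ever shows up, a bounded variant is easy to sketch (MAX_TRIES is
hypothetical):

    const MAX_TRIES: usize = 1024; // hypothetical bound
    let mut pushed = false;
    for _ in 0..MAX_TRIES {
        if window.request_queue.push(WindowRequest::Redraw).is_ok() {
            pushed = true;
            break;
        }
    }
    debug_assert!(pushed, "request queue stayed full");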