have to actually learn paging so pushing progress for now

@@ -8,3 +8,5 @@ rustflags = [
     "-C", "link-arg=--omagic",
 ]
 
+[unstable]
+build-std = ["core", "compiler_builtins", "alloc"]

kernel/Cargo.lock (generated): 19 lines changed
@@ -83,6 +83,7 @@ dependencies = [
  "bootloader_api",
  "embedded-graphics",
  "lazy_static",
+ "linked_list_allocator",
  "pc-keyboard",
  "pic8259",
  "spin 0.9.8",
@@ -99,6 +100,15 @@ dependencies = [
  "spin 0.5.2",
 ]
 
+[[package]]
+name = "linked_list_allocator"
+version = "0.10.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9afa463f5405ee81cdb9cc2baf37e08ec7e4c8209442b5d72c04cfb2cd6e6286"
+dependencies = [
+ "spinning_top",
+]
+
 [[package]]
 name = "lock_api"
 version = "0.4.11"
@@ -175,6 +185,15 @@ dependencies = [
  "lock_api",
 ]
 
+[[package]]
+name = "spinning_top"
+version = "0.2.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5b9eb1a2f4c41445a3a0ff9abc5221c5fcd28e1f13cd7c0397706f9ac938ddb0"
+dependencies = [
+ "lock_api",
+]
+
 [[package]]
 name = "uart_16550"
 version = "0.3.0"

@@ -12,6 +12,7 @@ bench = false
 embedded-graphics = "0.8.1"
 spin = "0.9.8"
 pc-keyboard = "0.5.0"
+linked_list_allocator = "0.10.5"
 
 [target.'cfg(target_arch = "x86_64")'.dependencies]
 pic8259 = "0.10.1"

kernel/src/allocator.rs (new file): 13 lines
@@ -0,0 +1,13 @@
use linked_list_allocator::LockedHeap;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

pub const HEAP_START: *mut u8 = 0x9000_0000 as *mut u8;
pub const HEAP_SIZE: usize = 100 * 1024;

pub fn init_heap() {
    unsafe {
        ALLOCATOR.lock().init(HEAP_START, HEAP_SIZE);
    }
}
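
A minimal sketch of what the heap enables once init_heap() has run (hypothetical call site; the function name is illustrative, not part of the commit):

    fn heap_smoke_test() {
        // Any `alloc` collection is now served by the LockedHeap ALLOCATOR above.
        let mut v = alloc::vec::Vec::new();
        v.push(42u8);
        assert_eq!(v.pop(), Some(42));
    }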

kernel/src/arch/riscv64/csr.rs (new file): 64 lines
@@ -0,0 +1,64 @@
macro_rules! csrr {
    ($name:expr) => {{
        let mut out: u64;
        unsafe {
            core::arch::asm!(
                concat!("csrr {out}, ", $name),
                out = out(reg) out,
            );
        }
        out
    }};
}

macro_rules! csrw {
    ($name:expr, $func:path) => {
        unsafe {
            core::arch::asm!(
                "la t0, {func}",
                concat!("csrw ", $name, ", t0"),
                func = sym $func,
            );
        }
    };
    ($name:expr, $val:expr) => {
        unsafe {
            core::arch::asm!(
                concat!("csrw ", $name, ", {val}"),
                val = in(reg) $val
            );
        }
    };
}
pub(crate) use csrw;

pub mod hartid {
    pub fn read() -> u64 {
        csrr!("mhartid")
    }
}

pub mod mtvec {
    macro_rules! init {
        ($func:path) => {
            let _: fn() -> ! = $func;
            crate::arch::csr::csrw!("mtvec", $func);
        };
    }
    pub(crate) use init;

    pub fn read() -> u64 {
        csrr!("mtvec")
    }
}

pub mod mcause {
    pub fn read() -> u64 {
        csrr!("mcause")
    }
}

pub mod satp {
    pub fn read() -> u64 {
        csrr!("satp")
    }
}
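
A short sketch of how these CSR wrappers are used elsewhere in this commit (hart id in init.rs, trap vector install in interrupts.rs); trap_handler is a hypothetical fn() -> ! aligned to 4 bytes:

    let hart = csr::hartid::read();     // expands to: csrr {out}, mhartid
    csr::mtvec::init!(trap_handler);    // la t0, trap_handler; csrw mtvec, t0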

kernel/src/arch/riscv64/init.rs (new file): 45 lines
@@ -0,0 +1,45 @@
use crate::{
    arch::{csr, instructions, interrupts, wait}, main, println
};

#[no_mangle]
#[link_section = ".text.init"]
#[naked]
unsafe extern "C" fn _start() -> ! {
    core::arch::asm!(
        // set up gp & sp
        ".option push",
        ".option norelax",
        "la gp, global_pointer",
        "la sp, stack_top",
        ".option pop",
        // set up stack for each hart
        "csrr t0, mhartid",
        "slli t0, t0, 12",
        "sub sp, sp, t0",
        // continue to rest of program
        "tail {entry}",

        entry = sym entry,
        options(noreturn)
    );
}

pub fn entry() -> ! {
    let dt_addr = instructions::reg!("a1");
    let hart = csr::hartid::read();
    println!("yo from hart {hart}");
    if hart != 0 {
        wait();
    }
    interrupts::init();
    println!(
        "machine trap vector base address: 0x{:x}",
        csr::mtvec::read()
    );
    println!(
        "physical address bits: {}",
        csr::satp::read()
    );
    main(dt_addr)
}

kernel/src/arch/riscv64/instructions.rs (new file): 16 lines
@@ -0,0 +1,16 @@
use core::arch::asm;

pub fn wfi() {
    unsafe { asm!("wfi") }
}

macro_rules! reg {
    ($name:expr) => {{
        let out: u64;
        unsafe {
            core::arch::asm!(concat!("mv {out}, ", $name), out = out(reg) out);
        }
        out
    }};
}
pub(crate) use reg;

kernel/src/arch/riscv64/interrupts.rs (new file): 12 lines
@@ -0,0 +1,12 @@
use super::csr;

pub fn init() {
    csr::mtvec::init!(stuff);
}

#[repr(align(4))]
pub fn stuff() -> ! {
    let mcause = csr::mcause::read();
    crate::println!("interrupt triggered: {mcause}");
    super::qemu::exit();
}

@@ -1,48 +1,12 @@
-use crate::{main, println};
+pub mod csr;
+pub mod init;
+pub mod instructions;
+pub mod interrupts;
+pub mod page;
 pub mod qemu;
 
-#[no_mangle]
-#[link_section = ".text.init"]
-#[naked]
-unsafe extern "C" fn _start() -> ! {
-    core::arch::asm!(
-        ".option push",
-        ".option norelax",
-        "la gp, global_pointer",
-        "la sp, stack_top",
-        ".option pop",
-
-        "csrr a0, mhartid",
-        "slli t0, a0, 12",
-        "sub sp, sp, t0",
-        "tail {entry}",
-
-        entry = sym entry,
-        options(noreturn)
-    );
-}
-
-fn get_hartid() -> u64 {
-    let mut hart: u64;
-    unsafe {
-        core::arch::asm!(
-            "csrr {hart}, mhartid",
-            hart = out(reg) hart
-        );
-    }
-    return hart
-}
-
-fn entry() -> ! {
-    let hart = get_hartid();
-    println!("yo from hart {hart}");
-    if hart != 0 {
-        loop {}
-    }
-    main()
-}
-
-pub fn hlt_loop() -> ! {
-    loop {}
+pub fn wait() -> ! {
+    loop {
+        instructions::wfi();
+    }
 }

kernel/src/arch/riscv64/page.rs (new file): 547 lines
@@ -0,0 +1,547 @@
// stolen from https://osblog.stephenmarz.com/index.html chapter 3 which I'm prolly gonna start
// following for now bc don't wanna learn x86_64 :)

use core::{mem::size_of, ptr::null_mut};

use crate::{print, println};

// ////////////////////////////////
// // Allocation routines
// ////////////////////////////////
extern "C" {
    static HEAP_START: usize;
    static HEAP_SIZE: usize;
}

// We will use ALLOC_START to mark the start of the actual
// memory we can dish out.
static mut ALLOC_START: usize = 0;
const PAGE_ORDER: usize = 12;
pub const PAGE_SIZE: usize = 1 << 12;

/// Align (set to a multiple of some power of two).
/// This takes an order, which is the exponent in 2^order;
/// therefore, all alignments must be powers of two.
/// This function always rounds up.
pub const fn align_val(val: usize, order: usize) -> usize {
    let o = (1usize << order) - 1;
    (val + o) & !o
}
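// Worked example: align_val(0x1234, 12) == 0x2000, while align_val(0x2000, 12)
// stays 0x2000, i.e. round up to the next 4 KiB boundary unless already aligned.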

#[repr(u8)]
pub enum PageBits {
    Empty = 0,
    Taken = 1 << 0,
    Last = 1 << 1,
}

impl PageBits {
    // We convert PageBits to a u8 a lot, so this is
    // for convenience.
    pub fn val(self) -> u8 {
        self as u8
    }
}

// Each page is described by the Page structure. Linux does this
// as well, where each 4096-byte chunk of memory has a structure
// associated with it. However, their structure is much larger.
pub struct Page {
    flags: u8,
}

impl Page {
    // If this page has been marked as the final allocation,
    // this function returns true. Otherwise, it returns false.
    pub fn is_last(&self) -> bool {
        if self.flags & PageBits::Last.val() != 0 {
            true
        } else {
            false
        }
    }

    // If the page is marked as being taken (allocated), then
    // this function returns true. Otherwise, it returns false.
    pub fn is_taken(&self) -> bool {
        if self.flags & PageBits::Taken.val() != 0 {
            true
        } else {
            false
        }
    }

    // This is the opposite of is_taken().
    pub fn is_free(&self) -> bool {
        !self.is_taken()
    }

    // Clear the Page structure and all associated allocations.
    pub fn clear(&mut self) {
        self.flags = PageBits::Empty.val();
    }

    // Set a certain flag. We ran into trouble here since PageBits
    // is an enumeration and we haven't implemented the BitOr Trait
    // on it.
    pub fn set_flag(&mut self, flag: PageBits) {
        self.flags |= flag.val();
    }

    pub fn clear_flag(&mut self, flag: PageBits) {
        self.flags &= !(flag.val());
    }
}

/// Initialize the allocation system. There are several ways that we can
/// implement the page allocator:
/// 1. Free list (singly linked list where it starts at the first free allocation)
/// 2. Bookkeeping list (structure contains a taken and length)
/// 3. Allocate one Page structure per 4096 bytes (this is what I chose)
/// 4. Others
pub fn init() {
    unsafe {
        let num_pages = HEAP_SIZE / PAGE_SIZE;
        let ptr = HEAP_START as *mut Page;
        // Clear all pages to make sure that they aren't accidentally
        // taken.
        for i in 0..num_pages {
            (*ptr.add(i)).clear();
        }
        // Determine where the actual useful memory starts. This will be
        // after all Page structures. We also must align ALLOC_START
        // to a page boundary (PAGE_SIZE = 4096):
        // ALLOC_START = (HEAP_START + num_pages * size_of::<Page>() + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);
        ALLOC_START = align_val(
            HEAP_START + num_pages * size_of::<Page>(),
            PAGE_ORDER,
        );
    }
}

/// Allocate a page or multiple pages
/// pages: the number of PAGE_SIZE pages to allocate
pub fn alloc(pages: usize) -> *mut u8 {
    // We have to find a contiguous allocation of pages
    assert!(pages > 0);
    unsafe {
        // We create a Page structure for each page on the heap. We
        // actually might have more since HEAP_SIZE moves and so does
        // the size of our structure, but we'll only waste a few bytes.
        let num_pages = HEAP_SIZE / PAGE_SIZE;
        let ptr = HEAP_START as *mut Page;
        for i in 0..num_pages - pages {
            let mut found = false;
            // Check to see if this Page is free. If so, we have our
            // first candidate memory address.
            if (*ptr.add(i)).is_free() {
                // It was FREE! Yay!
                found = true;
                for j in i..i + pages {
                    // Now check to see if we have a
                    // contiguous allocation for all of the
                    // requested pages. If not, we should
                    // check somewhere else.
                    if (*ptr.add(j)).is_taken() {
                        found = false;
                        break;
                    }
                }
            }
            // We've checked to see if there are enough contiguous
            // pages to form what we need. If we couldn't, found
            // will be false, otherwise it will be true, which means
            // we've found valid memory we can allocate.
            if found {
                for k in i..i + pages - 1 {
                    (*ptr.add(k)).set_flag(PageBits::Taken);
                }
                // The marker for the last page is
                // PageBits::Last. This lets us know when we've
                // hit the end of this particular allocation.
                (*ptr.add(i + pages - 1)).set_flag(PageBits::Taken);
                (*ptr.add(i + pages - 1)).set_flag(PageBits::Last);
                // The Page structures themselves aren't the
                // useful memory. Instead, there is 1 Page
                // structure per 4096 bytes starting at
                // ALLOC_START.
                return (ALLOC_START + PAGE_SIZE * i) as *mut u8;
            }
        }
    }

    // If we get here, that means that no contiguous allocation was
    // found.
    null_mut()
}

/// Allocate and zero a page or multiple pages
/// pages: the number of pages to allocate
/// Each page is PAGE_SIZE which is calculated as 1 << PAGE_ORDER
/// On RISC-V, this typically will be 4,096 bytes.
pub fn zalloc(pages: usize) -> *mut u8 {
    // Allocate and zero a page.
    // First, let's get the allocation.
    let ret = alloc(pages);
    if !ret.is_null() {
        let size = (PAGE_SIZE * pages) / 8;
        let big_ptr = ret as *mut u64;
        for i in 0..size {
            // We use big_ptr so that we can force an
            // sd (store doubleword) instruction rather than
            // the sb. This means 8x fewer stores than before.
            // Typically we have to be concerned about remaining
            // bytes, but fortunately 4096 % 8 = 0, so we
            // won't have any remaining bytes.
            unsafe {
                (*big_ptr.add(i)) = 0;
            }
        }
    }
    ret
}

/// Deallocate a page by its pointer
/// The way we've structured this, it will automatically coalesce
/// contiguous pages.
pub fn dealloc(ptr: *mut u8) {
    // Make sure we don't try to free a null pointer.
    assert!(!ptr.is_null());
    unsafe {
        let addr = HEAP_START + (ptr as usize - ALLOC_START) / PAGE_SIZE;
        // Make sure that the address makes sense. The address we
        // calculate here is the page structure, not the HEAP address!
        assert!(addr >= HEAP_START && addr < HEAP_START + HEAP_SIZE);
        let mut p = addr as *mut Page;
        // Keep clearing pages until we hit the last page.
        while (*p).is_taken() && !(*p).is_last() {
            (*p).clear();
            p = p.add(1);
        }
        // If the following assertion fails, it is most likely
        // caused by a double-free.
        assert!(
            (*p).is_last() == true,
            "Possible double-free detected! (Not taken found before last)"
        );
        // If we get here, we've taken care of all previous pages and
        // we are on the last page.
        (*p).clear();
    }
}

/// Print all page allocations
/// This is mainly used for debugging.
pub fn print_page_allocations() {
    unsafe {
        let num_pages = HEAP_SIZE / PAGE_SIZE;
        let mut beg = HEAP_START as *const Page;
        let end = beg.add(num_pages);
        let alloc_beg = ALLOC_START;
        let alloc_end = ALLOC_START + num_pages * PAGE_SIZE;
        println!();
        println!(
            "PAGE ALLOCATION TABLE\nMETA: {:p} -> {:p}\nPHYS: 0x{:x} -> 0x{:x}",
            beg, end, alloc_beg, alloc_end
        );
        println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        let mut num = 0;
        while beg < end {
            if (*beg).is_taken() {
                let start = beg as usize;
                let memaddr = ALLOC_START + (start - HEAP_START) * PAGE_SIZE;
                print!("0x{:x} => ", memaddr);
                loop {
                    num += 1;
                    if (*beg).is_last() {
                        let end = beg as usize;
                        let memaddr = ALLOC_START + (end - HEAP_START) * PAGE_SIZE + PAGE_SIZE - 1;
                        print!("0x{:x}: {:>3} page(s)", memaddr, (end - start + 1));
                        println!(".");
                        break;
                    }
                    beg = beg.add(1);
                }
            }
            beg = beg.add(1);
        }
        println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
        println!(
            "Allocated: {:>5} pages ({:>9} bytes).",
            num,
            num * PAGE_SIZE
        );
        println!(
            "Free     : {:>5} pages ({:>9} bytes).",
            num_pages - num,
            (num_pages - num) * PAGE_SIZE
        );
        println!();
    }
}
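
// A minimal usage sketch of the page allocator above (hypothetical call site,
// assuming init() has already run):
//
//     let pages = zalloc(4);        // four zeroed, contiguous 4 KiB pages
//     assert!(!pages.is_null());
//     print_page_allocations();     // the 4-page run shows up as taken
//     dealloc(pages);               // clears every page up to and including the Last one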

// ////////////////////////////////
// // MMU Routines
// ////////////////////////////////

// Represent (repr) our entry bits as
// signed 64-bit integers.
#[repr(i64)]
#[derive(Copy, Clone)]
pub enum EntryBits {
    None = 0,
    Valid = 1 << 0,
    Read = 1 << 1,
    Write = 1 << 2,
    Execute = 1 << 3,
    User = 1 << 4,
    Global = 1 << 5,
    Access = 1 << 6,
    Dirty = 1 << 7,

    // Convenience combinations
    ReadWrite = 1 << 1 | 1 << 2,
    ReadExecute = 1 << 1 | 1 << 3,
    ReadWriteExecute = 1 << 1 | 1 << 2 | 1 << 3,

    // User convenience combinations
    UserReadWrite = 1 << 1 | 1 << 2 | 1 << 4,
    UserReadExecute = 1 << 1 | 1 << 3 | 1 << 4,
    UserReadWriteExecute = 1 << 1 | 1 << 2 | 1 << 3 | 1 << 4,
}

// Helper functions to convert the enumeration
// into an i64, which is what our page table
// entries will be.
impl EntryBits {
    pub fn val(self) -> i64 {
        self as i64
    }
}

// A single entry. We're using an i64 so that
// this will sign-extend rather than zero-extend
// since RISC-V requires that the reserved sections
// take on the most significant bit.
pub struct Entry {
    pub entry: i64,
}

// The Entry structure describes one of the 512 entries per table, which is
// described in the RISC-V privileged spec Figure 4.18.
impl Entry {
    pub fn is_valid(&self) -> bool {
        self.get_entry() & EntryBits::Valid.val() != 0
    }

    // The first bit (bit index #0) is the V bit for
    // valid.
    pub fn is_invalid(&self) -> bool {
        !self.is_valid()
    }

    // A leaf has one or more RWX bits set
    pub fn is_leaf(&self) -> bool {
        self.get_entry() & 0xe != 0
    }

    pub fn is_branch(&self) -> bool {
        !self.is_leaf()
    }

    pub fn set_entry(&mut self, entry: i64) {
        self.entry = entry;
    }

    pub fn get_entry(&self) -> i64 {
        self.entry
    }
}

// Table represents a single table, which contains 512 (2^9), 64-bit entries.
pub struct Table {
    pub entries: [Entry; 512],
}

impl Table {
    pub fn len() -> usize {
        512
    }
}

/// Map a virtual address to a physical address using 4096-byte page
/// size.
/// root: a mutable reference to the root Table
/// vaddr: The virtual address to map
/// paddr: The physical address to map
/// bits: An OR'd bitset containing the bits the leaf should have.
///       The bits should contain only the following:
///       Read, Write, Execute, User, and/or Global
///       The bits MUST include one or more of the following:
///       Read, Write, Execute
///       The valid bit automatically gets added.
pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64, level: usize) {
    // Make sure that Read, Write, or Execute have been provided;
    // otherwise, we'll leak memory and always create a page fault.
    assert!(bits & 0xe != 0);
    // Extract out each VPN from the virtual address.
    // On the virtual address, each VPN is exactly 9 bits,
    // which is why we use the mask 0x1ff = 0b1_1111_1111 (9 bits).
    let vpn = [
        // VPN[0] = vaddr[20:12]
        (vaddr >> 12) & 0x1ff,
        // VPN[1] = vaddr[29:21]
        (vaddr >> 21) & 0x1ff,
        // VPN[2] = vaddr[38:30]
        (vaddr >> 30) & 0x1ff,
    ];

    // Just like the virtual address, extract the physical address
    // numbers (PPN). However, PPN[2] is different in that it stores
    // 26 bits instead of 9. Therefore, we use
    // 0x3ff_ffff = 0b11_1111_1111_1111_1111_1111_1111 (26 bits).
    let ppn = [
        // PPN[0] = paddr[20:12]
        (paddr >> 12) & 0x1ff,
        // PPN[1] = paddr[29:21]
        (paddr >> 21) & 0x1ff,
        // PPN[2] = paddr[55:30]
        (paddr >> 30) & 0x3ff_ffff,
    ];
    // We will use this as a floating reference so that we can set
    // individual entries as we walk the table.
    let mut v = &mut root.entries[vpn[2]];
    // Now, we're going to traverse the page table and set the bits
    // properly. We expect the root to be valid, however we're required to
    // create anything beyond the root.
    // In Rust, we create a range iterator using the .. operator.
    // The .rev() will reverse the iteration since we need to start with
    // VPN[2]. The .. operator is inclusive on start but exclusive on end,
    // so (0..2) will iterate 0 and 1.
    for i in (level..2).rev() {
        if !v.is_valid() {
            // Allocate a page
            let page = zalloc(1);
            // The page is already aligned by 4,096, so store it
            // directly. The page is stored in the entry shifted
            // right by 2 places.
            v.set_entry(
                (page as i64 >> 2) | EntryBits::Valid.val(),
            );
        }
        let entry = ((v.get_entry() & !0x3ff) << 2) as *mut Entry;
        v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
    }
    // When we get here, we should be at VPN[0] and v should be pointing to
    // our entry.
    // The entry structure is Figure 4.18 in the RISC-V Privileged
    // Specification.
    let entry = (ppn[2] << 28) as i64 |   // PPN[2] = [53:28]
        (ppn[1] << 19) as i64 |           // PPN[1] = [27:19]
        (ppn[0] << 10) as i64 |           // PPN[0] = [18:10]
        bits |                            // Specified bits, such as User, Read, Write, etc
        EntryBits::Valid.val();           // Valid bit
    // Set the entry. V should be set to the correct pointer by the loop
    // above.
    v.set_entry(entry);
}
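
// A minimal sketch of how map() and virt_to_phys() (below) fit together
// (hypothetical call site; 0x8000_0000 is assumed to be valid RAM, as on
// QEMU's virt machine):
//
//     let root = unsafe { &mut *(zalloc(1) as *mut Table) };
//     map(root, 0x8000_0000, 0x8000_0000, EntryBits::ReadWriteExecute.val(), 0);
//     assert_eq!(virt_to_phys(root, 0x8000_0000), Some(0x8000_0000));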

/// Unmaps and frees all memory associated with a table.
/// root: The root table to start freeing.
/// NOTE: This does NOT free root directly. This must be
/// freed manually.
/// The reason we don't free the root is because it is
/// usually embedded into the Process structure.
pub fn unmap(root: &mut Table) {
    // Start with level 2
    for lv2 in 0..Table::len() {
        let ref entry_lv2 = root.entries[lv2];
        if entry_lv2.is_valid() && entry_lv2.is_branch() {
            // This is a valid entry, so drill down and free.
            let memaddr_lv1 = (entry_lv2.get_entry() & !0x3ff) << 2;
            let table_lv1 = unsafe {
                // Make table_lv1 a mutable reference instead of a pointer.
                (memaddr_lv1 as *mut Table).as_mut().unwrap()
            };
            for lv1 in 0..Table::len() {
                let ref entry_lv1 = table_lv1.entries[lv1];
                if entry_lv1.is_valid() && entry_lv1.is_branch() {
                    let memaddr_lv0 = (entry_lv1.get_entry() & !0x3ff) << 2;
                    // The next level is level 0, which
                    // cannot have branches, therefore,
                    // we free here.
                    dealloc(memaddr_lv0 as *mut u8);
                }
            }
            dealloc(memaddr_lv1 as *mut u8);
        }
    }
}

/// Walk the page table to convert a virtual address to a
/// physical address.
/// If a page fault would occur, this returns None
/// Otherwise, it returns Some with the physical address.
pub fn virt_to_phys(root: &Table, vaddr: usize) -> Option<usize> {
    // Walk the page table pointed to by root
    let vpn = [
        // VPN[0] = vaddr[20:12]
        (vaddr >> 12) & 0x1ff,
        // VPN[1] = vaddr[29:21]
        (vaddr >> 21) & 0x1ff,
        // VPN[2] = vaddr[38:30]
        (vaddr >> 30) & 0x1ff,
    ];

    let mut v = &root.entries[vpn[2]];
    for i in (0..=2).rev() {
        if v.is_invalid() {
            // This is an invalid entry, page fault.
            break;
        } else if v.is_leaf() {
            // According to RISC-V, a leaf can be at any level.

            // The offset mask masks off the PPN. Each PPN is 9
            // bits and they start at bit #12. So, our formula
            // 12 + i * 9
            let off_mask = (1 << (12 + i * 9)) - 1;
            let vaddr_pgoff = vaddr & off_mask;
            let addr = ((v.get_entry() << 2) as usize) & !off_mask;
            return Some(addr | vaddr_pgoff);
        }
        // Set v to the next entry which is pointed to by this
        // entry. However, the address was shifted right by 2 places
        // when stored in the page table entry, so we shift it left
        // to get it back into place.
        let entry = ((v.get_entry() & !0x3ff) << 2) as *const Entry;
        // We do i - 1 here, however we should get None or Some() above
        // before we do 0 - 1 = -1.
        v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
    }

    // If we get here, we've exhausted all valid tables and haven't
    // found a leaf.
    None
}

@@ -13,7 +13,7 @@ fn _start(boot_info: &'static mut bootloader_api::BootInfo) -> ! {
     if let Some(framebuffer) = boot_info.framebuffer.as_mut() {
         framebuffer::draw_test(framebuffer);
     }
-    main();
+    main(0);
 }
 
 pub fn hlt_loop() -> ! {

kernel/src/fdt.rs (new file): 64 lines
@@ -0,0 +1,64 @@
use core::mem::transmute;

use alloc::vec;

use crate::println;

pub struct FDT {
    pub header: FDTHeader,
}

const MAGIC: u32 = 0xd00dfeed;

#[repr(u32)]
#[derive(Clone, Copy, Debug)]
enum Token {
    BeginNode = 0x00000001,
    EndNode = 0x00000002,
    Prop = 0x00000003,
    Nop = 0x00000004,
    End = 0x00000009,
}

#[derive(Clone, Copy)]
#[repr(C)]
pub struct FDTHeader {
    pub magic: u32,
    pub totalsize: u32,
    pub off_dt_struct: u32,
    pub off_dt_strings: u32,
    pub off_mem_rsvmap: u32,
    pub version: u32,
    pub last_comp_version: u32,
    pub boot_cpuid_phys: u32,
    pub size_dt_strings: u32,
    pub size_dt_struct: u32,
}

const HEADER_SIZE: usize = core::mem::size_of::<FDTHeader>();

impl FDT {
    pub fn new(addr: u64) -> Self {
        let header: FDTHeader = unsafe { transmute(from_be_32::<HEADER_SIZE>(addr)) };
        if header.magic != MAGIC {
            panic!("FDT magic incorrect");
        }
        if header.version != 17 {
            panic!("FDT version not implemented {}", header.version);
        }
        let dt_structs = addr + header.off_dt_struct as u64;
        let first_node: Token = unsafe { transmute(from_be_32::<4>(dt_structs)) };
        println!("{first_node:?}");
        let a = vec![1, 2];
        println!("arst{a:?}");
        Self { header }
    }
}

pub unsafe fn from_be_32<const S: usize>(addr: u64) -> [u8; S] {
    let mut data = *(addr as *mut [u8; S]);
    for slice in data.chunks_mut(4) {
        slice.reverse();
    }
    data
}
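
As a worked example of from_be_32: a valid header starts with the big-endian magic bytes d0 0d fe ed; reversing that 4-byte chunk gives ed fe 0d d0, which a little-endian hart (the usual RISC-V configuration) reads back as 0xd00dfeed, i.e. MAGIC.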

@@ -2,15 +2,23 @@
 #![no_main]
 #![feature(abi_x86_interrupt)]
 #![feature(naked_functions)]
+#![feature(fn_align)]
+
+extern crate alloc;
+
+pub mod allocator;
 pub mod arch;
+pub mod fdt;
 pub mod log;
 pub mod qemu;
-pub mod sync;
 
-pub fn main() -> ! {
+pub fn main(dt_addr: u64) -> ! {
     println!("we out here vibin");
-    for _ in 0..20000000 {}
+    allocator::init_heap();
+    let fdt = fdt::FDT::new(dt_addr);
+    // for _ in 0..40000000 {}
+    let x = unsafe { *(0xdeadbeef as *mut u8) };
+    println!("we got {x}");
     qemu::exit();
 }

@@ -1,26 +0,0 @@
-// use core::sync::atomic::AtomicBool;
-//
-// struct SpinLock(AtomicBool);
-//
-// impl SpinLock {
-//     pub fn new() -> Self {
-//         Self(AtomicBool::new(false))
-//     }
-//     pub fn lock(&mut self) {
-//         while self.0.swap(true, core::sync::atomic::Ordering::Acquire) {}
-//     }
-//     pub fn release(&mut self) {
-//         self.0.store(false, core::sync::atomic::Ordering::Release);
-//     }
-// }
-//
-// struct Mutex<T> {
-//     lock: SpinLock,
-//     data: T
-// }
-//
-// struct MutexGuard<T>(T);
-//
-// impl <T> MutexGuard<T> {
-// }
-//