can now print device tree

Bryan McShea
2024-01-26 18:55:51 -05:00
parent bdb29a6361
commit 3bf7735fe8
13 changed files with 233 additions and 597 deletions

kernel/build.rs

@@ -0,0 +1,3 @@
fn main() {
    println!("cargo:rerun-if-changed=src/**/*.ld");
}


@@ -0,0 +1,29 @@
use core::arch::asm;
macro_rules! reg {
    ($name:expr) => {{
        let out: u64;
        unsafe {
            core::arch::asm!(concat!("mv {out}, ", $name), out = out(reg) out);
        }
        out
    }};
}
pub(crate) use reg;
macro_rules! linker_static {
    ($name:ident: $type:ty, $source:expr) => {
        core::arch::global_asm!(
            concat!(".global ", stringify!($name)),
            concat!(stringify!($name), ": ", $source)
        );
        extern "C" {
            pub static $name: $type;
        }
    };
}
pub(crate) use linker_static;
pub fn wfi() {
    unsafe { asm!("wfi") }
}
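For reference, a quick sketch of how the two macros above are meant to be used (illustrative only; STACK_END and report are my names, not the commit's, though _stack_end is defined in the linker script later in this commit):

// Sketch: reg! copies a general-purpose register into a u64;
// linker_static! exposes a linker-script symbol as an extern static.
linker_static!(STACK_END: usize, ".dword _stack_end");
fn report() -> (u64, usize) {
    let sp = reg!("sp");            // current stack pointer
    let end = unsafe { STACK_END }; // reading an extern static is unsafe
    (sp, end)
}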


@@ -11,7 +11,7 @@ macro_rules! csrr {
     }};
 }
-macro_rules! csrw {
+macro_rules! csrw_fn {
     ($name:expr, $func:path) => {
         unsafe {
             core::arch::asm!(
@@ -21,6 +21,10 @@ macro_rules! csrw {
             );
         }
     };
 }
+pub(crate) use csrw_fn;
+macro_rules! csrw {
+    ($name:expr, $val:expr) => {
+        unsafe {
+            core::arch::asm!(
@@ -30,7 +34,12 @@ macro_rules! csrw {
         }
     };
 }
 pub(crate) use csrw;
+macro_rules! bits {
+    ($name:ident[$high:expr,$low:expr]) => {{
+        ($name & ((2u64.pow($high - $low + 1) - 1) << $low)) >> $low
+    }};
+}
 pub mod hartid {
     pub fn read() -> u64 {
@@ -42,7 +51,7 @@ pub mod mtvec {
     macro_rules! init {
         ($func:path) => {
             let _: fn() -> ! = $func;
-            crate::arch::csr::csrw!("mtvec", $func);
+            crate::arch::csr::csrw_fn!("mtvec", $func);
         };
     }
     pub(crate) use init;
@@ -58,7 +67,45 @@ pub mod mcause {
 }
 pub mod satp {
-    pub fn read() -> u64 {
-        csrr!("satp")
+    use core::mem::transmute;
+    use crate::arch::paging::Table;
+    #[derive(Debug)]
+    #[repr(u64)]
+    pub enum Mode {
+        Bare = 0,
+        Reserved1 = 1,
+        Reserved2 = 2,
+        Reserved3 = 3,
+        Reserved4 = 4,
+        Reserved5 = 5,
+        Reserved6 = 6,
+        Reserved7 = 7,
+        Sv39 = 8,
+        Sv48 = 9,
+        Sv57 = 10,
+        Sv64 = 11,
+        Reserved8 = 12,
+        Reserved9 = 13,
+        Custom1 = 14,
+        Custom2 = 15,
+    }
+    #[derive(Debug)]
+    pub struct Satp {
+        pub mode: Mode,
+        pub asid: u64,
+        pub ppn: *mut Table,
+    }
+    pub fn read() -> Satp {
+        let satp = csrr!("satp");
+        let mode = unsafe { transmute(bits!(satp[63,60])) };
+        let asid = bits!(satp[59, 44]);
+        let ppn = unsafe { transmute(bits!(satp[43, 0]) << 12) };
+        Satp { mode, asid, ppn }
+    }
+    pub fn write(satp: Satp) {
+        let val = (satp.mode as u64) << 60 | satp.asid << 44 | (satp.ppn as u64 >> 12);
+        csrw!("satp", val);
     }
 }
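A sanity check on the satp layout used by read() and write() above (my arithmetic, not code from the commit): mode is bits 63:60, asid is bits 59:44, and ppn is bits 43:0, holding the table's physical address shifted right by 12.

// Round-trip check of the satp packing (illustrative only).
fn satp_layout_check() {
    let mode: u64 = 8; // Sv39
    let asid: u64 = 3;
    let table_addr: u64 = 0x8020_0000; // must be 4 KiB aligned
    let satp = mode << 60 | asid << 44 | (table_addr >> 12);
    assert_eq!((satp >> 60) & 0xf, mode);    // bits!(satp[63,60])
    assert_eq!((satp >> 44) & 0xffff, asid); // bits!(satp[59, 44])
    assert_eq!((satp & ((1u64 << 44) - 1)) << 12, table_addr); // bits!(satp[43, 0]) << 12
}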


@@ -1,5 +1,5 @@
 use crate::{
-    arch::{csr, instructions, interrupts, wait}, main, println
+    arch::{asm, csr, interrupts, paging, wait}, main, println
 };
 #[no_mangle]
@@ -10,8 +10,8 @@ unsafe extern "C" fn _start() -> ! {
     // set up gp & sp
     ".option push",
     ".option norelax",
-    "la gp, global_pointer",
-    "la sp, stack_top",
+    "la gp, _global_pointer",
+    "la sp, _stack_end",
     ".option pop",
     // set up stack for each hart
     "csrr t0, mhartid",
@@ -26,20 +26,17 @@ unsafe extern "C" fn _start() -> ! {
 }
 pub fn entry() -> ! {
-    let dt_addr = instructions::reg!("a1");
+    let dt_addr = asm::reg!("a1") as usize;
     let hart = csr::hartid::read();
     println!("yo from hart {hart}");
     if hart != 0 {
         wait();
     }
     interrupts::init();
+    paging::init(dt_addr);
     println!(
         "machine trap vector base address: 0x{:x}",
         csr::mtvec::read()
     );
-    println!(
-        "physical address bits: {}",
-        csr::satp::read()
-    );
     main(dt_addr)
 }
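The hunk elides the lines after "csrr t0, mhartid"; the usual pattern there offsets each hart's sp into its own slice of the stack region. A hypothetical sketch of that continuation (the size is my guess, not the commit's):

// Hypothetical continuation of the boot asm above:
// "csrr t0, mhartid",  // t0 = hart id
// "slli t0, t0, 12",   // t0 = hartid * 4096 (illustrative per-hart stack size)
// "sub sp, sp, t0",    // drop each hart's sp below the previous hart's stack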


@@ -1,16 +0,0 @@
use core::arch::asm;
pub fn wfi() {
    unsafe { asm!("wfi") }
}
macro_rules! reg {
    ($name:expr) => {{
        let out: u64;
        unsafe {
            core::arch::asm!(concat!("mv {out}, ", $name), out = out(reg) out);
        }
        out
    }};
}
pub(crate) use reg;


@@ -5,9 +5,9 @@ SECTIONS
   . = 0x80000000;
   .text : {
     *(.text.init)
-    *(.text)
+    *(.text .text.*)
   }
-  PROVIDE(global_pointer = .);
+  PROVIDE(_global_pointer = .);
   .rodata : {
     *(.rodata .rodata.*)
   }
@@ -15,12 +15,13 @@ SECTIONS
     *(.sdata .sdata.*) *(.data .data.*)
   }
   .bss : {
+    PROVIDE(_bss_start = .);
     *(.sbss .sbss.*) *(.bss .bss.*)
+    PROVIDE(_bss_end = .);
   }
-  . += 0x8000;
   . = ALIGN(16);
-  PROVIDE(stack_top = .);
+  PROVIDE(_stack_start = .);
+  . += 0x8000;
+  PROVIDE(_stack_end = .);
+  . = ALIGN(4096); /* 2^12 */
+  PROVIDE(_heap_start = .);
 }


@@ -1,12 +1,12 @@
 pub mod csr;
 pub mod init;
-pub mod instructions;
+pub mod asm;
 pub mod interrupts;
-pub mod page;
+pub mod paging;
 pub mod qemu;
 pub fn wait() -> ! {
     loop {
-        instructions::wfi();
+        asm::wfi();
     }
 }


@@ -1,547 +0,0 @@
// stolen from https://osblog.stephenmarz.com/index.html chapter 3 which I'm prolly gonna start
// following for now bc don't wanna learn x86_64 :)
use core::{mem::size_of, ptr::null_mut};
use crate::{print, println};
// ////////////////////////////////
// // Allocation routines
// ////////////////////////////////
extern "C" {
static HEAP_START: usize;
static HEAP_SIZE: usize;
}
// We will use ALLOC_START to mark the start of the actual
// memory we can dish out.
static mut ALLOC_START: usize = 0;
const PAGE_ORDER: usize = 12;
pub const PAGE_SIZE: usize = 1 << 12;
/// Align (set to a multiple of some power of two)
/// This takes an order which is the exponent to 2^order
/// Therefore, all alignments must be made as a power of two.
/// This function always rounds up.
pub const fn align_val(val: usize, order: usize) -> usize {
let o = (1usize << order) - 1;
(val + o) & !o
}
#[repr(u8)]
pub enum PageBits {
Empty = 0,
Taken = 1 << 0,
Last = 1 << 1,
}
impl PageBits {
// We convert PageBits to a u8 a lot, so this is
// for convenience.
pub fn val(self) -> u8 {
self as u8
}
}
// Each page is described by the Page structure. Linux does this
// as well, where each 4096-byte chunk of memory has a structure
// associated with it. However, their structure is much larger.
pub struct Page {
flags: u8,
}
impl Page {
// If this page has been marked as the final allocation,
// this function returns true. Otherwise, it returns false.
pub fn is_last(&self) -> bool {
if self.flags & PageBits::Last.val() != 0 {
true
}
else {
false
}
}
// If the page is marked as being taken (allocated), then
// this function returns true. Otherwise, it returns false.
pub fn is_taken(&self) -> bool {
if self.flags & PageBits::Taken.val() != 0 {
true
}
else {
false
}
}
// This is the opposite of is_taken().
pub fn is_free(&self) -> bool {
!self.is_taken()
}
// Clear the Page structure and all associated allocations.
pub fn clear(&mut self) {
self.flags = PageBits::Empty.val();
}
// Set a certain flag. We ran into trouble here since PageBits
// is an enumeration and we haven't implemented the BitOr Trait
// on it.
pub fn set_flag(&mut self, flag: PageBits) {
self.flags |= flag.val();
}
pub fn clear_flag(&mut self, flag: PageBits) {
self.flags &= !(flag.val());
}
}
/// Initialize the allocation system. There are several ways that we can
/// implement the page allocator:
/// 1. Free list (singly linked list where it starts at the first free allocation)
/// 2. Bookkeeping list (structure contains a taken and length)
/// 3. Allocate one Page structure per 4096 bytes (this is what I chose)
/// 4. Others
pub fn init() {
unsafe {
let num_pages = HEAP_SIZE / PAGE_SIZE;
let ptr = HEAP_START as *mut Page;
// Clear all pages to make sure that they aren't accidentally
// taken
for i in 0..num_pages {
(*ptr.add(i)).clear();
}
// Determine where the actual useful memory starts. This will be
// after all Page structures. We also must align the ALLOC_START
// to a page-boundary (PAGE_SIZE = 4096). ALLOC_START =
// (HEAP_START + num_pages * size_of::<Page>() + PAGE_SIZE - 1)
// & !(PAGE_SIZE - 1);
ALLOC_START = align_val(
HEAP_START
+ num_pages * size_of::<Page,>(),
PAGE_ORDER,
);
}
}
/// Allocate a page or multiple pages
/// pages: the number of PAGE_SIZE pages to allocate
pub fn alloc(pages: usize) -> *mut u8 {
// We have to find a contiguous allocation of pages
assert!(pages > 0);
unsafe {
// We create a Page structure for each page on the heap. We
// actually might have more since HEAP_SIZE moves and so does
// the size of our structure, but we'll only waste a few bytes.
let num_pages = HEAP_SIZE / PAGE_SIZE;
let ptr = HEAP_START as *mut Page;
for i in 0..num_pages - pages {
let mut found = false;
// Check to see if this Page is free. If so, we have our
// first candidate memory address.
if (*ptr.add(i)).is_free() {
// It was FREE! Yay!
found = true;
for j in i..i + pages {
// Now check to see if we have a
// contiguous allocation for all of the
// request pages. If not, we should
// check somewhere else.
if (*ptr.add(j)).is_taken() {
found = false;
break;
}
}
}
// We've checked to see if there are enough contiguous
// pages to form what we need. If we couldn't, found
// will be false, otherwise it will be true, which means
// we've found valid memory we can allocate.
if found {
for k in i..i + pages - 1 {
(*ptr.add(k)).set_flag(PageBits::Taken);
}
// The marker for the last page is
// PageBits::Last This lets us know when we've
// hit the end of this particular allocation.
(*ptr.add(i+pages-1)).set_flag(PageBits::Taken);
(*ptr.add(i+pages-1)).set_flag(PageBits::Last);
// The Page structures themselves aren't the
// useful memory. Instead, there is 1 Page
// structure per 4096 bytes starting at
// ALLOC_START.
return (ALLOC_START + PAGE_SIZE * i)
as *mut u8;
}
}
}
// If we get here, that means that no contiguous allocation was
// found.
null_mut()
}
/// Allocate and zero a page or multiple pages
/// pages: the number of pages to allocate
/// Each page is PAGE_SIZE which is calculated as 1 << PAGE_ORDER
/// On RISC-V, this typically will be 4,096 bytes.
pub fn zalloc(pages: usize) -> *mut u8 {
// Allocate and zero a page.
// First, let's get the allocation
let ret = alloc(pages);
if !ret.is_null() {
let size = (PAGE_SIZE * pages) / 8;
let big_ptr = ret as *mut u64;
for i in 0..size {
// We use big_ptr so that we can force an
// sd (store doubleword) instruction rather than
// the sb. This means 8x fewer stores than before.
// Typically we have to be concerned about remaining
// bytes, but fortunately 4096 % 8 = 0, so we
// won't have any remaining bytes.
unsafe {
(*big_ptr.add(i)) = 0;
}
}
}
ret
}
/// Deallocate a page by its pointer
/// The way we've structured this, it will automatically coalesce
/// contiguous pages.
pub fn dealloc(ptr: *mut u8) {
// Make sure we don't try to free a null pointer.
assert!(!ptr.is_null());
unsafe {
let addr =
HEAP_START + (ptr as usize - ALLOC_START) / PAGE_SIZE;
// Make sure that the address makes sense. The address we
// calculate here is the page structure, not the HEAP address!
assert!(addr >= HEAP_START && addr < HEAP_START + HEAP_SIZE);
let mut p = addr as *mut Page;
// Keep clearing pages until we hit the last page.
while (*p).is_taken() && !(*p).is_last() {
(*p).clear();
p = p.add(1);
}
// If the following assertion fails, it is most likely
// caused by a double-free.
assert!(
(*p).is_last() == true,
"Possible double-free detected! (Not taken found \
before last)"
);
// If we get here, we've taken care of all previous pages and
// we are on the last page.
(*p).clear();
}
}
/// Print all page allocations
/// This is mainly used for debugging.
pub fn print_page_allocations() {
unsafe {
let num_pages = HEAP_SIZE / PAGE_SIZE;
let mut beg = HEAP_START as *const Page;
let end = beg.add(num_pages);
let alloc_beg = ALLOC_START;
let alloc_end = ALLOC_START + num_pages * PAGE_SIZE;
println!();
println!(
"PAGE ALLOCATION TABLE\nMETA: {:p} -> {:p}\nPHYS: \
0x{:x} -> 0x{:x}",
beg, end, alloc_beg, alloc_end
);
println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
let mut num = 0;
while beg < end {
if (*beg).is_taken() {
let start = beg as usize;
let memaddr = ALLOC_START
+ (start - HEAP_START)
* PAGE_SIZE;
print!("0x{:x} => ", memaddr);
loop {
num += 1;
if (*beg).is_last() {
let end = beg as usize;
let memaddr = ALLOC_START
+ (end
- HEAP_START)
* PAGE_SIZE
+ PAGE_SIZE - 1;
print!(
"0x{:x}: {:>3} page(s)",
memaddr,
(end - start + 1)
);
println!(".");
break;
}
beg = beg.add(1);
}
}
beg = beg.add(1);
}
println!("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
println!(
"Allocated: {:>5} pages ({:>9} bytes).",
num,
num * PAGE_SIZE
);
println!(
"Free : {:>5} pages ({:>9} bytes).",
num_pages - num,
(num_pages - num) * PAGE_SIZE
);
println!();
}
}
// ////////////////////////////////
// // MMU Routines
// ////////////////////////////////
// Represent (repr) our entry bits as
// unsigned 64-bit integers.
#[repr(i64)]
#[derive(Copy, Clone)]
pub enum EntryBits {
None = 0,
Valid = 1 << 0,
Read = 1 << 1,
Write = 1 << 2,
Execute = 1 << 3,
User = 1 << 4,
Global = 1 << 5,
Access = 1 << 6,
Dirty = 1 << 7,
// Convenience combinations
ReadWrite = 1 << 1 | 1 << 2,
ReadExecute = 1 << 1 | 1 << 3,
ReadWriteExecute = 1 << 1 | 1 << 2 | 1 << 3,
// User Convenience Combinations
UserReadWrite = 1 << 1 | 1 << 2 | 1 << 4,
UserReadExecute = 1 << 1 | 1 << 3 | 1 << 4,
UserReadWriteExecute = 1 << 1 | 1 << 2 | 1 << 3 | 1 << 4,
}
// Helper functions to convert the enumeration
// into an i64, which is what our page table
// entries will be.
impl EntryBits {
pub fn val(self) -> i64 {
self as i64
}
}
// A single entry. We're using an i64 so that
// this will sign-extend rather than zero-extend
// since RISC-V requires that the reserved sections
// take on the most significant bit.
pub struct Entry {
pub entry: i64,
}
// The Entry structure describes one of the 512 entries per table, which is
// described in the RISC-V privileged spec Figure 4.18.
impl Entry {
pub fn is_valid(&self) -> bool {
self.get_entry() & EntryBits::Valid.val() != 0
}
// The first bit (bit index #0) is the V bit for
// valid.
pub fn is_invalid(&self) -> bool {
!self.is_valid()
}
// A leaf has one or more RWX bits set
pub fn is_leaf(&self) -> bool {
self.get_entry() & 0xe != 0
}
pub fn is_branch(&self) -> bool {
!self.is_leaf()
}
pub fn set_entry(&mut self, entry: i64) {
self.entry = entry;
}
pub fn get_entry(&self) -> i64 {
self.entry
}
}
// Table represents a single table, which contains 512 (2^9), 64-bit entries.
pub struct Table {
pub entries: [Entry; 512],
}
impl Table {
pub fn len() -> usize {
512
}
}
/// Map a virtual address to a physical address using 4096-byte page
/// size.
/// root: a mutable reference to the root Table
/// vaddr: The virtual address to map
/// paddr: The physical address to map
/// bits: An OR'd bitset containing the bits the leaf should have.
/// The bits should contain only the following:
/// Read, Write, Execute, User, and/or Global
/// The bits MUST include one or more of the following:
/// Read, Write, Execute
/// The valid bit automatically gets added.
pub fn map(root: &mut Table, vaddr: usize, paddr: usize, bits: i64, level: usize) {
// Make sure that Read, Write, or Execute have been provided
// otherwise, we'll leak memory and always create a page fault.
assert!(bits & 0xe != 0);
// Extract out each VPN from the virtual address
// On the virtual address, each VPN is exactly 9 bits,
// which is why we use the mask 0x1ff = 0b1_1111_1111 (9 bits)
let vpn = [
// VPN[0] = vaddr[20:12]
(vaddr >> 12) & 0x1ff,
// VPN[1] = vaddr[29:21]
(vaddr >> 21) & 0x1ff,
// VPN[2] = vaddr[38:30]
(vaddr >> 30) & 0x1ff,
];
// Just like the virtual address, extract the physical address
// numbers (PPN). However, PPN[2] is different in that it stores
// 26 bits instead of 9. Therefore, we use,
// 0x3ff_ffff = 0b11_1111_1111_1111_1111_1111_1111 (26 bits).
let ppn = [
// PPN[0] = paddr[20:12]
(paddr >> 12) & 0x1ff,
// PPN[1] = paddr[29:21]
(paddr >> 21) & 0x1ff,
// PPN[2] = paddr[55:30]
(paddr >> 30) & 0x3ff_ffff,
];
// We will use this as a floating reference so that we can set
// individual entries as we walk the table.
let mut v = &mut root.entries[vpn[2]];
// Now, we're going to traverse the page table and set the bits
// properly. We expect the root to be valid, however we're required to
// create anything beyond the root.
// In Rust, we create a range iterator using the .. operator.
// The .rev() will reverse the iteration since we need to start with
// VPN[2] The .. operator is inclusive on start but exclusive on end.
// So, (0..2) will iterate 0 and 1.
for i in (level..2).rev() {
if !v.is_valid() {
// Allocate a page
let page = zalloc(1);
// The page is already aligned by 4,096, so store it
// directly The page is stored in the entry shifted
// right by 2 places.
v.set_entry(
(page as i64 >> 2)
| EntryBits::Valid.val(),
);
}
let entry = ((v.get_entry() & !0x3ff) << 2) as *mut Entry;
v = unsafe { entry.add(vpn[i]).as_mut().unwrap() };
}
// When we get here, we should be at VPN[0] and v should be pointing to
// our entry.
// The entry structure is Figure 4.18 in the RISC-V Privileged
// Specification
let entry = (ppn[2] << 28) as i64 | // PPN[2] = [53:28]
(ppn[1] << 19) as i64 | // PPN[1] = [27:19]
(ppn[0] << 10) as i64 | // PPN[0] = [18:10]
bits | // Specified bits, such as User, Read, Write, etc
EntryBits::Valid.val(); // Valid bit
// Set the entry. V should be set to the correct pointer by the loop
// above.
v.set_entry(entry);
}
/// Unmaps and frees all memory associated with a table.
/// root: The root table to start freeing.
/// NOTE: This does NOT free root directly. This must be
/// freed manually.
/// The reason we don't free the root is because it is
/// usually embedded into the Process structure.
pub fn unmap(root: &mut Table) {
// Start with level 2
for lv2 in 0..Table::len() {
let ref entry_lv2 = root.entries[lv2];
if entry_lv2.is_valid() && entry_lv2.is_branch() {
// This is a valid entry, so drill down and free.
let memaddr_lv1 = (entry_lv2.get_entry() & !0x3ff) << 2;
let table_lv1 = unsafe {
// Make table_lv1 a mutable reference instead of a pointer.
(memaddr_lv1 as *mut Table).as_mut().unwrap()
};
for lv1 in 0..Table::len() {
let ref entry_lv1 = table_lv1.entries[lv1];
if entry_lv1.is_valid() && entry_lv1.is_branch()
{
let memaddr_lv0 = (entry_lv1.get_entry()
& !0x3ff) << 2;
// The next level is level 0, which
// cannot have branches, therefore,
// we free here.
dealloc(memaddr_lv0 as *mut u8);
}
}
dealloc(memaddr_lv1 as *mut u8);
}
}
}
/// Walk the page table to convert a virtual address to a
/// physical address.
/// If a page fault would occur, this returns None
/// Otherwise, it returns Some with the physical address.
pub fn virt_to_phys(root: &Table, vaddr: usize) -> Option<usize> {
// Walk the page table pointed to by root
let vpn = [
// VPN[0] = vaddr[20:12]
(vaddr >> 12) & 0x1ff,
// VPN[1] = vaddr[29:21]
(vaddr >> 21) & 0x1ff,
// VPN[2] = vaddr[38:30]
(vaddr >> 30) & 0x1ff,
];
let mut v = &root.entries[vpn[2]];
for i in (0..=2).rev() {
if v.is_invalid() {
// This is an invalid entry, page fault.
break;
}
else if v.is_leaf() {
// According to RISC-V, a leaf can be at any level.
// The offset mask masks off the PPN. Each PPN is 9
// bits and they start at bit #12. So, our formula
// 12 + i * 9
let off_mask = (1 << (12 + i * 9)) - 1;
let vaddr_pgoff = vaddr & off_mask;
let addr = ((v.get_entry() << 2) as usize) & !off_mask;
return Some(addr | vaddr_pgoff);
}
// Set v to the next entry which is pointed to by this
// entry. However, the address was shifted right by 2 places
// when stored in the page table entry, so we shift it left
// to get it back into place.
let entry = ((v.get_entry() & !0x3ff) << 2) as *const Entry;
// We do i - 1 here, however we should get None or Some() above
// before we do 0 - 1 = -1.
v = unsafe { entry.add(vpn[i - 1]).as_ref().unwrap() };
}
// If we get here, we've exhausted all valid tables and haven't
// found a leaf.
None
}


@@ -0,0 +1,34 @@
// stolen from https://osblog.stephenmarz.com/index.html chapter 3 which I'm prolly gonna start
// following for now bc don't wanna learn x86_64 :)
use crate::{
    arch::csr::{self, satp}, fdt::print_fdt, println
};
use super::asm::linker_static;
linker_static!(HEAP_START: usize, ".dword _heap_start");
static HEAP_SIZE: usize = 128 * 1024 * 1024;
pub struct Entry(u64);
pub struct Table {
    pub entries: [Entry; 2usize.pow(9)],
}
pub fn init(fdt: usize) {
    unsafe {
        println!("heap start: 0x{:x}", HEAP_START);
        print_fdt(fdt);
        let table_start = HEAP_START as *mut Table;
        csr::satp::write(satp::Satp {
            mode: satp::Mode::Sv39,
            asid: 0,
            ppn: table_start,
        });
        let satp = csr::satp::read();
        println!("satp: {satp:?}");
        let x = *(0x9000_0000 as *mut u8);
        println!("we got {x}");
    }
}
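Note that satp here points at heap memory nothing has initialized as a page table yet; since the hart is still in M-mode, satp-based translation doesn't apply, so the test read of 0x9000_0000 goes straight to physical memory. For a real Sv39 mapping, leaf entries would have to be written first; a rough sketch of the entry encoding from the privileged spec (my code, not the commit's):

// Sv39 PTE sketch: PPN in bits 53:10, flag bits at the bottom;
// a leaf entry has at least one of R/W/X set.
const PTE_V: u64 = 1 << 0;
const PTE_R: u64 = 1 << 1;
const PTE_W: u64 = 1 << 2;
const PTE_X: u64 = 1 << 3;
fn identity_leaf(paddr: u64) -> u64 {
    ((paddr >> 12) << 10) | PTE_R | PTE_W | PTE_X | PTE_V
}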


@@ -47,5 +47,13 @@ pub fn exit() -> ! {
 }
 pub fn _print(args: core::fmt::Arguments<'_>) {
+    // NOTE: something really dumb can happen here;
+    // if you evaluate an expression in a print statement, and that
+    // causes an interrupt, this will be left locked...
+    // Should I set up the heap before interrupts? or just avoid printing until both...?
+    // or maybe force unlock if there's an interrupt?
+    // or store the hart in the lock, and unlock if that hart was interrupted??
+    // or just have a constant-sized buffer?
+    // or create a "locked writer"?
     UART.lock().write_fmt(args).unwrap();
 }
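One of the options from the NOTE above, sketched under the assumption that the UART mutex exposes a try_lock the way spin::Mutex does (not part of the commit):

// Drop output instead of deadlocking if the lock is already held,
// e.g. when printing from an interrupt that preempted another print.
pub fn _print_nonblocking(args: core::fmt::Arguments<'_>) {
    if let Some(mut uart) = UART.try_lock() {
        let _ = uart.write_fmt(args); // ignore write errors rather than panic
    }
}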


@@ -1,11 +1,11 @@
-use core::mem::transmute;
+// NOTE: basically none of this is safe rn, ideally it's eventually made safe / able to recover
+use crate::{print, println};
 use alloc::vec;
+use core::mem::{size_of, transmute};
-use crate::println;
 pub struct FDT {
-    pub header: FDTHeader,
+    pub header: Header,
 }
 const MAGIC: u32 = 0xd00dfeed;
@@ -19,10 +19,16 @@ enum Token {
     Nop = 0x00000004,
     End = 0x00000009,
 }
+const TOKEN_SIZE: usize = size_of::<Token>();
+impl Token {
+    pub fn from_addr(addr: usize) -> Self {
+        unsafe { transmute(from_be_32::<TOKEN_SIZE>(addr)) }
+    }
+}
 #[derive(Clone, Copy)]
 #[repr(C)]
-pub struct FDTHeader {
+pub struct Header {
     pub magic: u32,
     pub totalsize: u32,
     pub off_dt_struct: u32,
@@ -35,19 +41,37 @@ pub struct FDTHeader {
     pub size_dt_struct: u32,
 }
-const HEADER_SIZE: usize = core::mem::size_of::<FDTHeader>();
+const HEADER_SIZE: usize = size_of::<Header>();
+impl Header {
+    pub fn from_addr(addr: usize) -> Self {
+        unsafe { transmute(from_be_32::<HEADER_SIZE>(addr)) }
+    }
+}
+const PROP_SIZE: usize = size_of::<Prop>();
+#[derive(Debug)]
+pub struct Prop {
+    len: u32,
+    nameoff: u32,
+}
+impl Prop {
+    pub fn from_addr(addr: usize) -> Self {
+        unsafe { transmute(from_be_32::<PROP_SIZE>(addr)) }
+    }
+}
 impl FDT {
-    pub fn new(addr: u64) -> Self {
-        let header: FDTHeader = unsafe { transmute(from_be_32::<HEADER_SIZE>(addr)) };
+    pub fn new(addr: usize) -> Self {
+        let header = Header::from_addr(addr);
         if header.magic != MAGIC {
             panic!("FDT magic incorrect");
         }
         if header.version != 17 {
             panic!("FDT version not implemented {}", header.version);
         }
-        let dt_structs = addr + header.off_dt_struct as u64;
-        let first_node: Token = unsafe { transmute(from_be_32::<4>(dt_structs)) };
+        let dt_structs = addr + header.off_dt_struct as usize;
+        let first_node: Token = Token::from_addr(dt_structs);
         println!("{first_node:?}");
         let a = vec![1, 2];
         println!("arst{a:?}");
@@ -55,10 +79,65 @@ impl FDT {
     }
 }
-pub unsafe fn from_be_32<const S: usize>(addr: u64) -> [u8; S] {
+pub unsafe fn from_be_32<const S: usize>(addr: usize) -> [u8; S] {
     let mut data = *(addr as *mut [u8; S]);
     for slice in data.chunks_mut(4) {
         slice.reverse();
     }
     data
 }
+pub fn print_fdt(addr: usize) {
+    let header = Header::from_addr(addr);
+    let str_addr = header.off_dt_strings as usize + addr;
+    let mut addr = header.off_dt_struct as usize + addr;
+    loop {
+        let token: Token = Token::from_addr(addr);
+        addr += TOKEN_SIZE;
+        if let Token::End = token {
+            break;
+        }
+        if let Token::EndNode = token {
+            continue;
+        }
+        print!("name: ");
+        'outer: loop {
+            let bytes = unsafe { *(addr as *mut [u8; TOKEN_SIZE]) };
+            addr += TOKEN_SIZE;
+            for byte in bytes {
+                if byte == 0 {
+                    break 'outer;
+                }
+                let c = byte as char;
+                print!("{}", c);
+            }
+        }
+        println!();
+        print_props(str_addr, &mut addr)
+    }
+}
+pub fn print_props(str_addr: usize, addr: &mut usize) {
+    loop {
+        let token: Token = Token::from_addr(*addr);
+        let Token::Prop = token else {
+            break;
+        };
+        *addr += TOKEN_SIZE;
+        let prop: Prop = Prop::from_addr(*addr);
+        let mut name_addr = str_addr + prop.nameoff as usize;
+        print!(" ");
+        loop {
+            let byte = unsafe { *(name_addr as *mut u8) };
+            name_addr += 1;
+            if byte == 0 {
+                break;
+            }
+            let c = byte as char;
+            print!("{}", c);
+        }
+        println!(": {prop:?}");
+        let aligned_len = (prop.len as usize + (TOKEN_SIZE - 1)) & !(TOKEN_SIZE - 1);
+        *addr += PROP_SIZE + aligned_len;
+    }
+}
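The aligned_len computation in print_props rounds a property's length up to the next token boundary, since the FDT structure block keeps everything 32-bit aligned. A quick check of that arithmetic (mine, not the commit's):

// (len + 3) & !3 rounds up to a multiple of TOKEN_SIZE = 4.
fn align_check() {
    let round = |len: usize| (len + 3) & !3;
    assert_eq!(round(0), 0);
    assert_eq!(round(1), 4);
    assert_eq!(round(4), 4);
    assert_eq!(round(9), 12);
}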


@@ -12,7 +12,7 @@ pub mod fdt;
 pub mod log;
 pub mod qemu;
-pub fn main(dt_addr: u64) -> ! {
+pub fn main(dt_addr: usize) -> ! {
     println!("we out here vibin");
     allocator::init_heap();
     let fdt = fdt::FDT::new(dt_addr);


@@ -29,6 +29,7 @@ fn run_qemu(target: &Target, gdb: Option<Option<u16>>) {
     if let Some(port) = gdb {
         let port = port.unwrap_or(1234);
         qemu.arg("-S");
+        qemu.args(["-m", "4G"]);
         qemu.args(["-gdb", &format!("tcp::{}", port)]);
         let mut gdb = target.gdb();
         gdb.arg("-q");