move memory discovery to the Architecture trait
Memory discovery (finding how much we have, where, etc.) shouldn't be
hardcoded in `mm/mod.rs`. Instead it should live in the Architecture trait
so that each platform can do its own thing (riscv/arm => device tree,
x86 => multiboot info or e820).

Originally I just wanted a `memory_regions -> impl Iterator<Item=T>`
method in the Architecture trait. But oh boy, no...
Currently a trait method that returns an `impl Trait` is not possible
(rustc 1.59.0-nightly).
In the future this should be possible, see: rust-lang/rust#63066
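
For illustration, a minimal sketch of the method I wanted (the trait name
and tuple shape mirror the diff below; this is exactly what rustc
currently rejects):

    trait Architecture {
        // rejected: `impl Trait` is not allowed in trait method return
        // types yet (see rust-lang/rust#63066)
        fn memory_regions(&self) -> impl Iterator<Item = (usize, usize)>;
    }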

Meanwhile, this is what I came up with.
Since I can't return an impl Trait but I can take one as a parameter, I
have a `for_all_memory_regions` method that creates the iterator and
then calls a caller-supplied closure with it. Code that uses this is a
bit uglier and less clear than if I could have returned an impl Trait,
but it should do for now. This MUST be fixed when the above issue is
resolved.
Also, the type signatures are a bit *complex*; this is due to my vicious
fight with the mighty borrow-checking and type-checking gods of Rust.
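
For illustration, a minimal sketch of the workaround (the trait method
matches the diff below; `total_memory` is a made-up caller):

    trait Architecture {
        // The method builds the iterator internally and hands it to a
        // caller-supplied closure instead of returning it.
        fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item = (usize, usize)>)>(
            &self,
            f: F,
        );
    }

    // Results escape the closure through a captured local, hence the
    // slightly awkward call sites.
    fn total_memory(arch: &impl Architecture) -> usize {
        let mut total: usize = 0;
        arch.for_all_memory_regions(|regions| {
            total = regions.map(|(_start, size)| size).sum();
        });
        total
    }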

Last but not least, a quote from the great:
"Everything is simple, everything is complex; it's up to you to find the
solution, and one solution may hide another."
-- Garnier
n1tram1 committed Feb 17, 2022
1 parent b429592 commit 19da7ed
Showing 5 changed files with 118 additions and 89 deletions.
12 changes: 9 additions & 3 deletions src/arch/mod.rs
@@ -1,19 +1,21 @@
 
 #[cfg(target_arch = "arm")]
 mod arm32;
 #[cfg(target_arch = "riscv64")]
 mod riscv64;
 
 
 use cfg_if::cfg_if;
 
 use crate::mm;
 
 #[cfg(target_arch = "riscv64")]
 pub type MemoryImpl = riscv64::sv39::PageTable;
 
-pub fn new_arch() -> impl Architecture {
+pub fn new_arch(info: usize) -> impl Architecture {
     cfg_if! {
         if #[cfg(target_arch = "riscv64")] {
-            riscv64::Riscv64::new()
+            riscv64::Riscv64::new(info)
         } else if #[cfg(target_arch = "arm")] {
             arm32::Arm32::new()
         } else {
@@ -24,8 +26,12 @@ pub fn new_arch() -> impl Architecture {
 
 pub trait Architecture {
     unsafe extern "C" fn _start() -> !;
 
     fn init_interrupts(&mut self);
 
+    fn new(info: usize) -> Self;
+
+    fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, f: F);
+    fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, f: F);
 }
 
 pub trait ArchitectureMemory {
38 changes: 33 additions & 5 deletions src/arch/riscv64/mod.rs
@@ -2,6 +2,7 @@ use super::Architecture;
 use crate::drivers::plic::plic_handler;
 use core::arch::asm;
 
+
 pub mod sv39;
 
 #[no_mangle]
@@ -20,13 +21,11 @@ static mut INTERRUPT_VECTOR: &[extern "C" fn()] = &[
     plic_handler,
 ];
 
-pub struct Riscv64 {}
+pub struct Riscv64 {
+    device_tree: fdt::Fdt<'static>,
+}
 
 impl Riscv64 {
-    pub fn new() -> Self {
-        Self {}
-    }
-
     fn set_sstatus_sie(&self) {
         unsafe {
             asm!("csrrs zero, sstatus, {}", in(reg)1 << 1);
@@ -58,13 +57,42 @@ impl Riscv64 {
     }
 }
 
+
 impl Architecture for Riscv64 {
     #[naked]
     #[no_mangle]
     unsafe extern "C" fn _start() -> ! {
         asm!("la sp, STACK_START", "call k_main", options(noreturn));
     }
 
+    fn new(info: usize) -> Self {
+        let device_tree = unsafe { fdt::Fdt::from_ptr(info as *const u8).unwrap() };
+
+        Self { device_tree }
+    }
+
+    fn for_all_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, mut f: F) {
+        let memory = self.device_tree.memory();
+        let mut regions = memory
+            .regions()
+            .map(|region| {
+                (region.starting_address as usize, region.size.unwrap_or(0))
+            });
+
+        f(&mut regions);
+    }
+
+    fn for_all_reserved_memory_regions<F: FnMut(&mut dyn Iterator<Item=(usize, usize)>)>(&self, mut f: F) {
+        let reserved_memory = self.device_tree.find_node("/reserved-memory").unwrap();
+
+        let mut regions = reserved_memory
+            .children()
+            .flat_map(|child| child.reg().unwrap())
+            .map(|region| (region.starting_address as usize, region.size.unwrap_or(0)));
+
+        f(&mut regions);
+    }
+
     fn init_interrupts(&mut self) {
         self.set_sstatus_sie();
         self.set_sie_ssie();
8 changes: 4 additions & 4 deletions src/main.rs
@@ -3,6 +3,8 @@
 #![feature(fn_align)]
 #![feature(naked_functions)]
 #![feature(custom_test_frameworks)]
+#![feature(associated_type_defaults)]
+#![feature(type_alias_impl_trait)]
 #![test_runner(crate::kernel_tests::runner)]
 #![reexport_test_harness_main = "ktests_launch"]
 
@@ -27,7 +29,7 @@ extern "C" fn k_main(_core_id: usize, device_tree_ptr: usize) -> ! {
     #[cfg(test)]
     ktests_launch();
 
-    let mut arch = arch::new_arch();
+    let mut arch = arch::new_arch(device_tree_ptr);
 
     kprintln!("GoOSe is booting");
 
@@ -44,9 +46,7 @@
     }
     plic.set_threshold(0);
 
-    let device_tree = unsafe { fdt::Fdt::from_ptr(device_tree_ptr as *const u8).unwrap() };
-
-    let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&device_tree);
+    let mut memory = mm::MemoryManager::<arch::MemoryImpl>::new(&arch);
     memory.map_address_space();
 
     kprintln!("Virtual memory enabled!");
31 changes: 16 additions & 15 deletions src/mm/mod.rs
@@ -32,17 +32,18 @@ pub fn is_kernel_page(base: usize) -> bool {
     base >= kernel_start && base < kernel_end
 }
 
-// TODO: shall this be moved to arch/riscv (in the MemoryImpl) ?
-pub fn is_reserved_page(base: usize, device_tree: &fdt::Fdt) -> bool {
-    let reserved_memory = device_tree.find_node("/reserved-memory").unwrap();
-    let mut reserved_pages = reserved_memory
-        .children()
-        .flat_map(|child| child.reg().unwrap())
-        .map(|region| (region.starting_address as usize, region.size.unwrap_or(0)));
-
-    reserved_pages.any(|(region_start, region_size)| {
-        base >= region_start && base <= (region_start + region_size)
-    })
+pub fn is_reserved_page(base: usize, arch: &impl arch::Architecture) -> bool {
+    let mut is_res = false;
+
+    arch.for_all_reserved_memory_regions(|regions| {
+        is_res = regions
+            .map(|(start, size)| (start, size)) // this is a weird hack to fix a type error.
+            .any(|(region_start, region_size)| {
+                base >= region_start && base <= (region_start + region_size)
+            })
+    });
+
+    return is_res;
 }
 
 pub struct MemoryManager<'alloc, T: arch::ArchitectureMemory> {
@@ -51,12 +52,12 @@ pub struct MemoryManager<'alloc, T: arch::ArchitectureMemory> {
 }
 
 impl<'alloc, T: arch::ArchitectureMemory> MemoryManager<'alloc, T> {
-    pub fn new(device_tree: &fdt::Fdt) -> Self {
+    pub fn new(arch: &impl arch::Architecture) -> Self {
         let mut page_manager =
-            page_manager::PageManager::from_device_tree(&device_tree, T::get_page_size());
-        let arch = T::new(&mut page_manager);
+            page_manager::PageManager::from_arch_info(arch, T::get_page_size());
+        let arch_mem = T::new(&mut page_manager);
 
-        Self { page_manager, arch }
+        Self { page_manager, arch: arch_mem }
     }
 
     fn map(&mut self, to: usize, from: usize, perms: Permissions) {
118 changes: 56 additions & 62 deletions src/mm/page_manager.rs
@@ -2,6 +2,7 @@ use super::page_alloc::{AllocatorError, PageAllocator};
 use crate::kprintln;
 use crate::mm;
 use core::mem;
+use crate::Architecture;
 
 #[derive(Debug, PartialEq)]
 pub enum PageKind {
@@ -55,18 +56,19 @@ pub struct PageManager<'a> {
 
 impl<'a> PageManager<'a> {
     fn count_pages(
-        regions: impl Iterator<Item = fdt::standard_nodes::MemoryRegion>,
+        arch: &impl Architecture,
         page_size: usize,
     ) -> usize {
-        regions
-            .map(|region| {
-                (region.starting_address, unsafe {
-                    region.starting_address.add(region.size.unwrap_or(0))
-                })
-            })
-            .map(|(start, end)| (start as usize, end as usize))
-            .flat_map(|(start, end)| (start..end).step_by(page_size))
-            .count()
+        let mut count = 0;
+
+        arch.for_all_memory_regions(|regions| {
+            count = regions
+                .map(|(start, end)| (start as usize, end as usize))
+                .flat_map(|(start, end)| (start..end).step_by(page_size))
+                .count();
+        });
+
+        count
     }
 
     fn align_up(addr: usize, alignment: usize) -> usize {
@@ -75,12 +77,12 @@ impl<'a> PageManager<'a> {
 
     fn phys_addr_to_physical_page(
         phys_addr: usize,
-        device_tree: &fdt::Fdt,
+        arch: &impl Architecture,
         page_size: usize,
     ) -> PhysicalPage {
         let kind = if mm::is_kernel_page(phys_addr) {
             PageKind::Kernel
-        } else if mm::is_reserved_page(phys_addr, device_tree) {
+        } else if mm::is_reserved_page(phys_addr, arch) {
             PageKind::Reserved
         } else {
             PageKind::Unused
@@ -96,44 +98,42 @@ impl<'a> PageManager<'a> {
     /// Look for `pages_needed` contiguous unused pages, beware of pages that are in use by the
     /// kernel.
     fn find_contiguous_unused_pages(
-        device_tree: &fdt::Fdt,
+        arch: &impl Architecture,
         pages_needed: usize,
         page_size: usize,
     ) -> Option<usize> {
-        let memory = device_tree.memory();
-
-        let physical_pages = memory
-            .regions()
-            .map(|region| {
-                (region.starting_address, unsafe {
-                    region.starting_address.add(region.size.unwrap_or(0))
-                })
-            })
-            .map(|(start, end)| (start as usize, end as usize))
-            .flat_map(|(start, end)| (start..end).step_by(page_size))
-            .map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size));
-
-        let mut first_page_addr: usize = 0;
-        let mut consecutive_pages: usize = 0;
-
-        for page in physical_pages {
-            if consecutive_pages == 0 {
-                first_page_addr = page.base;
-            }
-
-            if page.is_used() {
-                consecutive_pages = 0;
-                continue;
-            }
-
-            consecutive_pages += 1;
-
-            if consecutive_pages == pages_needed {
-                return Some(first_page_addr);
-            }
-        }
-
-        None
+        let mut found = None;
+
+        arch.for_all_memory_regions(|regions| {
+            let physical_pages = regions
+                .flat_map(|(start, end)| (start..end).step_by(page_size))
+                .map(|base| Self::phys_addr_to_physical_page(base, arch, page_size));
+
+            let mut first_page_addr: usize = 0;
+            let mut consecutive_pages: usize = 0;
+
+            for page in physical_pages {
+                if consecutive_pages == 0 {
+                    first_page_addr = page.base;
+                }
+
+                if page.is_used() {
+                    consecutive_pages = 0;
+                    continue;
+                }
+
+                consecutive_pages += 1;
+
+                if consecutive_pages == pages_needed {
+                    found = Some(first_page_addr);
+                    return
+                }
+            }
+
+            return;
+        });
+
+        found
     }
 
     /// TLDR: Initialize a [`PageAllocator`] from the device tree.
@@ -143,42 +143,36 @@ impl<'a> PageManager<'a> {
     /// - Second (in [`Self::find_contiguous_unused_pages`]), look through our pages for a contiguous
     /// space large enough to hold all our metadata.
    /// - Lastly store our metadata there, and mark pages as unused or kernel.
-    pub fn from_device_tree(device_tree: &fdt::Fdt, page_size: usize) -> Self {
-        let memory_node = device_tree.memory();
-
-        let page_count = Self::count_pages(memory_node.regions(), page_size);
+    pub fn from_arch_info(arch: &impl Architecture, page_size: usize) -> Self {
+        let page_count = Self::count_pages(arch, page_size);
         let metadata_size = page_count * mem::size_of::<PhysicalPage>();
         let pages_needed = Self::align_up(metadata_size, page_size) / page_size;
         kprintln!("total of {:?} pages", page_count);
         kprintln!("metadata_size: {:?}", metadata_size);
         kprintln!("pages_needed: {:?}", pages_needed);
 
         let metadata_addr =
-            Self::find_contiguous_unused_pages(device_tree, pages_needed, page_size).unwrap();
+            Self::find_contiguous_unused_pages(arch, pages_needed, page_size).unwrap();
         kprintln!("metadata_addr: {:X}", metadata_addr);
 
         let metadata: &mut [PhysicalPage] =
             unsafe { core::slice::from_raw_parts_mut(metadata_addr as *mut _, page_count) };
 
-        let physical_pages = memory_node
-            .regions()
-            .map(|region| {
-                (region.starting_address, unsafe {
-                    region.starting_address.add(region.size.unwrap_or(0))
-                })
-            })
-            .map(|(start, end)| (start as usize, end as usize))
-            .flat_map(|(start, end)| (start..end).step_by(page_size))
-            .map(|base| Self::phys_addr_to_physical_page(base, device_tree, page_size));
-
-        for (i, page) in physical_pages.enumerate() {
-            metadata[i] = page;
-        }
-
-        return Self {
+        arch.for_all_memory_regions(|regions| {
+            let physical_pages = regions
+                .flat_map(|(start, end)| (start..end).step_by(page_size))
+                .map(|base| Self::phys_addr_to_physical_page(base, arch, page_size));
+
+            for (i, page) in physical_pages.enumerate() {
+                metadata[i] = page;
+            }
+        });
+
+
+        Self {
             metadata,
             page_size,
-        };
+        }
     }
 
     pub fn page_size(&self) -> usize {
