Simple bitmap page allocator
Replaces the freelist-based page allocator and can be used for the lifetime of the kernel.

Signed-off-by: Graham MacDonald <[email protected]>
gmacd committed Jan 31, 2024
1 parent e176e4f commit 71fe013
Showing 10 changed files with 522 additions and 218 deletions.
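The allocator's core type, BitmapPageAlloc, lives in port/src/bitmapalloc.rs, one of the changed files not rendered below. As a minimal sketch of the underlying idea only (not the actual port::bitmapalloc code): one bit per page, starting with every page marked allocated, exactly as the kalloc.rs changes below assume.

struct TinyBitmapAlloc<const NBYTES: usize> {
    bits: [u8; NBYTES], // one bit per page; 1 = allocated, 0 = free
    page_size: usize,
}

impl<const NBYTES: usize> TinyBitmapAlloc<NBYTES> {
    /// Everything starts allocated; ranges are freed explicitly later.
    const fn new_all_allocated(page_size: usize) -> Self {
        Self { bits: [0xff; NBYTES], page_size }
    }

    fn mark_free(&mut self, page_idx: usize) {
        self.bits[page_idx / 8] &= !(1 << (page_idx % 8));
    }

    /// Find the first clear bit, set it, and return the page's byte offset.
    fn allocate(&mut self) -> Option<usize> {
        for (i, byte) in self.bits.iter_mut().enumerate() {
            if *byte != 0xff {
                let bit = byte.trailing_ones() as usize;
                *byte |= 1 << bit;
                return Some((i * 8 + bit) * self.page_size);
            }
        }
        None
    }
}

How the real type maps its const parameters (the 16 in BitmapPageAlloc<16, PAGE_SIZE_4K>) onto bitmap storage isn't visible in this commit, so the shape above is an assumption.
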
97 changes: 58 additions & 39 deletions aarch64/src/kalloc.rs
@@ -1,54 +1,73 @@
use crate::vm::{Page4K, PAGE_SIZE_4K};
use core::ptr;
use port::mcslock::{Lock, LockNode};

static FREE_LIST: Lock<FreeList> = Lock::new("kmem", FreeList { next: None });
/// This module acts as an interface between the portable allocator and the
/// arch-specific use of it.
///
/// The page allocator is constructed and finalised in a number of phases:
/// 1. `init_page_allocator` to create a fixed-size allocator that assumes
///    everything is in use except a small number of statically defined pages
///    made available for setting up the initial page tables.
/// 2. `free_unused_ranges` to mark as free the inverse of the used ranges in
///    the physical memory map, within the bounds of the available memory.
use crate::{
kmem::{self, from_physaddr_to_ptr_mut},
vm::{Page4K, PAGE_SIZE_4K},
};
use port::{
bitmapalloc::{BitmapPageAlloc, BitmapPageAllocError},
mcslock::{Lock, LockNode},
mem::PhysRange,
};

#[repr(align(4096))]
struct FreeList {
next: Option<ptr::NonNull<FreeList>>,
}
unsafe impl Send for FreeList {}
/// Set up bitmap page allocator assuming everything is allocated.
static PAGE_ALLOC: Lock<BitmapPageAlloc<16, PAGE_SIZE_4K>> = Lock::new(
"page_alloc",
const { BitmapPageAlloc::<16, PAGE_SIZE_4K>::new_all_allocated(PAGE_SIZE_4K) },
);

#[derive(Debug)]
pub enum Error {
NoFreeBlocks,
}
/// The bitmap allocator initially has all pages marked as allocated. We mark
/// a small number of pages as free to allow us to set up the initial page
/// tables and build a memory map. Once the memory map has been built, we can
/// mark all the unused space as available. This allows us to use a single
/// page allocator throughout.
pub fn init_page_allocator() {
static mut NODE: LockNode = LockNode::new();
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;

impl FreeList {
pub fn put(&mut self, page: &mut Page4K) {
let ptr = (page as *mut Page4K).addr();
assert_eq!(ptr % PAGE_SIZE_4K, 0, "freeing unaligned page");
page.scribble();
let f = page as *mut Page4K as *mut FreeList;
unsafe {
ptr::write(f, FreeList { next: self.next });
}
self.next = ptr::NonNull::new(f);
let early_pages_range = kmem::early_pages_range();
if let Err(err) = page_alloc.mark_free(&early_pages_range) {
panic!("Couldn't mark early pages free: range: {} err: {:?}", early_pages_range, err);
}
}

pub fn get(&mut self) -> Result<&'static mut Page4K, Error> {
let mut next = self.next.ok_or(Error::NoFreeBlocks)?;
let next = unsafe { next.as_mut() };
self.next = next.next;
let pg = unsafe { &mut *(next as *mut FreeList as *mut Page4K) };
pg.clear();
Ok(pg)
}
/// Free the pages in `available_mem` that aren't covered by any of the
/// `used_ranges`. Assumes that `used_ranges` is sorted.
pub fn free_unused_ranges<'a>(
available_mem: &PhysRange,
used_ranges: impl Iterator<Item = &'a PhysRange>,
) -> Result<(), BitmapPageAllocError> {
static mut NODE: LockNode = LockNode::new();
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;

page_alloc.free_unused_ranges(available_mem, used_ranges)
}

pub unsafe fn free_pages(pages: &mut [Page4K]) {
/// Try to allocate a page
pub fn allocate() -> Result<&'static mut Page4K, BitmapPageAllocError> {
static mut NODE: LockNode = LockNode::new();
let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
let fl = &mut *lock;
for page in pages.iter_mut() {
fl.put(page);
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;

match page_alloc.allocate() {
Ok(page_pa) => Ok(unsafe { &mut *from_physaddr_to_ptr_mut::<Page4K>(page_pa) }),
Err(err) => Err(err),
}
}

pub fn alloc() -> Result<&'static mut Page4K, Error> {
pub fn usage_bytes() -> (usize, usize) {
static mut NODE: LockNode = LockNode::new();
let mut lock = FREE_LIST.lock(unsafe { &*ptr::addr_of!(NODE) });
let fl = &mut *lock;
fl.get()
let mut lock = PAGE_ALLOC.lock(unsafe { &*ptr::addr_of!(NODE) });
let page_alloc = &mut *lock;
page_alloc.usage_bytes()
}
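
Read together with main.rs below, the new kalloc.rs interface implies a two-phase boot sequence. A sketch of the intended call order follows; not all of the call sites are visible in the rendered hunks, and the range names here are illustrative.

// Phase 1: all pages are "allocated" except the statically reserved
// early-pagetable pages, which suffice to build the initial page tables.
kalloc::init_page_allocator();

// ... vm::init builds the kernel page tables via kalloc::allocate() ...

// Phase 2: once the memory map is known, mark everything outside the
// used ranges as free (illustrative names, not the exact call site).
kalloc::free_unused_ranges(&available_mem, used_ranges.iter())
    .expect("couldn't free unused ranges");

// The same allocator then serves the rest of the kernel's lifetime.
let page = kalloc::allocate().expect("out of physical pages");
let (used, total) = kalloc::usage_bytes();
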
147 changes: 15 additions & 132 deletions aarch64/src/kmem.rs
@@ -1,13 +1,6 @@
use port::fdt::RegBlock;
use port::mem::{PhysAddr, PhysRange};

use crate::{param::KZERO, vm::Page4K};
use core::{
fmt,
iter::{Step, StepBy},
mem,
ops::{self, Range},
slice,
};
use crate::param::KZERO;

// These map to definitions in kernel.ld
extern "C" {
@@ -42,137 +35,27 @@ pub fn eearly_pagetables_addr() -> usize {
unsafe { eearly_pagetables.as_ptr().addr() }
}

#[derive(Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
#[repr(transparent)]
pub struct PhysAddr(u64);

impl PhysAddr {
pub const fn new(value: u64) -> Self {
PhysAddr(value)
}

pub const fn addr(&self) -> u64 {
self.0
}

pub const fn as_virt(&self) -> usize {
(self.0 as usize).wrapping_add(KZERO)
}

pub fn from_virt(a: usize) -> Self {
Self((a - KZERO) as u64)
}

pub fn from_ptr<T>(a: *const T) -> Self {
Self::from_virt(a.addr())
}

pub const fn as_ptr_mut<T>(&self) -> *mut T {
self.as_virt() as *mut T
}

pub const fn round_up(&self, step: u64) -> PhysAddr {
PhysAddr((self.0 + step - 1) & !(step - 1))
}

pub const fn round_down(&self, step: u64) -> PhysAddr {
PhysAddr(self.0 & !(step - 1))
}
}

impl ops::Add<u64> for PhysAddr {
type Output = PhysAddr;

fn add(self, offset: u64) -> PhysAddr {
PhysAddr(self.0 + offset)
}
}

/// Note that this implementation will round down the startpa and round up the endpa
impl Step for PhysAddr {
fn steps_between(&startpa: &Self, &endpa: &Self) -> Option<usize> {
if startpa.0 <= endpa.0 {
match endpa.0.checked_sub(startpa.0) {
Some(result) => usize::try_from(result).ok(),
None => None,
}
} else {
None
}
}

fn forward_checked(startpa: Self, count: usize) -> Option<Self> {
startpa.0.checked_add(count as u64).map(PhysAddr)
}

fn backward_checked(startpa: Self, count: usize) -> Option<Self> {
startpa.0.checked_sub(count as u64).map(PhysAddr)
}
pub const fn from_physaddr_to_virt(pa: PhysAddr) -> usize {
(pa.addr() as usize).wrapping_add(KZERO)
}

impl fmt::Debug for PhysAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "PhysAddr({:#016x})", self.0)?;
Ok(())
}
pub const fn from_virt_to_physaddr(va: usize) -> PhysAddr {
PhysAddr((va - KZERO) as u64)
}

pub struct PhysRange(pub Range<PhysAddr>);

impl PhysRange {
pub fn with_len(start: u64, len: usize) -> Self {
Self(PhysAddr(start)..PhysAddr(start + len as u64))
}

#[allow(dead_code)]
pub fn offset_addr(&self, offset: u64) -> Option<PhysAddr> {
let addr = self.0.start + offset;
if self.0.contains(&addr) {
Some(addr)
} else {
None
}
}

pub fn start(&self) -> PhysAddr {
self.0.start
}

pub fn end(&self) -> PhysAddr {
self.0.end
}

pub fn step_by_rounded(&self, step_size: usize) -> StepBy<Range<PhysAddr>> {
let startpa = self.start().round_down(step_size as u64);
let endpa = self.end().round_up(step_size as u64);
(startpa..endpa).step_by(step_size)
}
pub fn from_ptr_to_physaddr<T>(a: *const T) -> PhysAddr {
from_virt_to_physaddr(a.addr())
}

impl From<&RegBlock> for PhysRange {
fn from(r: &RegBlock) -> Self {
let start = PhysAddr(r.addr);
let end = start + r.len.unwrap_or(0);
PhysRange(start..end)
}
}

unsafe fn page_slice_mut<'a>(pstart: *mut Page4K, pend: *mut Page4K) -> &'a mut [Page4K] {
let ustart = pstart.addr();
let uend = pend.addr();
const PAGE_SIZE: usize = mem::size_of::<Page4K>();
assert_eq!(ustart % PAGE_SIZE, 0, "page_slice_mut: unaligned start page");
assert_eq!(uend % PAGE_SIZE, 0, "page_slice_mut: unaligned end page");
assert!(ustart < uend, "page_slice_mut: bad range");

let len = (uend - ustart) / PAGE_SIZE;
unsafe { slice::from_raw_parts_mut(ustart as *mut Page4K, len) }
pub const fn from_physaddr_to_ptr_mut<T>(pa: PhysAddr) -> *mut T {
from_physaddr_to_virt(pa) as *mut T
}

pub fn early_pages() -> &'static mut [Page4K] {
let early_start = early_pagetables_addr() as *mut Page4K;
let early_end = eearly_pagetables_addr() as *mut Page4K;
unsafe { page_slice_mut(early_start, early_end) }
pub fn early_pages_range() -> PhysRange {
PhysRange::new(
from_virt_to_physaddr(early_pagetables_addr()),
from_virt_to_physaddr(eearly_pagetables_addr()),
)
}

#[cfg(test)]
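The kmem.rs rewrite drops the local PhysAddr/PhysRange definitions (they move to port::mem, also not rendered here) and keeps only free conversion functions around the fixed KZERO offset: the kernel window maps physical address pa at virtual address pa + KZERO, so the two conversions round-trip. A sketch under that assumption, with an illustrative KZERO value (the real one comes from param.rs):

const KZERO: usize = 0xffff_8000_0000_0000; // illustrative, not r9's actual value

fn round_trip(pa: u64) -> u64 {
    let va = (pa as usize).wrapping_add(KZERO); // cf. from_physaddr_to_virt
    (va - KZERO) as u64                         // cf. from_virt_to_physaddr
}

fn main() {
    assert_eq!(round_trip(0x8_0000), 0x8_0000);
}
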
10 changes: 5 additions & 5 deletions aarch64/src/mailbox.rs
Original file line number Diff line number Diff line change
@@ -4,7 +4,7 @@ use core::mem;
use core::mem::MaybeUninit;
use port::fdt::DeviceTree;
use port::mcslock::{Lock, LockNode};
use port::mem::VirtRange;
use port::mem::{PhysAddr, PhysRange, VirtRange};

const MBOX_READ: usize = 0x00;
const MBOX_STATUS: usize = 0x18;
@@ -191,7 +191,7 @@ pub struct MemoryInfo {
pub end: u32,
}

pub fn get_arm_memory() -> MemoryInfo {
pub fn get_arm_memory() -> PhysRange {
let tags = Tag::<EmptyRequest> {
tag_id0: TagId::GetArmMemory,
tag_buffer_size0: 12,
@@ -204,10 +204,10 @@ pub fn get_arm_memory() -> MemoryInfo {
let size = res.size;
let end = start + size;

MemoryInfo { start, size, end }
PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
}

pub fn get_vc_memory() -> MemoryInfo {
pub fn get_vc_memory() -> PhysRange {
let tags = Tag::<EmptyRequest> {
tag_id0: TagId::GetVcMemory,
tag_buffer_size0: 12,
@@ -220,7 +220,7 @@ pub fn get_vc_memory() -> MemoryInfo {
let size = res.size;
let end = start + size;

MemoryInfo { start, size, end }
PhysRange::new(PhysAddr::new(start as u64), PhysAddr::new(end as u64))
}

pub fn get_firmware_revision() -> u32 {
32 changes: 18 additions & 14 deletions aarch64/src/main.rs
@@ -6,7 +6,7 @@
#![feature(asm_const)]
#![feature(core_intrinsics)]
#![feature(stdsimd)]
#![feature(step_trait)]
#![feature(inline_const)]
#![feature(strict_provenance)]
#![forbid(unsafe_op_in_unsafe_fn)]

@@ -22,11 +22,12 @@ mod uartmini;
mod uartpl011;
mod vm;

use crate::kmem::{PhysAddr, PhysRange};
use crate::kmem::from_virt_to_physaddr;
use crate::vm::kernel_root;
use core::ffi::c_void;
use core::ptr;
use port::fdt::DeviceTree;
use port::mem::PhysRange;
use port::println;
use vm::PageTable;

@@ -39,7 +40,7 @@ unsafe fn print_memory_range(name: &str, start: &*const c_void, end: &*const c_void) {
let start = start as *const _ as u64;
let end = end as *const _ as u64;
let size = end - start;
println!(" {name}{start:#x}-{end:#x} ({size:#x})");
println!(" {name}{start:#x}..{end:#x} ({size:#x})");
}

fn print_binary_sections() {
@@ -67,12 +68,17 @@ fn print_binary_sections() {
}
}

fn print_physical_memory_map() {
fn print_memory_info() {
println!("Physical memory map:");
let mailbox::MemoryInfo { start, size, end } = mailbox::get_arm_memory();
println!(" Memory:\t{start:#018x}-{end:#018x} ({size:#x})");
let mailbox::MemoryInfo { start, size, end } = mailbox::get_vc_memory();
println!(" Video:\t{start:#018x}-{end:#018x} ({size:#x})");
let arm_mem = mailbox::get_arm_memory();
println!(" Memory:\t{arm_mem} ({:#x})", arm_mem.size());
let vc_mem = mailbox::get_vc_memory();
println!(" Video:\t{vc_mem} ({:#x})", vc_mem.size());

println!("Memory usage::");
let (used, total) = kalloc::usage_bytes();
println!(" Used:\t\t{used:#016x}");
println!(" Total:\t{total:#016x}");
}

// https://github.com/raspberrypi/documentation/blob/develop/documentation/asciidoc/computers/raspberry-pi/revision-codes.adoc
@@ -121,15 +127,13 @@ pub extern "C" fn main9(dtb_va: usize) {

// Map address space accurately using rust VM code to manage page tables
unsafe {
kalloc::free_pages(kmem::early_pages());

let dtb_range = PhysRange::with_len(PhysAddr::from_virt(dtb_va).addr(), dt.size());
vm::init(&dt, &mut *ptr::addr_of_mut!(KPGTBL), dtb_range);
vm::switch(&*ptr::addr_of!(KPGTBL));
let dtb_range = PhysRange::with_len(from_virt_to_physaddr(dtb_va).addr(), dt.size());
vm::init(&mut *ptr::addr_of_mut!(KPGTBL), dtb_range, mailbox::get_arm_memory());
vm::switch(&*ptr::addr_of!(KPGTBL));
}

print_binary_sections();
print_physical_memory_map();
print_memory_info();
print_board_info();

kernel_root().print_recursive_tables();