
Commit: Hv memory hidden successfully #28 BOOM!
- Major improvements to memory manager.
- This might break the EPT hooks, but that can be fixed later; the change was needed to make the memory manager robust and to fix design flaws, which will also help the hooks.
memN0ps committed Jun 6, 2024
1 parent 4c6d9f8 commit 9a6992d
Showing 7 changed files with 200 additions and 26 deletions.
16 changes: 16 additions & 0 deletions hypervisor/src/allocate.rs
@@ -73,6 +73,22 @@ pub unsafe fn box_zeroed<T>() -> Box<T> {
unsafe { Box::from_raw(ptr) }
}

/// Creates a dummy page filled with a specific byte value.
///
/// # Arguments
///
/// * `fill_byte` - The byte value to fill the page with.
///
/// # Returns
///
/// The physical address of the dummy page.
pub fn create_dummy_page(fill_byte: u8) -> u64 {
let mut dummy_page = unsafe { box_zeroed::<Page>() };
dummy_page.0.iter_mut().for_each(|byte| *byte = fill_byte);
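// Intentionally leak the box so the page stays allocated for the hypervisor's lifetime; its raw address is returned to the caller.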
let dummy_page_pa = Box::into_raw(dummy_page) as u64;
dummy_page_pa
}

/// Records an image allocation in the global memory set.
/// This function is useful for tracking allocated memory regions for enhanced stealth capabilities.
///
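
For orientation, a minimal std-Rust sketch of the pattern `create_dummy_page` uses (a plain byte array stands in for the crate's `Page` type; the function name below is hypothetical):

const PAGE_SIZE: usize = 0x1000;

fn create_dummy_page_sketch(fill_byte: u8) -> u64 {
    // Allocate one page on the heap, pre-filled with the requested byte.
    let page = Box::new([fill_byte; PAGE_SIZE]);
    // Intentionally leak the allocation so the page outlives this function;
    // its address is what the hypervisor later hands to the EPT swap logic.
    Box::into_raw(page) as u64
}

fn main() {
    let dummy_page = create_dummy_page_sketch(0xff);
    println!("dummy page at {:#x}", dummy_page);
}
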
3 changes: 3 additions & 0 deletions hypervisor/src/error.rs
@@ -230,6 +230,9 @@ pub enum HypervisorError {
#[error("Active mapping error")]
ActiveMappingError,

#[error("Large page table mapping error")]
LargePtMappingError,

#[error("Failed to get hook info")]
HookInfoNotFound,
}
14 changes: 7 additions & 7 deletions hypervisor/src/intel/ept.rs
@@ -112,19 +112,19 @@ impl Ept {

Ok(())
}

/// Checks if a guest physical address is part of a large 2MB page.
///
/// This function is used to determine if a guest physical address is part of a large 2MB page.
///
/// # Arguments
///
/// * `guest_pa` - The guest physical address to check.
///
/// # Returns
///
/// `true` if the guest physical address is part of a large 2MB page, otherwise `false`.
pub fn is_page_split(&self, guest_pa: u64) -> bool {
pub fn is_large_page(&self, guest_pa: u64) -> bool {
let guest_pa = VAddr::from(guest_pa);
let pdpt_index = pdpt_index(guest_pa);
let pd_index = pd_index(guest_pa);
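
For background on the `is_large_page` check: whether a guest physical address falls inside a 2MB mapping ultimately comes down to bit 7 of the EPT page-directory entry (per the Intel SDM). A standalone sketch, independent of this crate's page-table types:

// Bit 7 of an EPT PDE selects between "maps a 2MB page" (set) and
// "references an EPT page table" (clear).
const EPT_PDE_LARGE: u64 = 1 << 7;

fn pde_maps_2mb_page(pde: u64) -> bool {
    pde & EPT_PDE_LARGE != 0
}

fn main() {
    assert!(pde_maps_2mb_page(0x2000_0087)); // RWX + large bit set
    assert!(!pde_maps_2mb_page(0x3000_0007)); // RWX, points to an EPT page table
}
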
106 changes: 104 additions & 2 deletions hypervisor/src/intel/hooks/hook_manager.rs
@@ -1,5 +1,6 @@
use {
crate::{
allocate::ALLOCATED_MEMORY,
error::HypervisorError,
intel::{
addresses::PhysicalAddress,
@@ -14,7 +15,7 @@ use {
},
windows::kernel::KernelHook,
},
alloc::boxed::Box,
alloc::{boxed::Box, vec::Vec},
core::intrinsics::copy_nonoverlapping,
log::*,
x86::bits64::paging::{PAddr, BASE_PAGE_SIZE},
@@ -78,6 +79,91 @@ impl HookManager {
}))
}

/// Hides the hypervisor's memory from the guest by installing EPT hooks on all allocated memory regions.
///
/// This function takes a snapshot of the `ALLOCATED_MEMORY` set and calls `ept_hide_hypervisor_memory`
/// for each recorded region, splitting the containing 2MB page into 4KB pages, swapping the guest page
/// with the dummy page, and applying the desired permissions.
///
/// # Arguments
///
/// * `vm` - The virtual machine instance of the hypervisor (the dummy page address is taken from `vm.dummy_page_pa`).
/// * `page_permissions` - The desired permissions for the hooked pages.
///
/// # Returns
///
/// * Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise.
pub fn hide_hypervisor_memory(vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> {
let allocated_memory: Vec<(u64, u64)> = {
let allocated_memory = ALLOCATED_MEMORY.lock();
allocated_memory.iter().copied().collect()
};

debug!("Allocated memory ranges:");
for &(base, end) in &allocated_memory {
debug!("Memory range: {:#x} - {:#x}", base, end);
}

for &(base, _end) in &allocated_memory {
HookManager::ept_hide_hypervisor_memory(vm, base, page_permissions)?;
}

Ok(())
}

/// Hides a single page of hypervisor memory from the guest by installing an EPT hook.
/// This function splits the containing 2MB page into 4KB pages and fills the dummy page with 0xff.
/// The guest page is then swapped with the dummy page and the desired permissions are applied.
///
/// # Arguments
///
/// * `vm` - The virtual machine instance of the hypervisor.
/// * `guest_page_pa` - The guest physical address of the page to hide.
/// * `page_permissions` - The desired permissions for the hooked page.
///
/// # Returns
///
/// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise.
fn ept_hide_hypervisor_memory(vm: &mut Vm, guest_page_pa: u64, page_permissions: AccessType) -> Result<(), HypervisorError> {
let guest_page_pa = PAddr::from(guest_page_pa).align_down_to_base_page();
debug!("Guest page PA: {:#x}", guest_page_pa.as_u64());

let guest_large_page_pa = guest_page_pa.align_down_to_large_page();
debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64());

let dummy_page_pa = vm.dummy_page_pa;
trace!("Dummy page PA: {:#x}", dummy_page_pa);

debug!("Mapping large page");
vm.hook_manager.memory_manager.map_large_pages(guest_large_page_pa.as_u64())?;

debug!("Filling shadow page with 0xff");
Self::unsafe_fill_shadow_page(PAddr::from(dummy_page_pa), 0xff);

let pre_alloc_pt = vm
.hook_manager
.memory_manager
.get_page_table_as_mut(guest_large_page_pa.as_u64())
.ok_or(HypervisorError::PageTableNotFound)?;

// Check if a guest page has already been split.
if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) {
debug!("Splitting 2MB page to 4KB pages for Primary EPT: {:#x}", guest_large_page_pa);
vm.primary_ept.split_2mb_to_4kb(guest_large_page_pa.as_u64(), pre_alloc_pt)?;
}

debug!("Swapping guest page: {:#x} with dummy page: {:#x}", guest_page_pa.as_u64(), dummy_page_pa);
vm.primary_ept
.swap_page(guest_page_pa.as_u64(), dummy_page_pa, page_permissions, pre_alloc_pt)?;

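// Invalidate cached EPT and VPID translations so the guest immediately observes the swapped dummy page rather than stale mappings.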
invept_all_contexts();
invvpid_all_contexts();

debug!("EPT hide hypervisor memory completed successfully");

Ok(())
}

/// Installs an EPT hook for a function.
///
/// # Arguments
@@ -144,7 +230,7 @@ impl HookManager {
let pre_alloc_pt = vm
.hook_manager
.memory_manager
.get_page_table_as_mut(guest_page_pa.as_u64())
.get_page_table_as_mut(guest_large_page_pa.as_u64())
.ok_or(HypervisorError::PageTableNotFound)?;

// Install the inline hook at the shadow function address, even if it's already installed (no check for now)
@@ -220,6 +306,22 @@ impl HookManager {
unsafe { copy_nonoverlapping(guest_page_pa.as_u64() as *mut u8, host_shadow_page_pa.as_u64() as *mut u8, BASE_PAGE_SIZE) };
}

/// Fills the shadow page with a specific byte value.
///
/// # Arguments
///
/// * `shadow_page_pa` - The physical address of the shadow page.
/// * `fill_byte` - The byte value to fill the page with.
///
/// # Safety
///
/// This function is unsafe because it performs a raw memory fill operation on the shadow page.
pub fn unsafe_fill_shadow_page(shadow_page_pa: PAddr, fill_byte: u8) {
unsafe {
core::ptr::write_bytes(shadow_page_pa.as_u64() as *mut u8, fill_byte, BASE_PAGE_SIZE);
}
}

/// Calculates the address of the function within the host shadow page.
///
/// # Arguments
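
The hide path above keys everything off two alignments of the same guest physical address: the 4KB page that gets swapped with the dummy page and the 2MB page that gets split (and that keys the pre-allocated page table). A standalone sketch of that arithmetic, using plain integers instead of the `x86` crate's `PAddr` helpers:

const BASE_PAGE_SIZE: u64 = 0x1000; // 4KB
const LARGE_PAGE_SIZE: u64 = 0x20_0000; // 2MB

fn align_down_to_base_page(pa: u64) -> u64 {
    pa & !(BASE_PAGE_SIZE - 1)
}

fn align_down_to_large_page(pa: u64) -> u64 {
    pa & !(LARGE_PAGE_SIZE - 1)
}

fn main() {
    let guest_pa: u64 = 0x1234_5678;
    // The 4KB page that is swapped with the dummy page.
    assert_eq!(align_down_to_base_page(guest_pa), 0x1234_5000);
    // The 2MB page that is split into 4KB entries and used as the key
    // for the pre-allocated page table in the memory manager.
    assert_eq!(align_down_to_large_page(guest_pa), 0x1220_0000);
}
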
71 changes: 55 additions & 16 deletions hypervisor/src/intel/hooks/memory_manager.rs
@@ -34,9 +34,6 @@ pub struct HookMapping {
/// The shadow page.
pub shadow_page: Box<Page>,

/// The page table.
pub page_table: Box<Pt>,

/// The list of hooks associated with this page.
pub hooks: Vec<HookInfo, MAX_HOOKS_PER_PAGE>,
}
@@ -48,8 +45,14 @@ pub struct MemoryManager {
/// Active mappings of guest physical addresses to their respective hook mappings.
active_mappings: LinearMap<u64, HookMapping, MAX_HOOK_ENTRIES>,

/// Mappings of large guest physical addresses to their respective page tables.
large_pt_mappings: LinearMap<u64, Box<Pt>, MAX_HOOK_ENTRIES>,

/// Free slots for hook mappings.
free_slots: Vec<usize, MAX_HOOK_ENTRIES>,
free_slots_hm: Vec<usize, MAX_HOOK_ENTRIES>,

/// Free slots for page tables.
free_slots_pt: Vec<usize, MAX_HOOK_ENTRIES>,
}

impl MemoryManager {
@@ -61,31 +64,41 @@ impl MemoryManager {
trace!("Initializing memory manager");

let mut active_mappings = LinearMap::<u64, HookMapping, MAX_HOOK_ENTRIES>::new();
let mut free_slots = Vec::<usize, MAX_HOOK_ENTRIES>::new();
let mut large_pt_mappings = LinearMap::<u64, Box<Pt>, MAX_HOOK_ENTRIES>::new();
let mut free_slots_hm = Vec::<usize, MAX_HOOK_ENTRIES>::new();
let mut free_slots_pt = Vec::<usize, MAX_HOOK_ENTRIES>::new();

trace!("Pre-allocating page tables and shadow pages");
trace!("Pre-allocating shadow pages and page tables");

// Pre-allocate shadow pages and page tables for hooks.
// Pre-allocate shadow pages for hooks and page tables for large pages.
for i in 0..MAX_HOOK_ENTRIES {
let pt = unsafe { box_zeroed::<Pt>() };
let sp = unsafe { box_zeroed::<Page>() };

active_mappings
.insert(
i as u64,
HookMapping {
shadow_page: sp,
page_table: pt,
hooks: Vec::<HookInfo, MAX_HOOKS_PER_PAGE>::new(),
},
)
.map_err(|_| HypervisorError::ActiveMappingError)?;
free_slots.push(i).map_err(|_| HypervisorError::ActiveMappingError)?;

let pt = unsafe { box_zeroed::<Pt>() };
large_pt_mappings.insert(i as u64, pt).map_err(|_| HypervisorError::LargePtMappingError)?;

free_slots_hm.push(i).map_err(|_| HypervisorError::ActiveMappingError)?;
free_slots_pt.push(i).map_err(|_| HypervisorError::LargePtMappingError)?;
}

trace!("Memory manager initialized");

Ok(Self { active_mappings, free_slots })
Ok(Self {
active_mappings,
large_pt_mappings,
free_slots_hm,
free_slots_pt,
})
}

/// Checks if a guest page is already processed (split and copied).
@@ -100,6 +113,7 @@ impl MemoryManager {
}

/// Maps a free page table and shadow page to a guest physical address, removing them from the free pool.
/// Maps the Large Page to the Page Table if not already mapped.
///
/// # Arguments
/// * `guest_page_pa` - The guest physical address to map.
@@ -131,7 +145,7 @@
mapping.hooks.push(hook_info).map_err(|_| HypervisorError::TooManyHooks)?;
} else {
trace!("Mapping does not exist, creating new mapping");
if let Some(free_slot) = self.free_slots.pop() {
if let Some(free_slot) = self.free_slots_hm.pop() {
trace!("Found free slot at index: {}", free_slot);
let key = free_slot as u64;
let mut mapping = self.active_mappings.remove(&key).unwrap();
@@ -149,15 +163,40 @@
Ok(())
}

/// Retrieves a mutable reference to the page table associated with a guest physical address.
/// Maps a free page table to a large guest physical address, removing it from the free pool.
///
/// # Arguments
///
/// * `guest_page_pa` - The guest physical address.
/// * `guest_large_page_pa` - The large guest physical address to map.
pub fn map_large_pages(&mut self, guest_large_page_pa: u64) -> Result<(), HypervisorError> {
// Ensure the large page has a page table (Pt)
if !self.large_pt_mappings.contains_key(&guest_large_page_pa) {
trace!("Large page not mapped to page table, mapping now");
if let Some(free_slot) = self.free_slots_pt.pop() {
trace!("Found free slot for page table at index: {}", free_slot);
let pt_key = free_slot as u64;
let pt = self.large_pt_mappings.remove(&pt_key).unwrap();
self.large_pt_mappings
.insert(guest_large_page_pa, pt)
.map_err(|_| HypervisorError::ActiveMappingError)?;
} else {
error!("No free page tables available for mapping");
return Err(HypervisorError::OutOfMemory);
}
}

Ok(())
}

/// Retrieves a mutable reference to the page table associated with a large guest physical address.
///
/// # Arguments
/// * `guest_large_page_pa` - The large guest physical address.
///
/// # Returns
/// An `Option` containing a mutable reference to the `Pt` if found.
pub fn get_page_table_as_mut(&mut self, guest_page_pa: u64) -> Option<&mut Pt> {
self.active_mappings.get_mut(&guest_page_pa).map(|mapping| &mut *mapping.page_table)
pub fn get_page_table_as_mut(&mut self, guest_large_page_pa: u64) -> Option<&mut Pt> {
self.large_pt_mappings.get_mut(&guest_large_page_pa).map(|pt| &mut **pt)
}

/// Retrieves a pointer to the shadow page associated with a guest physical address.
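
The reworked manager keeps two independent pre-allocated pools: shadow-page mappings (keyed by the 4KB guest page) and page tables (keyed by the 2MB guest page), each with its own free-slot list. A miniature std-Rust model of the slot-recycling idea behind `map_large_pages` (`HashMap`/`Vec` stand in for the heapless `LinearMap`/`Vec`, and the types are simplified):

use std::collections::HashMap;

const MAX_ENTRIES: usize = 4;

struct PtPoolSketch {
    // Pre-allocated page tables, initially keyed by slot index and
    // re-keyed by the 2MB guest page address once claimed.
    page_tables: HashMap<u64, Box<[u64; 512]>>,
    free_slots: Vec<usize>,
}

impl PtPoolSketch {
    fn new() -> Self {
        let mut page_tables = HashMap::new();
        let mut free_slots = Vec::new();
        for i in 0..MAX_ENTRIES {
            page_tables.insert(i as u64, Box::new([0u64; 512]));
            free_slots.push(i);
        }
        Self { page_tables, free_slots }
    }

    // Mirrors map_large_pages: claim a free page table the first time a
    // 2MB guest page is seen; later calls for the same page are no-ops.
    fn map_large_page(&mut self, guest_large_page_pa: u64) -> Result<(), &'static str> {
        if self.page_tables.contains_key(&guest_large_page_pa) {
            return Ok(());
        }
        let slot = self.free_slots.pop().ok_or("no free page tables left")?;
        let pt = self.page_tables.remove(&(slot as u64)).unwrap();
        self.page_tables.insert(guest_large_page_pa, pt);
        Ok(())
    }
}

fn main() {
    let mut pool = PtPoolSketch::new();
    pool.map_large_page(0x1220_0000).unwrap();
    assert!(pool.page_tables.contains_key(&0x1220_0000));
}
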
9 changes: 8 additions & 1 deletion hypervisor/src/intel/vm.rs
@@ -7,7 +7,7 @@
use {
crate::{
allocate::box_zeroed,
allocate::{box_zeroed, create_dummy_page},
error::HypervisorError,
intel::{
bitmap::{MsrAccessType, MsrBitmap, MsrOperation},
@@ -63,6 +63,9 @@ pub struct Vm {

/// Flag indicating if the VM has been launched.
pub has_launched: bool,

/// Physical address of a dummy page.
pub dummy_page_pa: u64,
}

impl Vm {
@@ -108,6 +111,9 @@ impl Vm {
trace!("Creating EPT hook manager");
let hook_manager = HookManager::new()?;

trace!("Creating dummy page filled with 0xffs");
let dummy_page_pa = create_dummy_page(0xff);

trace!("VM created");

Ok(Self {
Expand All @@ -121,6 +127,7 @@ impl Vm {
primary_eptp,
guest_registers: guest_registers.clone(),
has_launched: false,
dummy_page_pa,
})
}

7 changes: 7 additions & 0 deletions hypervisor/src/vmm.rs
@@ -10,6 +10,8 @@ use {
intel::{
bitmap::MsrAccessType,
capture::GuestRegisters,
ept::AccessType,
hooks::hook_manager::HookManager,
support::{rdmsr, vmread, vmwrite},
vm::Vm,
vmerror::VmxBasicExitReason,
@@ -78,6 +80,11 @@ pub fn start_hypervisor(guest_registers: &GuestRegisters) -> ! {
Err(e) => panic!("Failed to activate VMCS: {:?}", e),
}

match HookManager::hide_hypervisor_memory(&mut vm, AccessType::READ_WRITE_EXECUTE) {
Ok(_) => debug!("Hypervisor memory hidden"),
Err(e) => panic!("Failed to hide hypervisor memory: {:?}", e),
};

info!("Launching the VM until a vmexit occurs...");

loop {
