Skip to content
This repository has been archived by the owner on Sep 1, 2024. It is now read-only.

Commit

Permalink
Merge pull request #35 from memN0ps/dev
Browse files Browse the repository at this point in the history
Refactor pa_from_va to handle guest to host address translation
  • Loading branch information
memN0ps committed Jul 31, 2024
2 parents b6ca1a0 + 9ebae16 commit 58c5fb6
Show file tree
Hide file tree
Showing 9 changed files with 175 additions and 146 deletions.
101 changes: 48 additions & 53 deletions hypervisor/src/intel/addresses.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,9 +5,15 @@
//! as well as methods for extracting page frame numbers (PFNs) and other address-related information.

use {
crate::intel::paging::PageTables,
core::ops::{Deref, DerefMut},
x86::bits64::paging::{PAddr, BASE_PAGE_SHIFT},
crate::{
error::HypervisorError,
intel::{ept::Ept, paging::PageTables, support::vmread},
},
log::trace,
x86::{
bits64::paging::{PAddr, BASE_PAGE_SHIFT},
vmx::vmcs,
},
};

/// A representation of physical addresses.
Expand All @@ -23,16 +29,16 @@ impl PhysicalAddress {
Self(PAddr::from(pa))
}

/// Builds a `PhysicalAddress` by translating the given guest virtual address.
///
/// Translation is delegated to [`Self::pa_from_va`]; any translation
/// failure is propagated to the caller unchanged.
pub fn from_va(va: u64) -> Result<Self, HypervisorError> {
    let pa = Self::pa_from_va(va)?;
    Ok(Self(PAddr::from(pa)))
}

/// Builds a `PhysicalAddress` from a page frame number (PFN).
///
/// The PFN is shifted left by `BASE_PAGE_SHIFT` to recover the
/// page-aligned physical address it denotes.
pub fn from_pfn(pfn: u64) -> Self {
    let raw = pfn << BASE_PAGE_SHIFT;
    Self(PAddr::from(raw))
}

/// Constructs a `PhysicalAddress` from a given virtual address.
///
/// NOTE(review): this infallible variant appears to be the pre-refactor
/// version of `from_va` shown elsewhere in this diff returning
/// `Result<Self, HypervisorError>`; it will not surface translation
/// failures to the caller — confirm which variant is current before
/// relying on it.
pub fn from_va(va: u64) -> Self {
Self(PAddr::from(Self::pa_from_va(va)))
}

/// Retrieves the page frame number (PFN) for the physical address.
pub fn pfn(&self) -> u64 {
self.0.as_u64() >> BASE_PAGE_SHIFT
Expand All @@ -43,60 +49,49 @@ impl PhysicalAddress {
self.0.as_u64()
}

/// Converts a guest virtual address to its corresponding physical address
/// by walking the guest's page tables rooted at the guest CR3.
///
/// # Arguments
///
/// * `va` - The guest virtual address to translate.
///
/// # Panics
///
/// Panics if the guest page tables contain no valid translation for
/// `va`; callers must ensure the address is mapped before calling.
pub fn pa_from_va(va: u64) -> u64 {
    let guest_cr3 = PageTables::get_guest_cr3();
    // `expect` (rather than a bare `unwrap`) gives a diagnosable message
    // when the page-table walk fails; the signature returns a plain u64,
    // so the failure cannot be propagated without breaking callers.
    PageTables::translate_guest_virtual_to_physical(guest_cr3 as usize, va as _)
        .expect("failed to translate guest virtual address to physical address") as u64
}

/// Reads a value of a specified type from guest memory at the provided virtual address, ensuring safety by internal validation.
/// Converts a guest virtual address to its corresponding host physical address.
///
/// # Arguments
/// This function first translates the guest virtual address to a guest physical address
/// using the guest's CR3. It then translates the guest physical address to a host physical
/// address using the EPT (Extended Page Table).
///
/// * `guest_cr3` - The base address of the guest's page table hierarchy.
/// * `guest_va` - The guest virtual address from which to read.
/// # Arguments
///
/// # Returns
/// * `va` - The guest virtual address to translate.
///
/// * Returns an `Option<T>` which is `Some(value)` if the read is successful and safe, or `None` if the address cannot be translated or if safety conditions are not met.
/// # Safety
///
/// # Type Parameters
/// This function is unsafe because it involves raw memory access and relies on the integrity
/// of the VMCS (Virtual Machine Control Structure).
///
/// * `T` - The type of the value to read. This can be any type that implements the `Copy` trait and has a size that can be read atomically.
/// # Returns
///
/// # Credits
/// Credits to Jessie (jessiep_) for the initial concept.
/// Reads a `Copy` value of type `T` from guest memory at `guest_va`.
///
/// Returns `None` when the guest virtual address cannot be translated
/// through the page tables rooted at `guest_cr3`.
///
/// # Credits
/// Credits to Jessie (jessiep_) for the initial concept.
pub fn read_guest_memory<T: Copy>(guest_cr3: usize, guest_va: usize) -> Option<T> {
    // SAFETY: the dereference only happens for addresses the page-table
    // walk successfully resolved, and `T: Copy` means duplicating the
    // value creates no drop/ownership obligations. The caller remains
    // responsible for ensuring that reading this specific address does
    // not violate any other safety contracts.
    PageTables::translate_guest_virtual_to_physical(guest_cr3, guest_va).map(|pa| unsafe { *(pa as *const T) })
}
}
/// A `Result<u64, HypervisorError>` containing the host physical address on success, or an error if the translation fails.
pub fn pa_from_va(va: u64) -> Result<u64, HypervisorError> {
let guest_cr3 = vmread(vmcs::guest::CR3);
trace!("Guest CR3: {:#x}", guest_cr3);

impl Deref for PhysicalAddress {
type Target = PAddr;
let guest_pa = unsafe { PageTables::translate_guest_virtual_to_guest_physical(guest_cr3, va)? };
trace!("Guest VA: {:#x} -> Guest PA: {:#x}", va, guest_pa);

/// Dereferences the `PhysicalAddress` to retrieve the underlying `PAddr`.
fn deref(&self) -> &Self::Target {
&self.0
}
}
// Translate guest physical address (GPA) to host physical address (HPA) using Extended Page Tables (EPT)
// In a 1:1 mapping, the guest physical address is the same as the host physical address.
// This translation is not required in a 1:1 mapping but is done for demonstration purposes
// and in case changes are made to the Paging/EPT.
let vmcs_eptp = vmread(vmcs::control::EPTP_FULL);
trace!("VMCS EPTP: {:#x}", vmcs_eptp);

/// Lets a `PhysicalAddress` be used wherever a mutable `PAddr` is
/// expected by forwarding mutable dereferences to the wrapped value.
impl DerefMut for PhysicalAddress {
/// Provides mutable access to the underlying `PAddr`.
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
let (pml4_address, _, _) = Ept::decode_eptp(vmcs_eptp)?;
trace!("EPT PML4 Address: {:#x}", pml4_address);

/// Converts a virtual address to its corresponding physical address.
///
/// # Arguments
///
/// * `ptr` - The virtual address to convert.
pub fn physical_address(ptr: *const u64) -> PAddr {
PhysicalAddress::from_va(ptr as u64).0
// Note: This may cause a crash at `!pt_entry.readable()` because the hypervisor has pre-allocated page tables
// in the hook_manager that are not passed to this function. We're attempting to translate a guest physical address to a host physical address using the EPT.
// The hypervisor maps everything as 2MB pages. The hooked pages are split and stored in the pre-allocated Pt,
// which are usually passed as a parameter, those are not stored in the EPT structure.
// This translation is not required in a 1:1 mapping but is done for demonstration purposes and in case changes are made to the Paging/EPT.
// let host_pa = unsafe { Ept::translate_guest_pa_to_host_pa(pml4_address, guest_pa)? };
// trace!("Guest PA: {:#x} -> Host PA: {:#x}", guest_pa, host_pa);

Ok(guest_pa)
}
}
57 changes: 27 additions & 30 deletions hypervisor/src/intel/ept.rs
Original file line number Diff line number Diff line change
Expand Up @@ -121,94 +121,90 @@ impl Ept {
Ok(())
}

/// Reads a value from a given guest physical address.
/// Translates a guest physical address to a host physical address using the EPT.
///
/// This function translates the guest physical address (GPA) to the corresponding host physical address (HPA)
/// using the EPT, and then reads the value at the HPA.
/// This function traverses the EPT hierarchy (PML4, PDPT, PD, PT) to translate the given
/// guest physical address (GPA) to its corresponding host physical address (HPA).
///
/// # Arguments
///
/// * `guest_pa` - The guest physical address to read from.
/// * `guest_pa` - The guest physical address to translate.
///
/// # Returns
///
/// A `Result<u64, HypervisorError>` containing the value read from the address on success.
/// Reads a `u64` from guest memory at the given guest physical address.
///
/// The GPA is first translated to a host physical address via the EPT
/// walk in `translate_guest_pa_to_host_pa`, then the value at that HPA
/// is read through a raw pointer.
///
/// # Arguments
///
/// * `guest_pa` - The guest physical address to read from.
///
/// # Returns
///
/// A `Result<u64, HypervisorError>` containing the value read on
/// success, or the translation error from the EPT walk.
pub fn read_guest_pa(&self, guest_pa: u64) -> Result<u64, HypervisorError> {
// Translate the guest physical address to host physical address.
// In a 1:1 mapping, the guest physical address is the same as the host physical address.
// Assuming the environment allows direct memory access to those addresses.
let host_pa = self.translate_guest_pa_to_host_pa(guest_pa)?;
trace!("Reading from GPA {:#x} (HPA: {:#x})", guest_pa, host_pa);

// Read the value at the host physical address.
// Assuming the host physical address can be directly dereferenced.
// You may need to adjust the pointer dereferencing method based on your environment.
// SAFETY: the EPT walk above succeeded, so `host_pa` was produced by a
// valid translation; the raw read additionally assumes the host can
// directly dereference that physical address (see the 1:1-mapping
// assumption noted above) — TODO confirm for non-identity mappings.
let value = unsafe { *(host_pa as *const u64) };
trace!("Read value: {:#x}", value);

Ok(value)
}

/// A `Result<u64, HypervisorError>` containing the host physical address on success.
/// Translates a guest physical address to a host physical address using the EPT.
///
/// This function traverses the EPT hierarchy (PML4, PDPT, PD, PT) to translate the given
/// guest physical address (GPA) to its corresponding host physical address (HPA).
///
/// # Arguments
///
/// * `ept_base` - The base address of the EPT structure.
/// * `guest_pa` - The guest physical address to translate.
///
/// # Returns
///
/// A `Result<u64, HypervisorError>` containing the host physical address on success.
pub fn translate_guest_pa_to_host_pa(&self, guest_pa: u64) -> Result<u64, HypervisorError> {
pub unsafe fn translate_guest_pa_to_host_pa(ept_base: u64, guest_pa: u64) -> Result<u64, HypervisorError> {
let guest_pa = VAddr::from(guest_pa);

// Cast the EPT base to the PML4 table structure.
let pml4_table = ept_base as *const Pml4;

// Calculate the PML4 index and access the corresponding entry.
let pmld4_index = pml4_index(guest_pa);
let pml4_entry = &self.pml4.0.entries[pmld4_index];
let pml4_index = pml4_index(guest_pa);
let pml4_entry = &(*pml4_table).0.entries[pml4_index];

// Check if the PML4 entry is present (readable).
if !pml4_entry.readable() {
error!("PML4 entry is not present: {:#x}", guest_pa);
return Err(HypervisorError::InvalidPml4Entry);
}

// Cast the entry to the PDPT table structure.
let pdpt_table = (pml4_entry.pfn() << BASE_PAGE_SHIFT) as *const Pdpt;

// Calculate the PDPT index and access the corresponding entry.
let pdpt_index = pdpt_index(guest_pa);
let pdpt_entry = &self.pdpt.0.entries[pdpt_index];
let pdpt_entry = &(*pdpt_table).0.entries[pdpt_index];

// Check if the PDPT entry is present (readable).
if !pdpt_entry.readable() {
error!("PDPT entry is not present: {:#x}", guest_pa);
return Err(HypervisorError::InvalidPdptEntry);
}

// Check if the PDPT entry is huge page (1 GB), if so, calculate the host physical address.
// Check if the PDPT entry is a huge page (1 GB), if so, calculate the host physical address.
if pdpt_entry.large() {
let host_pa = (pdpt_entry.pfn() << BASE_PAGE_SHIFT) + (guest_pa.as_u64() % HUGE_PAGE_SIZE as u64);
return Ok(host_pa);
}

// Cast the entry to the PD table structure.
let pd_table = (pdpt_entry.pfn() << BASE_PAGE_SHIFT) as *const Pd;

// Calculate the PD index and access the corresponding entry.
let pd_index = pd_index(guest_pa);
let pd_entry = &self.pd[pdpt_index].0.entries[pd_index];
let pd_entry = &(*pd_table).0.entries[pd_index];

// Check if the PD entry is present (readable).
if !pd_entry.readable() {
error!("PD entry is not present: {:#x}", guest_pa);
return Err(HypervisorError::InvalidPdEntry);
}

// Check if the PD entry is large page (2 MB), if so, calculate the host physical address.
// Check if the PD entry is a large page (2 MB), if so, calculate the host physical address.
if pd_entry.large() {
let host_pa = (pd_entry.pfn() << BASE_PAGE_SHIFT) + (guest_pa.as_u64() % LARGE_PAGE_SIZE as u64);
return Ok(host_pa);
}

// Cast the entry to the PT table structure.
let pt_table = (pd_entry.pfn() << BASE_PAGE_SHIFT) as *const Pt;

// Calculate the PT index and access the corresponding entry.
let pt_index = pt_index(guest_pa);
let pt_entry = &self.pt.0.entries[pt_index];
let pt_entry = &(*pt_table).0.entries[pt_index];

// Check if the PT entry is present (readable).
if !pt_entry.readable() {
Expand Down Expand Up @@ -530,6 +526,7 @@ impl Ept {
pub fn create_eptp_with_wb_and_4lvl_walk(&self) -> Result<u64, HypervisorError> {
// Get the virtual address of the PML4 table for EPT.
let addr = addr_of!(self.pml4) as u64;
trace!("EPT PML4 (self) address: {:#x}", addr);

// Get the physical address of the PML4 table for EPT.
let ept_pml4_base_addr = addr;
Expand Down
8 changes: 4 additions & 4 deletions hypervisor/src/intel/hooks/hook_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -142,10 +142,10 @@ impl HookManager {
/// * `Ok(())` - The kernel base and size were set successfully.
pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> {
// Get the base address of ntoskrnl.exe.
self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? };
self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va)? };

// Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address.
self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va);
self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va)?;

// Get the size of ntoskrnl.exe.
self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64;
Expand Down Expand Up @@ -323,7 +323,7 @@ impl HookManager {
) -> Result<(), HypervisorError> {
debug!("Creating EPT hook for function at VA: {:#x}", guest_function_va);

let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va));
let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)?);
debug!("Guest function PA: {:#x}", guest_function_pa.as_u64());

let guest_page_pa = guest_function_pa.align_down_to_base_page();
Expand Down Expand Up @@ -424,7 +424,7 @@ impl HookManager {
pub fn ept_unhook_function(&mut self, vm: &mut Vm, guest_function_va: u64, _ept_hook_type: EptHookType) -> Result<(), HypervisorError> {
debug!("Removing EPT hook for function at VA: {:#x}", guest_function_va);

let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va));
let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va)?);
debug!("Guest function PA: {:#x}", guest_function_pa.as_u64());

let guest_page_pa = guest_function_pa.align_down_to_base_page();
Expand Down
Loading

0 comments on commit 58c5fb6

Please sign in to comment.