This repository was archived by the owner on Sep 1, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 33
/
Copy pathhook_manager.rs
539 lines (468 loc) · 23.1 KB
/
hook_manager.rs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
use {
crate::{
error::HypervisorError,
intel::{
addresses::PhysicalAddress,
bitmap::{MsrAccessType, MsrBitmap, MsrOperation},
ept::AccessType,
hooks::{
inline::{InlineHook, InlineHookType},
memory_manager::MemoryManager,
},
invept::invept_all_contexts,
invvpid::invvpid_all_contexts,
vm::Vm,
},
windows::{
nt::pe::{get_export_by_hash, get_image_base_address, get_size_of_image},
ssdt::ssdt_hook::SsdtHook,
},
},
alloc::vec::Vec,
core::intrinsics::copy_nonoverlapping,
lazy_static::lazy_static,
log::*,
spin::Mutex,
x86::{
bits64::paging::{PAddr, BASE_PAGE_SIZE},
msr,
},
};
/// Enum representing different types of EPT hooks that can be applied.
#[derive(Debug, Clone, Copy)]
pub enum EptHookType {
    /// Hook for intercepting and possibly modifying function execution.
    /// Requires specifying the type of inline hook to use (e.g. jmp-based detour).
    Function(InlineHookType),

    /// Hook for hiding or monitoring access to a specific page.
    /// No inline hook type is required for page hooks.
    Page,
}
/// Represents hook manager structures for hypervisor operations.
///
/// Holds all state needed to install, track, and remove EPT-based hooks,
/// plus bookkeeping for hiding the hypervisor's own memory from the guest.
#[repr(C)]
#[derive(Debug, Clone)]
pub struct HookManager {
    /// The memory manager instance for the pre-allocated shadow pages and page tables.
    pub memory_manager: MemoryManager,

    /// A bitmap for handling MSRs (used to intercept e.g. IA32_LSTAR writes).
    pub msr_bitmap: MsrBitmap,

    /// The physical address of the dummy page used for hiding hypervisor memory.
    pub dummy_page_pa: u64,

    /// The base virtual address of ntoskrnl.exe.
    pub ntoskrnl_base_va: u64,

    /// The base physical address of ntoskrnl.exe.
    pub ntoskrnl_base_pa: u64,

    /// The size of ntoskrnl.exe in bytes.
    pub ntoskrnl_size: u64,

    /// A flag indicating whether the CPUID cache information has been called. This will be used to perform hooks at boot time when SSDT has been initialized.
    /// KiSetCacheInformation -> KiSetCacheInformationIntel -> KiSetStandardizedCacheInformation -> __cpuid(4, 0)
    pub has_cpuid_cache_info_been_called: bool,

    /// A vector to keep track of allocated memory ranges for debugging and management purposes.
    /// Each element is a tuple where the first value is the start address and the second value is the size of the allocation.
    pub allocated_memory_ranges: Vec<(usize, usize)>,
}
lazy_static! {
    /// A global static instance of `HookManager` wrapped in a `Mutex` to ensure thread-safe access.
    /// This instance is initialized lazily on first access using the `lazy_static!` macro.
    ///
    /// The `HookManager` contains the following fields:
    /// - `memory_manager`: An instance of `MemoryManager` for managing shadow pages and page tables.
    /// - `msr_bitmap`: The MSR interception bitmap (all MSRs pass through until hooks are installed).
    /// - `dummy_page_pa`: Physical address of the dummy page used for hiding hypervisor memory (0 until `initialize_shared_hook_manager` runs).
    /// - `ntoskrnl_base_va`: Virtual address of the Windows kernel (ntoskrnl.exe).
    /// - `ntoskrnl_base_pa`: Physical address of the Windows kernel (ntoskrnl.exe).
    /// - `ntoskrnl_size`: Size of the Windows kernel (ntoskrnl.exe).
    /// - `has_cpuid_cache_info_been_called`: Flag indicating whether the CPUID cache information has been called.
    /// - `allocated_memory_ranges`: Tracked `(start, size)` allocations; capacity pre-reserved for 128 entries.
    pub static ref SHARED_HOOK_MANAGER: Mutex<HookManager> = Mutex::new(HookManager {
        memory_manager: MemoryManager::new(),
        msr_bitmap: MsrBitmap::new(),
        dummy_page_pa: 0,
        ntoskrnl_base_va: 0,
        ntoskrnl_base_pa: 0,
        ntoskrnl_size: 0,
        has_cpuid_cache_info_been_called: false,
        allocated_memory_ranges: Vec::with_capacity(128),
    });
}
impl HookManager {
    /// Initializes the `SHARED_HOOK_MANAGER` with the provided dummy page physical address.
    ///
    /// This function should be called during the hypervisor setup process to set the `dummy_page_pa`
    /// field of the `HookManager` instance. It ensures that the `dummy_page_pa` is correctly set before
    /// any operations that depend on this field. It also arms MSR interception for LSTAR writes.
    ///
    /// # Arguments
    ///
    /// * `dummy_page_pa`: The physical address of the dummy page used for hiding hypervisor memory.
    pub fn initialize_shared_hook_manager(dummy_page_pa: u64) {
        let mut hook_manager = SHARED_HOOK_MANAGER.lock();
        hook_manager.dummy_page_pa = dummy_page_pa;

        trace!("Modifying MSR interception for LSTAR MSR write access");
        hook_manager
            .msr_bitmap
            .modify_msr_interception(msr::IA32_LSTAR, MsrAccessType::Write, MsrOperation::Hook);
    }

    /// Records a memory allocation for tracking purposes.
    ///
    /// # Arguments
    ///
    /// * `start` - The start address of the memory allocation.
    /// * `size` - The size of the memory allocation in bytes.
    pub fn record_allocation(&mut self, start: usize, size: usize) {
        self.allocated_memory_ranges.push((start, size));
    }

    /// Prints the allocated memory ranges for debugging purposes.
    pub fn print_allocated_memory(&self) {
        self.allocated_memory_ranges.iter().for_each(|(start, size)| {
            debug!("Memory Range: Start = {:#x}, Size = {:#x}", start, size);
        });
    }

    /// Sets the base address and size of the Windows kernel.
    ///
    /// # Arguments
    ///
    /// * `guest_va` - A guest virtual address inside the kernel image, used to locate the image base.
    ///
    /// # Returns
    ///
    /// * `Ok(())` - The kernel base and size were set successfully.
    /// * `Err(HypervisorError)` - If the image base or image size could not be resolved.
    pub fn set_kernel_base_and_size(&mut self, guest_va: u64) -> Result<(), HypervisorError> {
        // Get the base address of ntoskrnl.exe.
        self.ntoskrnl_base_va = unsafe { get_image_base_address(guest_va).ok_or(HypervisorError::FailedToGetImageBaseAddress)? };

        // Get the physical address of ntoskrnl.exe using GUEST_CR3 and the virtual address.
        self.ntoskrnl_base_pa = PhysicalAddress::pa_from_va(self.ntoskrnl_base_va);

        // Get the size of ntoskrnl.exe from its PE headers.
        self.ntoskrnl_size = unsafe { get_size_of_image(self.ntoskrnl_base_pa as _).ok_or(HypervisorError::FailedToGetKernelSize)? } as u64;

        Ok(())
    }

    /// Manages an EPT hook for a kernel function, enabling or disabling it.
    ///
    /// # Arguments
    ///
    /// * `vm` - The virtual machine to install/remove the hook on.
    /// * `function_hash` - The hash of the function to hook/unhook.
    /// * `syscall_number` - The syscall number to use if `get_export_by_hash` fails.
    /// * `ept_hook_type` - The type of EPT hook to use.
    /// * `enable` - A boolean indicating whether to enable (true) or disable (false) the hook.
    ///
    /// # Returns
    ///
    /// * `Ok(())` - The hook was managed successfully.
    /// * `Err(HypervisorError)` - If the hook management fails (e.g. the function cannot be resolved).
    pub fn manage_kernel_ept_hook(
        &mut self,
        vm: &mut Vm,
        function_hash: u32,
        syscall_number: u16,
        ept_hook_type: EptHookType,
        enable: bool,
    ) -> Result<(), HypervisorError> {
        let action = if enable { "Enabling" } else { "Disabling" };
        debug!("{} EPT hook for function: {:#x}", action, function_hash);

        trace!("Ntoskrnl base VA: {:#x}", self.ntoskrnl_base_va);
        trace!("Ntoskrnl base PA: {:#x}", self.ntoskrnl_base_pa);
        trace!("Ntoskrnl size: {:#x}", self.ntoskrnl_size);

        // Resolve the function address: first by export hash, then fall back to the SSDT.
        let function_va = unsafe {
            if let Some(va) = get_export_by_hash(self.ntoskrnl_base_pa as _, self.ntoskrnl_base_va as _, function_hash) {
                va
            } else {
                let ssdt_function_address =
                    SsdtHook::find_ssdt_function_address(syscall_number as _, false, self.ntoskrnl_base_pa as _, self.ntoskrnl_size as _);
                match ssdt_function_address {
                    Ok(ssdt_hook) => ssdt_hook.guest_function_va as *mut u8,
                    Err(_) => return Err(HypervisorError::FailedToGetExport),
                }
            }
        };

        if enable {
            self.ept_hook_function(vm, function_va as _, function_hash, ept_hook_type)?;
        } else {
            self.ept_unhook_function(vm, function_va as _, ept_hook_type)?;
        }

        Ok(())
    }

    /// Hides the hypervisor memory from the guest by installing EPT hooks on all allocated memory regions.
    ///
    /// This function iterates through the recorded memory allocations and calls `ept_hide_hypervisor_memory`
    /// for each page to split the 2MB pages into 4KB pages and fill the shadow page with a specified value.
    /// It then swaps the guest page with the shadow page and sets the desired permissions.
    ///
    /// # Arguments
    ///
    /// * `vm` - The virtual machine instance of the hypervisor.
    /// * `page_permissions` - The desired permissions for the hooked page.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if the hooks were successfully installed, `Err(HypervisorError)` otherwise.
    pub fn hide_hypervisor_memory(&mut self, vm: &mut Vm, page_permissions: AccessType) -> Result<(), HypervisorError> {
        // Expand every recorded allocation into its constituent 4KB pages.
        //
        // BUGFIX: the previous implementation called `.step_by(BASE_PAGE_SIZE)` on the
        // iterator over the ranges *vector itself*, which visited only every 4096th
        // `(start, size)` entry and mapped each to just its start address — so almost
        // all allocations (and all pages after the first of any allocation) were never
        // hidden. We now walk each range in BASE_PAGE_SIZE strides.
        let pages: Vec<u64> = self
            .allocated_memory_ranges
            .iter()
            .flat_map(|&(start, size)| (start..start + size).step_by(BASE_PAGE_SIZE).map(|page| page as u64))
            .collect();

        for guest_page_pa in pages {
            self.ept_hide_hypervisor_memory(vm, PAddr::from(guest_page_pa).align_down_to_base_page().as_u64(), page_permissions)?
        }

        Ok(())
    }

    /// Hide the hypervisor memory from the guest by installing an EPT hook.
    /// This function will split the 2MB page to 4KB pages and fill the shadow page with 0xff.
    /// The guest page will be swapped with the shadow page and the permissions will be set to the desired permissions.
    ///
    /// # Arguments
    ///
    /// * `vm` - The virtual machine instance of the hypervisor.
    /// * `guest_page_pa` - The physical address of the guest page to hide.
    /// * `page_permissions` - The desired permissions for the hooked page.
    ///
    /// # Returns
    ///
    /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise.
    fn ept_hide_hypervisor_memory(&mut self, vm: &mut Vm, guest_page_pa: u64, page_permissions: AccessType) -> Result<(), HypervisorError> {
        let guest_page_pa = PAddr::from(guest_page_pa).align_down_to_base_page();
        trace!("Guest page PA: {:#x}", guest_page_pa.as_u64());

        let guest_large_page_pa = guest_page_pa.align_down_to_large_page();
        trace!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64());

        let dummy_page_pa = self.dummy_page_pa;
        trace!("Dummy page PA: {:#x}", dummy_page_pa);

        trace!("Mapping large page");
        // Map the large page to the pre-allocated page table, if it hasn't been mapped already.
        self.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?;

        let pre_alloc_pt = self
            .memory_manager
            .get_page_table_as_mut(guest_large_page_pa.as_u64())
            .ok_or(HypervisorError::PageTableNotFound)?;

        // Check if a guest page has already been split; if it is still a 2MB mapping, split it.
        if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) {
            trace!("Splitting 2MB page to 4KB pages for Primary EPT: {:#x}", guest_large_page_pa);
            vm.primary_ept.split_2mb_to_4kb(guest_large_page_pa.as_u64(), pre_alloc_pt)?;
        }

        trace!("Swapping guest page: {:#x} with dummy page: {:#x}", guest_page_pa.as_u64(), dummy_page_pa);
        vm.primary_ept
            .swap_page(guest_page_pa.as_u64(), dummy_page_pa, page_permissions, pre_alloc_pt)?;

        // Flush EPT and VPID translations so the swap takes effect immediately.
        invept_all_contexts();
        invvpid_all_contexts();

        trace!("EPT hide hypervisor memory completed successfully");
        Ok(())
    }

    /// Installs an EPT hook for a function.
    ///
    /// # Steps:
    /// 1. Map the large page to the pre-allocated page table, if it hasn't been mapped already.
    ///
    /// 2. Check if the large page has already been split. If not, split it into 4KB pages.
    ///
    /// 3. Check if the guest page is already processed. If not, map the guest page to the shadow page.
    ///    Ensure the memory manager maintains a set of processed guest pages to track this mapping.
    ///
    /// 4. Copy the guest page to the shadow page if it hasn't been copied already, ensuring the
    ///    shadow page contains the original function code.
    ///
    /// 5. Install the inline hook at the shadow function address if the hook type is `Function`.
    ///
    /// 6. Change the permissions of the guest page to read-write only.
    ///
    /// 7. Invalidate the EPT and VPID contexts to ensure the changes take effect.
    ///
    /// These operations are performed only once per guest page to avoid overwriting existing hooks on the same page.
    ///
    /// # Arguments
    ///
    /// * `vm` - The virtual machine instance of the hypervisor.
    /// * `guest_function_va` - The virtual address of the function or page to be hooked.
    /// * `function_hash` - The hash of the function to be hooked.
    /// * `ept_hook_type` - The type of EPT hook to be installed.
    ///
    /// # Returns
    ///
    /// * Returns `Ok(())` if the hook was successfully installed, `Err(HypervisorError)` otherwise.
    pub fn ept_hook_function(
        &mut self,
        vm: &mut Vm,
        guest_function_va: u64,
        function_hash: u32,
        ept_hook_type: EptHookType,
    ) -> Result<(), HypervisorError> {
        debug!("Creating EPT hook for function at VA: {:#x}", guest_function_va);

        let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va));
        debug!("Guest function PA: {:#x}", guest_function_pa.as_u64());

        let guest_page_pa = guest_function_pa.align_down_to_base_page();
        debug!("Guest page PA: {:#x}", guest_page_pa.as_u64());

        let guest_large_page_pa = guest_function_pa.align_down_to_large_page();
        debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64());

        // 1. Map the large page to the pre-allocated page table, if it hasn't been mapped already.
        // We must map the large page to the pre-allocated page table before accessing it.
        debug!("Mapping large page");
        self.memory_manager.map_large_page_to_pt(guest_large_page_pa.as_u64())?;

        // 2. Check if the large page has already been split. If not, split it into 4KB pages.
        debug!("Checking if large page has already been split");
        if vm.primary_ept.is_large_page(guest_page_pa.as_u64()) {
            // We must map the large page to the pre-allocated page table before accessing it.
            let pre_alloc_pt = self
                .memory_manager
                .get_page_table_as_mut(guest_large_page_pa.as_u64())
                .ok_or(HypervisorError::PageTableNotFound)?;

            debug!("Splitting 2MB page to 4KB pages for Primary EPT: {:#x}", guest_large_page_pa);
            vm.primary_ept.split_2mb_to_4kb(guest_large_page_pa.as_u64(), pre_alloc_pt)?;
        }

        // 3. Check if the guest page is already processed. If not, map the guest page to the shadow page.
        // Ensure the memory manager maintains a set of processed guest pages to track this mapping.
        if !self.memory_manager.is_guest_page_processed(guest_page_pa.as_u64()) {
            // We must map the guest page to the shadow page before accessing it.
            debug!("Mapping guest page and shadow page");
            self.memory_manager.map_guest_to_shadow_page(
                guest_page_pa.as_u64(),
                guest_function_va,
                guest_function_pa.as_u64(),
                ept_hook_type,
                function_hash,
            )?;

            // We must map the guest page to the shadow page before accessing it.
            let shadow_page_pa = PAddr::from(
                self.memory_manager
                    .get_shadow_page_as_ptr(guest_page_pa.as_u64())
                    .ok_or(HypervisorError::ShadowPageNotFound)?,
            );

            // 4. Copy the guest page to the shadow page if it hasn't been copied already, ensuring the shadow page contains the original function code.
            debug!("Copying guest page to shadow page: {:#x}", guest_page_pa.as_u64());
            Self::unsafe_copy_guest_to_shadow(guest_page_pa, shadow_page_pa);

            // 5. Install the inline hook at the shadow function address if the hook type is `Function`.
            match ept_hook_type {
                EptHookType::Function(inline_hook_type) => {
                    let shadow_function_pa = PAddr::from(Self::calculate_function_offset_in_host_shadow_page(shadow_page_pa, guest_function_pa));
                    debug!("Shadow Function PA: {:#x}", shadow_function_pa);

                    debug!("Installing inline hook at shadow function PA: {:#x}", shadow_function_pa.as_u64());
                    InlineHook::new(shadow_function_pa.as_u64() as *mut u8, inline_hook_type).detour64();
                }
                EptHookType::Page => {
                    unimplemented!("Page hooks are not yet implemented");
                }
            }

            let pre_alloc_pt = self
                .memory_manager
                .get_page_table_as_mut(guest_large_page_pa.as_u64())
                .ok_or(HypervisorError::PageTableNotFound)?;

            // 6. Change the permissions of the guest page to read-write only, so executes fault into the hypervisor.
            debug!("Changing Primary EPT permissions for page to Read-Write (RW) only: {:#x}", guest_page_pa);
            vm.primary_ept
                .modify_page_permissions(guest_page_pa.as_u64(), AccessType::READ_WRITE, pre_alloc_pt)?;

            // 7. Invalidate the EPT and VPID contexts to ensure the changes take effect.
            invept_all_contexts();
            invvpid_all_contexts();

            debug!("EPT hook created and enabled successfully");
        } else {
            debug!("Guest page already processed, skipping hook installation and permission modification.");
        }

        Ok(())
    }

    /// Removes an EPT hook for a function.
    ///
    /// # Arguments
    ///
    /// * `vm` - The virtual machine instance of the hypervisor.
    /// * `guest_function_va` - The virtual address of the function or page to be unhooked.
    /// * `_ept_hook_type` - The type of EPT hook to be removed (currently unused).
    ///
    /// # Returns
    ///
    /// * Returns `Ok(())` if the hook was successfully removed, `Err(HypervisorError)` otherwise.
    pub fn ept_unhook_function(&mut self, vm: &mut Vm, guest_function_va: u64, _ept_hook_type: EptHookType) -> Result<(), HypervisorError> {
        debug!("Removing EPT hook for function at VA: {:#x}", guest_function_va);

        let guest_function_pa = PAddr::from(PhysicalAddress::pa_from_va(guest_function_va));
        debug!("Guest function PA: {:#x}", guest_function_pa.as_u64());

        let guest_page_pa = guest_function_pa.align_down_to_base_page();
        debug!("Guest page PA: {:#x}", guest_page_pa.as_u64());

        let guest_large_page_pa = guest_function_pa.align_down_to_large_page();
        debug!("Guest large page PA: {:#x}", guest_large_page_pa.as_u64());

        let pre_alloc_pt = self
            .memory_manager
            .get_page_table_as_mut(guest_large_page_pa.as_u64())
            .ok_or(HypervisorError::PageTableNotFound)?;

        // Swap the page back to itself and restore full RWX permissions.
        vm.primary_ept
            .swap_page(guest_page_pa.as_u64(), guest_page_pa.as_u64(), AccessType::READ_WRITE_EXECUTE, pre_alloc_pt)?;

        // Update the memory manager to indicate that the guest page is no longer processed (unmapped/unhooked).
        // This will allow the page to be reprocessed/remapped/rehooked if needed.
        self.memory_manager.unmap_guest_from_shadow_page(guest_page_pa.as_u64())?;

        Ok(())
    }

    /// Copies the guest page to the pre-allocated host shadow page.
    ///
    /// # Arguments
    ///
    /// * `guest_page_pa` - The physical address of the guest page.
    /// * `host_shadow_page_pa` - The physical address of the host shadow page.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it performs a raw memory copy from the guest page to the shadow page.
    /// The caller must ensure both addresses are valid, identity-mapped, BASE_PAGE_SIZE-sized, and non-overlapping.
    pub fn unsafe_copy_guest_to_shadow(guest_page_pa: PAddr, host_shadow_page_pa: PAddr) {
        // SAFETY: both pages are assumed identity-mapped and BASE_PAGE_SIZE bytes long; the
        // source is read-only here, so `*const u8` reflects the actual access pattern.
        unsafe { copy_nonoverlapping(guest_page_pa.as_u64() as *const u8, host_shadow_page_pa.as_u64() as *mut u8, BASE_PAGE_SIZE) };
    }

    /// Fills the shadow page with a specific byte value.
    ///
    /// # Arguments
    ///
    /// * `shadow_page_pa` - The physical address of the shadow page.
    /// * `fill_byte` - The byte value to fill the page with.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it performs a raw memory fill operation on the shadow page.
    /// The caller must ensure the address is valid, identity-mapped, and BASE_PAGE_SIZE bytes long.
    pub fn unsafe_fill_shadow_page(shadow_page_pa: PAddr, fill_byte: u8) {
        // SAFETY: the shadow page is pre-allocated by the memory manager and BASE_PAGE_SIZE bytes long.
        unsafe {
            core::ptr::write_bytes(shadow_page_pa.as_u64() as *mut u8, fill_byte, BASE_PAGE_SIZE);
        }
    }

    /// Calculates the address of the function within the host shadow page.
    ///
    /// # Arguments
    ///
    /// * `host_shadow_page_pa` - The physical address of the host shadow page.
    /// * `guest_function_pa` - The physical address of the guest function.
    ///
    /// # Returns
    ///
    /// * `u64` - The adjusted address of the function within the new page
    ///   (shadow page base plus the function's offset inside its guest page).
    fn calculate_function_offset_in_host_shadow_page(host_shadow_page_pa: PAddr, guest_function_pa: PAddr) -> u64 {
        host_shadow_page_pa.as_u64() + guest_function_pa.base_page_offset()
    }

    /// Returns the size of the hook code in bytes based on the EPT hook type.
    ///
    /// # Returns
    ///
    /// * `usize` - The size of the hook code in bytes, or 0 if the hook type is `Page`.
    pub fn hook_size(hook_type: EptHookType) -> usize {
        match hook_type {
            EptHookType::Function(inline_hook_type) => InlineHook::hook_size(inline_hook_type),
            EptHookType::Page => 0, // Assuming page hooks do not have a hook size
        }
    }

    /// Calculates the number of instructions that fit into the given number of bytes,
    /// adjusting for partial instruction overwrites by including the next full instruction.
    ///
    /// # Safety
    ///
    /// This function is unsafe because it performs operations on raw pointers. The caller must
    /// ensure that the memory at `guest_pa` (converted properly to a virtual address if necessary)
    /// is valid and that reading beyond `hook_size` bytes does not cause memory violations.
    pub unsafe fn calculate_instruction_count(guest_pa: u64, hook_size: usize) -> usize {
        // Define a buffer size; typical maximum x86-64 instruction length is 15 bytes,
        // so read slightly more than hook_size to accommodate a long instruction at the boundary.
        let buffer_size = hook_size + 15;
        let bytes = core::slice::from_raw_parts(guest_pa as *const u8, buffer_size);

        let mut byte_count = 0;
        let mut instruction_count = 0;
        // Use a disassembler engine to iterate over the instructions within the bytes read,
        // stopping once at least `hook_size` bytes' worth of whole instructions are counted.
        for (opcode, pa) in lde::X64.iter(bytes, guest_pa) {
            byte_count += opcode.len();
            instruction_count += 1;

            trace!("{:x}: {}", pa, opcode);
            if byte_count >= hook_size {
                break;
            }
        }

        trace!("Calculated byte count: {}", byte_count);
        trace!("Calculated instruction count: {}", instruction_count);

        instruction_count
    }
}