Add proof for conformance to section 2.7.7.2 #324

Draft · wants to merge 1 commit into base: main
8 changes: 6 additions & 2 deletions virtio-queue/Cargo.toml
@@ -13,16 +13,20 @@ edition = "2021"
test-utils = []

[dependencies]
vm-memory = { workspace = true }
vm-memory = { version = "0.15.0", features = ["backend-mmap", "backend-bitmap"] }
vmm-sys-util = { workspace = true }
log = "0.4.17"
libc = "0.2.161"
virtio-bindings = { path="../virtio-bindings", version = "0.2.4" }

[dev-dependencies]
criterion = "0.5.1"
vm-memory = { workspace = true, features = ["backend-mmap", "backend-atomic"] }
vm-memory = { version = "0.15.0", features = ["backend-mmap", "backend-bitmap", "backend-atomic"] }
memoffset = "0.9.0"

[[bench]]
name = "main"
harness = false

[lints.rust]
unexpected_cfgs = { level = "warn", check-cfg = ['cfg(kani)'] }
253 changes: 253 additions & 0 deletions virtio-queue/src/queue.rs
@@ -271,6 +271,259 @@ impl Queue {
}
}

#[cfg(kani)]
#[allow(dead_code)]
mod verification {
use std::mem::ManuallyDrop;
use vm_memory::MmapRegion;

use std::num::Wrapping;
use vm_memory::FileOffset;

use vm_memory::guest_memory::GuestMemoryIterator;
use vm_memory::{GuestMemoryRegion, MemoryRegionAddress};

use super::*;

/// A made-for-kani version of `vm_memory::GuestMemoryMmap`. Unlike the real
/// `GuestMemoryMmap`, which manages a list of regions and then does a binary
/// search to determine which region a specific read or write request goes to,
/// this only uses a single region. Eliminating this binary search significantly
/// speeds up all queue proofs, because it eliminates the only loop contained herein,
/// meaning we can use `kani::unwind(0)` instead of `kani::unwind(2)`. Functionally,
/// it works identically to `GuestMemoryMmap` with only a single contained region.
pub struct ProofGuestMemory {
the_region: vm_memory::GuestRegionMmap,
}

impl<'a> GuestMemoryIterator<'a, vm_memory::GuestRegionMmap> for ProofGuestMemory {
type Iter = std::iter::Once<&'a vm_memory::GuestRegionMmap>;
}

impl GuestMemory for ProofGuestMemory {
type R = vm_memory::GuestRegionMmap;
type I = Self;

fn num_regions(&self) -> usize {
1
}

fn find_region(&self, addr: GuestAddress) -> Option<&Self::R> {
self.the_region
.to_region_addr(addr)
.map(|_| &self.the_region)
}

fn iter(&self) -> <Self::I as GuestMemoryIterator<Self::R>>::Iter {
std::iter::once(&self.the_region)
}

fn try_access<F>(
&self,
count: usize,
addr: GuestAddress,
mut f: F,
) -> vm_memory::guest_memory::Result<usize>
where
F: FnMut(
usize,
usize,
MemoryRegionAddress,
&Self::R,
) -> vm_memory::guest_memory::Result<usize>,
{
// We only have a single region, meaning the complications of the default
// try_access implementation for dealing with reads/writes across multiple
// regions do not apply.
let region_addr = self
.the_region
.to_region_addr(addr)
.ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
self.the_region
.checked_offset(region_addr, count)
.ok_or(vm_memory::guest_memory::Error::InvalidGuestAddress(addr))?;
f(0, count, region_addr, &self.the_region)
}
}

pub struct ProofContext(pub Queue, pub ProofGuestMemory);

pub struct MmapRegionStub {
addr: *mut u8,
size: usize,
bitmap: (),
file_offset: Option<FileOffset>,
prot: i32,
flags: i32,
owned: bool,
hugetlbfs: Option<bool>,
}

/// We start the first guest memory region at GUEST_MEMORY_BASE so that harnesses using
/// Queue::any() will be exposed to queue segments both before and after valid guest memory.
/// This conforms to MockSplitQueue::new(), which uses `0` as the starting address of the
/// virtqueue. Note that QUEUE_END equals the size of the queue's memory area only because
/// GUEST_MEMORY_BASE is `0`.
const GUEST_MEMORY_BASE: u64 = 0;

// We size our guest memory to fit a properly aligned queue, plus some wiggle room
// to make sure we do not only test queues where all segments are laid out consecutively.
// We need to give at least 16 bytes of buffer space for the descriptor table to be
// able to change its address, as it is 16-byte aligned.
const GUEST_MEMORY_SIZE: usize = QUEUE_END as usize + 30;

fn guest_memory(memory: *mut u8) -> ProofGuestMemory {
// Ideally, we'd want to do
// let region = unsafe {MmapRegionBuilder::new(GUEST_MEMORY_SIZE)
// .with_raw_mmap_pointer(bytes.as_mut_ptr())
// .build()
// .unwrap()};
// However, .build() calls to .build_raw(), which contains a call to libc::sysconf.
// Since kani 0.34.0, stubbing out foreign functions is supported, but due to the rust
// standard library using a special version of the libc crate, it runs into some problems
// [1] Even if we work around those problems, we run into performance problems [2].
// Therefore, for now we stick to this ugly transmute hack (which only works because
// the kani compiler will never re-order fields, so we can treat repr(Rust) as repr(C)).
//
// [1]: https://github.com/model-checking/kani/issues/2673
// [2]: https://github.com/model-checking/kani/issues/2538
let region_stub = MmapRegionStub {
addr: memory,
size: GUEST_MEMORY_SIZE,
bitmap: Default::default(),
file_offset: None,
prot: 0,
flags: libc::MAP_ANONYMOUS | libc::MAP_PRIVATE,
owned: false,
hugetlbfs: None,
};

let region: MmapRegion<()> = unsafe { std::mem::transmute(region_stub) };

let guest_region =
vm_memory::GuestRegionMmap::new(region, GuestAddress(GUEST_MEMORY_BASE)).unwrap();

// Use a single memory region, just as firecracker does for guests of size < 2GB.
// For larger guests, firecracker uses two regions (due to the MMIO gap being
// at the top of the 32-bit address space).
ProofGuestMemory {
the_region: guest_region,
}
}

// can't implement kani::Arbitrary for the relevant types due to orphan rules
fn setup_kani_guest_memory() -> ProofGuestMemory {
// Non-deterministic Vec that will be used as the guest memory. We use `exact_vec` for now,
// as `any_vec` would likely result in worse performance. We do not lose much by using
// `exact_vec`, as our proofs do not make any assumptions about "filling" guest
// memory: since everything is placed at non-deterministic addresses with
// non-deterministic lengths, we still closely cover all scenarios that would be covered
// by smaller guest memory. We leak the memory allocated here so that it
// doesn't get deallocated at the end of this function. We do not explicitly
// de-allocate, but since this is a kani proof, that does not matter.
guest_memory(
ManuallyDrop::new(kani::vec::exact_vec::<u8, GUEST_MEMORY_SIZE>()).as_mut_ptr(),
)
}

const MAX_QUEUE_SIZE: u16 = 256;

// Constants describing the in-memory layout of a queue of size MAX_QUEUE_SIZE starting
// at the beginning of guest memory. These are based on Section 2.7 of the VirtIO 1.2
// specification.
const QUEUE_BASE_ADDRESS: u64 = GUEST_MEMORY_BASE;

/// The descriptor table has 16 bytes per entry; the avail ring starts right after it.
const AVAIL_RING_BASE_ADDRESS: u64 = QUEUE_BASE_ADDRESS + MAX_QUEUE_SIZE as u64 * 16;

/// The used ring starts after the avail ring (which has size 6 + 2 * MAX_QUEUE_SIZE),
/// plus 2 bytes of padding to keep the used ring 4-byte aligned.
const USED_RING_BASE_ADDRESS: u64 = AVAIL_RING_BASE_ADDRESS + 6 + 2 * MAX_QUEUE_SIZE as u64 + 2;

/// The address of the first byte after the queue. Since our queue starts at guest physical
/// address 0, this is also the size of the memory area occupied by the queue.
/// Note that the used ring structure has size 6 + 8 * MAX_QUEUE_SIZE
const QUEUE_END: u64 = USED_RING_BASE_ADDRESS + 6 + 8 * MAX_QUEUE_SIZE as u64;
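// For illustration, with MAX_QUEUE_SIZE = 256 the layout defined above works out to:
//   descriptor table: [0, 4096)    (256 entries * 16 bytes)
//   avail ring:       [4096, 4614) (6 + 2 * 256 bytes), followed by 2 bytes of padding
//   used ring:        [4616, 6670) (6 + 8 * 256 bytes)
// so QUEUE_END = 6670 and GUEST_MEMORY_SIZE = 6670 + 30 = 6700.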

impl kani::Arbitrary for ProofContext {
fn any() -> Self {
let mem = setup_kani_guest_memory();

let mut queue = Queue::new(MAX_QUEUE_SIZE).unwrap();

queue.ready = true;

queue.set_desc_table_address(
Some(QUEUE_BASE_ADDRESS as u32),
Some((QUEUE_BASE_ADDRESS >> 32) as u32),
);

queue.set_avail_ring_address(
Some(AVAIL_RING_BASE_ADDRESS as u32),
Some((AVAIL_RING_BASE_ADDRESS >> 32) as u32),
);

queue.set_used_ring_address(
Some(USED_RING_BASE_ADDRESS as u32),
Some((USED_RING_BASE_ADDRESS >> 32) as u32),
);

queue.set_next_avail(kani::any());
queue.set_next_used(kani::any());
queue.set_event_idx(kani::any());
queue.num_added = Wrapping(kani::any());

kani::assume(queue.is_valid(&mem));

ProofContext(queue, mem)
}
}

#[kani::proof]
#[kani::unwind(0)] // There are no loops anywhere, but kani really enjoys getting stuck in std::ptr::drop_in_place.
// This is a compiler intrinsic that has a "dummy" implementation in stdlib that just
// recursively calls itself. Kani will generally unwind this recursion infinitely.
fn verify_spec_2_7_7_2() {
// Section 2.7.7.2 deals with device-to-driver notification suppression.
// It describes a mechanism by which the driver can tell the device that it does not
// want notifications (IRQs) about the device finishing processing individual buffers
// (descriptor chain heads) from the avail ring until a specific number of descriptors
// has been processed. This is done by the driver
// defining a "used_event" index, which tells the device "please do not notify me until
// used.ring[used_event] has been written to by you".
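// As a concrete illustration of the mechanism described above: if the driver sets
// used_event = 5, it is asking the device to keep processing buffers without notifying
// it until the device has written to used.ring[5], after which a notification must be sent.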
let ProofContext(mut queue, mem) = kani::any();

let num_added_old = queue.num_added.0;
let needs_notification = queue.needs_notification(&mem);

// event_idx_enabled is equivalent to VIRTIO_F_EVENT_IDX having been negotiated
if !queue.event_idx_enabled {
// The specification here says:
// After the device writes a descriptor index into the used ring:
// – If flags is 1, the device SHOULD NOT send a notification.
// – If flags is 0, the device MUST send a notification.
// Here, flags is the first field of the avail ring (at the avail ring address), which we
// completely ignore. We always send a notification, and as the spec only says SHOULD NOT,
// that is okay.
assert!(needs_notification.unwrap());
} else {
// next_used - 1 is where the previous descriptor was placed
if Wrapping(queue.used_event(&mem, Ordering::Relaxed).unwrap())
== std::num::Wrapping(queue.next_used - Wrapping(1))
&& num_added_old > 0
{
// If the idx field in the used ring (which determined where that descriptor index
// was placed) was equal to used_event, the device MUST send a
// notification.
assert!(needs_notification.unwrap());

kani::cover!();
}

// The other case is covered by a "SHOULD NOT send a notification" in the spec,
// so we do not need to check it here.
}
}
}

impl<'a> QueueGuard<'a> for Queue {
type G = &'a mut Self;
}
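
To run this harness locally, something like `cargo kani -p virtio-queue --harness verify_spec_2_7_7_2` should work, assuming the Kani cargo subcommand is installed; the exact flags may vary with the Kani version.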