diff --git a/coverage_config.json b/coverage_config.json index a9e9a75..2e04917 100644 --- a/coverage_config.json +++ b/coverage_config.json @@ -1,5 +1,5 @@ { - "coverage_score": 79.9, + "coverage_score": 86.1, "exclude_path": "", "crate_features": "" } diff --git a/src/device_manager.rs b/src/device_manager.rs index bbbb857..047792c 100644 --- a/src/device_manager.rs +++ b/src/device_manager.rs @@ -12,6 +12,8 @@ //! devices IO ranges, and finally set resources to virtual device. use crate::resources::Resource; +#[cfg(any(target_arch = "x86_64", target_arch = "x86"))] +use crate::PioAddress; use crate::{DeviceIo, IoAddress, IoSize}; use std::cmp::{Ord, Ordering, PartialEq, PartialOrd}; @@ -31,30 +33,30 @@ pub enum Error { /// Simplify the `Result` type. pub type Result = result::Result; -// Structure describing an IO range. -#[derive(Debug, Copy, Clone)] -struct IoRange { +/// Structure describing an IO range. +#[derive(Debug, Copy, Clone, Eq)] +pub struct IoRange { base: IoAddress, size: IoSize, } impl IoRange { + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] fn new_pio_range(base: u16, size: u16) -> Self { IoRange { - base: IoAddress::Pio(base), - size: IoSize::Pio(size), + base: IoAddress(base as u64), + size: IoSize(size as u64), } } + fn new_mmio_range(base: u64, size: u64) -> Self { IoRange { - base: IoAddress::Mmio(base), - size: IoSize::Mmio(size), + base: IoAddress(base), + size: IoSize(size), } } } -impl Eq for IoRange {} - impl PartialEq for IoRange { fn eq(&self, other: &IoRange) -> bool { self.base == other.base @@ -74,8 +76,9 @@ impl PartialOrd for IoRange { } /// System IO manager serving for all devices management and VM exit handling. -#[derive(Default)] +#[derive(Clone, Default)] pub struct IoManager { + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] /// Range mapping for VM exit pio operations. pio_bus: BTreeMap>, /// Range mapping for VM exit mmio operations. 
@@ -87,6 +90,7 @@ impl IoManager { pub fn new() -> Self { IoManager::default() } + /// Register a new device IO with its allocated resources. /// VMM is responsible for providing the allocated resources to virtual device. /// @@ -104,6 +108,7 @@ impl IoManager { // The resources addresses being registered are sucessfully allocated before. for (idx, res) in resources.iter().enumerate() { match *res { + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] Resource::PioAddressRange { base, size } => { if self .pio_bus @@ -147,6 +152,7 @@ impl IoManager { pub fn unregister_device_io(&mut self, resources: &[Resource]) -> Result<()> { for res in resources.iter() { match *res { + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] Resource::PioAddressRange { base, size } => { self.pio_bus.remove(&IoRange::new_pio_range(base, size)); } @@ -159,40 +165,48 @@ impl IoManager { Ok(()) } - fn get_entry(&self, addr: IoAddress) -> Option<(&IoRange, &Arc)> { - match addr { - IoAddress::Pio(a) => self - .pio_bus - .range(..=&IoRange::new_pio_range(a, 0)) - .nth_back(0), - IoAddress::Mmio(a) => self - .mmio_bus - .range(..=&IoRange::new_mmio_range(a, 0)) - .nth_back(0), - } + /// A helper function handling MMIO read command during VM exit. + /// The virtual device itself provides mutable ability and thread-safe protection. + /// + /// Return error if failed to get the device. + pub fn mmio_read(&self, addr: u64, data: &mut [u8]) -> Result<()> { + self.get_device(IoAddress(addr)) + .map(|(device, base)| device.read(base, IoAddress(addr - base.raw_value()), data)) + .ok_or(Error::NoDevice) + } + + /// A helper function handling MMIO write command during VM exit. + /// The virtual device itself provides mutable ability and thread-safe protection. + /// + /// Return error if failed to get the device. 
+ pub fn mmio_write(&self, addr: u64, data: &[u8]) -> Result<()> { + self.get_device(IoAddress(addr)) + .map(|(device, base)| device.write(base, IoAddress(addr - base.raw_value()), data)) + .ok_or(Error::NoDevice) } // Return the Device mapped `addr` and the base address. fn get_device(&self, addr: IoAddress) -> Option<(&Arc, IoAddress)> { - if let Some((range, dev)) = self.get_entry(addr) { + let range = IoRange::new_mmio_range(addr.raw_value(), 0); + if let Some((range, dev)) = self.mmio_bus.range(..=&range).nth_back(0) { if (addr.raw_value() - range.base.raw_value()) < range.size.raw_value() { return Some((dev, range.base)); } } None } +} +#[cfg(any(target_arch = "x86_64", target_arch = "x86"))] +impl IoManager { /// A helper function handling PIO read command during VM exit. /// The virtual device itself provides mutable ability and thead-safe protection. /// /// Return error if failed to get the device. pub fn pio_read(&self, addr: u16, data: &mut [u8]) -> Result<()> { - if let Some((device, base)) = self.get_device(IoAddress::Pio(addr)) { - device.read(base, IoAddress::Pio(addr - (base.raw_value() as u16)), data); - Ok(()) - } else { - Err(Error::NoDevice) - } + self.get_pio_device(PioAddress(addr)) + .map(|(device, base)| device.pio_read(base, PioAddress(addr - base.raw_value()), data)) + .ok_or(Error::NoDevice) } /// A helper function handling PIO write command during VM exit. @@ -200,38 +214,20 @@ impl IoManager { /// /// Return error if failed to get the device. pub fn pio_write(&self, addr: u16, data: &[u8]) -> Result<()> { - if let Some((device, base)) = self.get_device(IoAddress::Pio(addr)) { - device.write(base, IoAddress::Pio(addr - (base.raw_value() as u16)), data); - Ok(()) - } else { - Err(Error::NoDevice) - } + self.get_pio_device(PioAddress(addr)) + .map(|(device, base)| device.pio_write(base, PioAddress(addr - base.raw_value()), data)) + .ok_or(Error::NoDevice) } - /// A helper function handling MMIO read command during VM exit. 
- /// The virtual device itself provides mutable ability and thead-safe protection. - /// - /// Return error if failed to get the device. - pub fn mmio_read(&self, addr: u64, data: &mut [u8]) -> Result<()> { - if let Some((device, base)) = self.get_device(IoAddress::Mmio(addr)) { - device.read(base, IoAddress::Mmio(addr - base.raw_value()), data); - Ok(()) - } else { - Err(Error::NoDevice) - } - } - - /// A helper function handling MMIO write command during VM exit. - /// The virtual device itself provides mutable ability and thead-safe protection. - /// - /// Return error if failed to get the device. - pub fn mmio_write(&self, addr: u64, data: &[u8]) -> Result<()> { - if let Some((device, base)) = self.get_device(IoAddress::Mmio(addr)) { - device.write(base, IoAddress::Mmio(addr - base.raw_value()), data); - Ok(()) - } else { - Err(Error::NoDevice) + // Return the Device mapped `addr` and the base address. + fn get_pio_device(&self, addr: PioAddress) -> Option<(&Arc, PioAddress)> { + let range = IoRange::new_pio_range(addr.raw_value(), 0); + if let Some((range, dev)) = self.pio_bus.range(..=&range).nth_back(0) { + if (addr.raw_value() as u64 - range.base.raw_value()) < range.size.raw_value() { + return Some((dev, PioAddress(range.base.0 as u16))); + } } + None } } @@ -240,7 +236,9 @@ mod tests { use super::*; use std::sync::Mutex; + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] const PIO_ADDRESS_SIZE: u16 = 4; + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] const PIO_ADDRESS_BASE: u16 = 0x40; const MMIO_ADDRESS_SIZE: u64 = 0x8765_4321; const MMIO_ADDRESS_BASE: u64 = 0x1234_5678; @@ -274,6 +272,72 @@ mod tests { let mut config = self.config.lock().expect("failed to acquire lock"); *config = u32::from(data[0]) & 0xff; } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_read(&self, _base: PioAddress, _offset: PioAddress, data: &mut [u8]) { + if data.len() > 4 { + return; + } + for (idx, iter) in data.iter_mut().enumerate() { 
+ let config = self.config.lock().expect("failed to acquire lock"); + *iter = (*config >> (idx * 8) & 0xff) as u8; + } + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_write(&self, _base: PioAddress, _offset: PioAddress, data: &[u8]) { + let mut config = self.config.lock().expect("failed to acquire lock"); + *config = u32::from(data[0]) & 0xff; + } + } + + #[test] + fn test_clone_io_manager() { + let mut io_mgr = IoManager::new(); + let dummy = DummyDevice::new(0); + let dum = Arc::new(dummy); + + let mut resource: Vec = Vec::new(); + let mmio = Resource::MmioAddressRange { + base: MMIO_ADDRESS_BASE, + size: MMIO_ADDRESS_SIZE, + }; + let irq = Resource::LegacyIrq(LEGACY_IRQ); + + resource.push(mmio); + resource.push(irq); + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + { + let pio = Resource::PioAddressRange { + base: PIO_ADDRESS_BASE, + size: PIO_ADDRESS_SIZE, + }; + resource.push(pio); + } + + assert!(io_mgr.register_device_io(dum.clone(), &resource).is_ok()); + + let io_mgr2 = io_mgr.clone(); + assert_eq!(io_mgr2.mmio_bus.len(), 1); + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + { + assert_eq!(io_mgr2.pio_bus.len(), 1); + + let (dev, addr) = io_mgr2 + .get_device(IoAddress(MMIO_ADDRESS_BASE + 1)) + .unwrap(); + assert_eq!(Arc::strong_count(dev), 5); + + assert_eq!(addr, IoAddress(MMIO_ADDRESS_BASE)); + + drop(io_mgr); + assert_eq!(Arc::strong_count(dev), 3); + + drop(io_mgr2); + assert_eq!(Arc::strong_count(&dum), 1); + } } #[test] @@ -326,6 +390,7 @@ mod tests { .is_err()); } + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] #[test] fn test_pio_read_write() { let mut io_mgr: IoManager = Default::default(); @@ -355,4 +420,15 @@ mod tests { .pio_write(PIO_ADDRESS_BASE + PIO_ADDRESS_SIZE, &data) .is_err()); } + + #[test] + fn test_device_manager_data_structs() { + let range1 = IoRange::new_mmio_range(0x1000, 0x1000); + let range2 = IoRange::new_mmio_range(0x1000, 0x2000); + let range3 = 
IoRange::new_mmio_range(0x2000, 0x1000); + + assert_eq!(range1, range1.clone()); + assert_eq!(range1, range2); + assert!(range1 < range3); + } } diff --git a/src/lib.rs b/src/lib.rs index dc09ddb..a58c6a0 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -3,83 +3,333 @@ //! rust-vmm device model. -use std::cmp::{Ord, Ordering, PartialOrd}; +use std::cmp::{Ord, PartialOrd}; +use std::sync::Mutex; pub mod device_manager; pub mod resources; -// IO Size. -#[derive(Debug, Copy, Clone)] -enum IoSize { - // Port I/O size. - Pio(u16), - - // Memory mapped I/O size. - Mmio(u64), -} +/// IO Size. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct IoSize(pub u64); impl IoSize { - // Get the raw value as u64 to make operation simple. - fn raw_value(&self) -> u64 { - match *self { - IoSize::Pio(p) => u64::from(p), - IoSize::Mmio(m) => m, - } + /// Get the raw value as u64 to make operation simple. + #[inline] + pub fn raw_value(self) -> u64 { + self.0 } } -/// IO Addresses. -#[derive(Debug, Copy, Clone)] -pub enum IoAddress { - /// Port I/O address. - Pio(u16), +impl From for IoSize { + #[inline] + fn from(size: u64) -> Self { + IoSize(size) + } +} - /// Memory mapped I/O address. - Mmio(u64), +impl From for u64 { + #[inline] + fn from(size: IoSize) -> Self { + size.0 + } } +/// IO Addresses. +#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] +pub struct IoAddress(pub u64); + impl IoAddress { - // Get the raw value of IO Address to make operation simple. - fn raw_value(&self) -> u64 { - match *self { - IoAddress::Pio(p) => u64::from(p), - IoAddress::Mmio(m) => m, - } + /// Get the raw value of IO Address to make operation simple. 
+ #[inline] + pub fn raw_value(self) -> u64 { + self.0 } } -impl Eq for IoAddress {} - -impl PartialEq for IoAddress { - fn eq(&self, other: &IoAddress) -> bool { - self.raw_value() == other.raw_value() +impl From for IoAddress { + #[inline] + fn from(addr: u64) -> Self { + IoAddress(addr) } } -impl Ord for IoAddress { - fn cmp(&self, other: &IoAddress) -> Ordering { - self.raw_value().cmp(&other.raw_value()) +impl From for u64 { + #[inline] + fn from(addr: IoAddress) -> Self { + addr.0 } } -impl PartialOrd for IoAddress { - fn partial_cmp(&self, other: &IoAddress) -> Option { - self.raw_value().partial_cmp(&other.raw_value()) +#[cfg(any(target_arch = "x86_64", target_arch = "x86"))] +mod x86 { + use super::{IoAddress, IoSize}; + use std::convert::TryFrom; + + type PioAddressType = u16; + + #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] + /// Port I/O size. + pub struct PioSize(pub PioAddressType); + + impl PioSize { + /// Get the raw value as u64 to make operation simple. + #[inline] + pub fn raw_value(self) -> PioAddressType { + self.0 + } + } + + impl From for PioSize { + #[inline] + fn from(size: PioAddressType) -> Self { + PioSize(size) + } + } + + impl From for PioAddressType { + #[inline] + fn from(size: PioSize) -> Self { + size.0 + } + } + + impl TryFrom for PioSize { + type Error = IoSize; + + #[inline] + fn try_from(size: IoSize) -> Result { + if size.raw_value() <= std::u16::MAX as u64 { + Ok(PioSize(size.raw_value() as PioAddressType)) + } else { + Err(size) + } + } + } + + impl From for IoSize { + #[inline] + fn from(size: PioSize) -> Self { + IoSize(size.raw_value() as u64) + } + } + + /// Port I/O address. + #[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] + pub struct PioAddress(pub PioAddressType); + + impl PioAddress { + /// Get the raw value of IO Address to make operation simple. 
+ #[inline] + pub fn raw_value(self) -> PioAddressType { + self.0 + } + } + + impl From for PioAddress { + #[inline] + fn from(addr: PioAddressType) -> Self { + PioAddress(addr) + } + } + + impl From for PioAddressType { + #[inline] + fn from(addr: PioAddress) -> Self { + addr.0 + } + } + + impl TryFrom for PioAddress { + type Error = IoAddress; + + #[inline] + fn try_from(addr: IoAddress) -> Result { + if addr.0 <= std::u16::MAX as u64 { + Ok(PioAddress(addr.raw_value() as PioAddressType)) + } else { + Err(addr) + } + } + } + + impl From for IoAddress { + #[inline] + fn from(addr: PioAddress) -> Self { + IoAddress(addr.raw_value() as u64) + } } } -/// Device IO trait. +#[cfg(any(target_arch = "x86_64", target_arch = "x86"))] +pub use self::x86::{PioAddress, PioSize}; + +/// IO Addresses. +/// Device IO trait adopting interior mutability pattern. +/// /// A device supporting memory based I/O should implement this trait, then /// register itself against the different IO type ranges it handles. /// The VMM will then dispatch IO (PIO or MMIO) VM exits by calling into the /// registered devices read or write method from this trait. -/// The DeviceIo trait adopts the interior mutability pattern -/// so we can get a real multiple threads handling. -pub trait DeviceIo: Send { +/// +/// The DeviceIo trait adopts the interior mutability pattern so we can get a +/// real concurrent multiple threads handling. For device backend drivers not +/// focusing on high performance, they may use the Mutex +/// adapter to simplify implementation. +#[allow(unused_variables)] +pub trait DeviceIo: Send + Sync { /// Read from the guest physical address `base`, starting at `offset`. /// Result is placed in `data`. - fn read(&self, base: IoAddress, offset: IoAddress, data: &mut [u8]); + fn read(&self, base: IoAddress, offset: IoAddress, data: &mut [u8]) {} /// Write `data` to the guest physical address `base`, starting from `offset`. 
- fn write(&self, base: IoAddress, offset: IoAddress, data: &[u8]); + fn write(&self, base: IoAddress, offset: IoAddress, data: &[u8]) {} + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + /// Read from the guest physical address `base`, starting at `offset`. + /// Result is placed in `data`. + fn pio_read(&self, base: PioAddress, offset: PioAddress, data: &mut [u8]) {} + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + /// Write `data` to the guest physical address `base`, starting from `offset`. + fn pio_write(&self, base: PioAddress, offset: PioAddress, data: &[u8]) {} +} + +/// Device IO trait without interior mutability. +/// +/// Many device backend drivers will mutate themselves when handling IO requests. +/// The DeviceIo trait assumes interior mutability, but it's a little complex +/// to support interior mutability. So the Mutex adapter may be +/// used to ease device backend driver implementations. +/// +/// The Mutex adapter is a zero-overhead abstraction without +/// performance penalty. +#[allow(unused_variables)] +pub trait DeviceIoMut: Send { + /// Read from the guest physical address `base`, starting at `offset`. + /// Result is placed in `data`. + fn read(&mut self, base: IoAddress, offset: IoAddress, data: &mut [u8]) {} + + /// Write `data` to the guest physical address `base`, starting from `offset`. + fn write(&mut self, base: IoAddress, offset: IoAddress, data: &[u8]) {} + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + /// Read from the guest physical address `base`, starting at `offset`. + /// Result is placed in `data`. + fn pio_read(&mut self, base: PioAddress, offset: PioAddress, data: &mut [u8]) {} + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + /// Write `data` to the guest physical address `base`, starting from `offset`. 
+ fn pio_write(&mut self, base: PioAddress, offset: PioAddress, data: &[u8]) {} +} + +impl DeviceIo for Mutex { + fn read(&self, base: IoAddress, offset: IoAddress, data: &mut [u8]) { + // Safe to unwrap() because we don't expect poisoned lock here. + self.lock().unwrap().read(base, offset, data) + } + + fn write(&self, base: IoAddress, offset: IoAddress, data: &[u8]) { + // Safe to unwrap() because we don't expect poisoned lock here. + self.lock().unwrap().write(base, offset, data) + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_read(&self, base: PioAddress, offset: PioAddress, data: &mut [u8]) { + // Safe to unwrap() because we don't expect poisoned lock here. + self.lock().unwrap().pio_read(base, offset, data) + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_write(&self, base: PioAddress, offset: PioAddress, data: &[u8]) { + // Safe to unwrap() because we don't expect poisoned lock here. + self.lock().unwrap().pio_write(base, offset, data) + } +} + +#[cfg(test)] +mod tests { + use super::*; + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + use std::convert::TryFrom; + use std::sync::Arc; + + #[derive(Default)] + struct MockDevice { + data: u8, + } + + impl DeviceIoMut for MockDevice { + fn read(&mut self, _base: IoAddress, _offset: IoAddress, data: &mut [u8]) { + data[0] = self.data; + } + + fn write(&mut self, _base: IoAddress, _offset: IoAddress, data: &[u8]) { + self.data = data[0]; + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_read(&mut self, _base: PioAddress, _offset: PioAddress, data: &mut [u8]) { + data[0] = self.data; + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + fn pio_write(&mut self, _base: PioAddress, _offset: PioAddress, data: &[u8]) { + self.data = data[0]; + } + } + + fn register_device(device: Arc) { + device.write(IoAddress(0), IoAddress(0), &[0x10u8]); + let mut buf = [0x0u8]; + device.read(IoAddress(0), IoAddress(0), &mut buf); + 
assert_eq!(buf[0], 0x10); + } + + #[test] + fn test_device_io_mut_adapter() { + let device_mut = Arc::new(Mutex::new(MockDevice::default())); + + register_device(device_mut.clone()); + assert_eq!(device_mut.lock().unwrap().data, 0x010); + } + + #[test] + fn test_io_data_struct() { + let io_size = IoSize::from(0x1111u64); + assert_eq!(io_size.raw_value(), 0x1111u64); + assert_eq!(u64::from(io_size), 0x1111u64); + assert_eq!(io_size, io_size.clone()); + let io_size1 = IoSize::from(0x1112u64); + assert!(io_size < io_size1); + + let io_addr = IoAddress::from(0x1234u64); + assert_eq!(io_addr.raw_value(), 0x1234u64); + assert_eq!(u64::from(io_addr), 0x1234u64); + assert_eq!(io_addr, io_addr.clone()); + let io_addr1 = IoAddress::from(0x1235u64); + assert!(io_addr < io_addr1); + } + + #[cfg(any(target_arch = "x86_64", target_arch = "x86"))] + #[test] + fn test_pio_data_struct() { + let pio_size = PioSize::from(0x1111u16); + assert_eq!(pio_size.raw_value(), 0x1111u16); + assert_eq!(u16::from(pio_size), 0x1111u16); + assert_eq!(pio_size, pio_size.clone()); + let pio_size1 = PioSize::from(0x1112u16); + assert!(pio_size < pio_size1); + + let pio_addr = PioAddress::from(0x1234u16); + assert_eq!(pio_addr.raw_value(), 0x1234u16); + assert_eq!(u16::from(pio_addr), 0x1234u16); + assert_eq!(pio_addr, pio_addr.clone()); + let pio_addr1 = PioAddress::from(0x1235u16); + assert!(pio_addr < pio_addr1); + + assert!(PioAddress::try_from(IoAddress::from(0x123456u64)).is_err()); + assert!(PioAddress::try_from(IoAddress::from(0x1234u64)).is_ok()); + assert_eq!(IoAddress::from(pio_addr).raw_value(), 0x1234u64); + } } diff --git a/src/resources.rs b/src/resources.rs index 8a74ea8..89f383d 100644 --- a/src/resources.rs +++ b/src/resources.rs @@ -12,9 +12,11 @@ //! 5) the VMM registers the new device onto corresponding device managers according the allocated //! resources. +use std::ops::Deref; use std::{u16, u32, u64}; /// Enumeration describing a device's resource constraints. 
+#[derive(Copy, Clone, Debug, PartialEq)] pub enum ResourceConstraint { /// Constraint for an IO Port address range. PioAddress { @@ -108,7 +110,7 @@ impl ResourceConstraint { } /// Type of Message Singaled Interrupt -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone, Debug, PartialEq)] pub enum MsiIrqType { /// PCI MSI IRQ numbers. PciMsi, @@ -120,7 +122,7 @@ pub enum MsiIrqType { /// Enumeration for device resources. #[allow(missing_docs)] -#[derive(Clone)] +#[derive(Clone, Debug, PartialEq)] pub enum Resource { /// IO Port address range. PioAddressRange { base: u16, size: u16 }, @@ -141,7 +143,7 @@ pub enum Resource { } /// Newtype to store a set of device resources. -#[derive(Default, Clone)] +#[derive(Clone, Debug, Default)] pub struct DeviceResources(Vec); impl DeviceResources { @@ -245,6 +247,14 @@ impl DeviceResources { } } +impl Deref for DeviceResources { + type Target = [Resource]; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + #[cfg(test)] mod tests { use super::*; @@ -269,12 +279,16 @@ mod tests { size: PIO_ADDRESS_SIZE, }; let mut resource = DeviceResources::new(); - resource.append(entry); + resource.append(entry.clone()); + assert_eq!(entry, resource[0]); + let entry = Resource::MmioAddressRange { base: MMIO_ADDRESS_BASE, size: MMIO_ADDRESS_SIZE, }; - resource.append(entry); + resource.append(entry.clone()); + assert_eq!(entry, resource[1]); + let entry = Resource::LegacyIrq(LEGACY_IRQ); resource.append(entry); let entry = Resource::MsiIrq { @@ -310,6 +324,25 @@ mod tests { resources.get_pio_address_ranges()[0].0 == PIO_ADDRESS_BASE && resources.get_pio_address_ranges()[0].1 == PIO_ADDRESS_SIZE ); + assert_eq!( + resources[0], + Resource::PioAddressRange { + base: PIO_ADDRESS_BASE, + size: PIO_ADDRESS_SIZE, + } + ); + assert_ne!(resources[0], resources[1]); + + let resources2 = resources.clone(); + assert_eq!(resources.len(), resources2.len()); + drop(resources); + assert_eq!( + resources2[0], + Resource::PioAddressRange { + base: 
PIO_ADDRESS_BASE, + size: PIO_ADDRESS_SIZE, + } + ); } #[test] @@ -374,6 +407,13 @@ mod tests { #[test] fn test_resource_constraint() { + let pio = ResourceConstraint::new_pio(2); + let pio2 = pio.clone(); + let mmio = ResourceConstraint::new_mmio(0x1000); + assert_eq!(pio, pio2); + drop(pio2); + assert_ne!(pio, mmio); + if let ResourceConstraint::PioAddress { range, align, size } = ResourceConstraint::new_pio(2) { @@ -431,4 +471,14 @@ mod tests { panic!("KVM slot resource constraint is invalid."); } } + + #[test] + fn test_resources_deref() { + let resources = get_device_resource(); + let mut count = 0; + for _res in resources.iter() { + count += 1; + } + assert_eq!(count, resources.0.len()); + } }