Skip to content

Commit 9bfb07e

Browse files
committed
devices: net: activate through an activation event
Postpone the registration of the net device's external events until device activation time. This keeps the net device unaware of external events prior to its activation. During creation, register a dedicated activation event which notifies the device when it is time to register the other external event sources. This activation event is unregistered after successful device activation. Code coverage slightly decreased because of untestable EventFd error cases.

Signed-off-by: Adrian Catangiu <acatan@amazon.com>
1 parent f21d692 commit 9bfb07e

File tree

5 files changed

+262
-93
lines changed

5 files changed

+262
-93
lines changed

src/devices/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,4 +47,5 @@ pub enum Error {
4747
PayloadExpected,
4848
IoError(io::Error),
4949
NoAvailBuffers,
50+
SpuriousEvent,
5051
}

src/devices/src/virtio/net/device.rs

Lines changed: 92 additions & 58 deletions
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,9 @@
88
use crate::virtio::net::Error;
99
use crate::virtio::net::Result;
1010
use crate::virtio::net::{MAX_BUFFER_SIZE, QUEUE_SIZE, QUEUE_SIZES, RX_INDEX, TX_INDEX};
11-
use crate::virtio::{ActivateResult, Queue, VirtioDevice, TYPE_NET, VIRTIO_MMIO_INT_VRING};
11+
use crate::virtio::{
12+
ActivateResult, DeviceState, Queue, VirtioDevice, TYPE_NET, VIRTIO_MMIO_INT_VRING,
13+
};
1214
use crate::{report_net_event_fail, Error as DeviceError};
1315
use dumbo::ns::MmdsNetworkStack;
1416
use dumbo::{EthernetFrame, MacAddr, MAC_ADDR_LEN};
@@ -59,8 +61,6 @@ pub struct Net {
5961
avail_features: u64,
6062
acked_features: u64,
6163

62-
mem: GuestMemoryMmap,
63-
6464
pub(crate) queues: Vec<Queue>,
6565
pub(crate) queue_evts: Vec<EventFd>,
6666

@@ -82,7 +82,8 @@ pub struct Net {
8282
config_space: Vec<u8>,
8383
guest_mac: Option<MacAddr>,
8484

85-
device_activated: bool,
85+
pub(crate) device_state: DeviceState,
86+
pub(crate) activate_evt: EventFd,
8687

8788
mmds_ns: Option<MmdsNetworkStack>,
8889

@@ -95,7 +96,6 @@ impl Net {
9596
pub fn new_with_tap(
9697
tap: Tap,
9798
guest_mac: Option<&MacAddr>,
98-
mem: GuestMemoryMmap,
9999
rx_rate_limiter: RateLimiter,
100100
tx_rate_limiter: RateLimiter,
101101
allow_mmds_requests: bool,
@@ -148,7 +148,6 @@ impl Net {
148148
tap,
149149
avail_features,
150150
acked_features: 0u64,
151-
mem,
152151
queues,
153152
queue_evts,
154153
rx_rate_limiter,
@@ -161,7 +160,8 @@ impl Net {
161160
tx_iovec: Vec::with_capacity(QUEUE_SIZE as usize),
162161
interrupt_status: Arc::new(AtomicUsize::new(0)),
163162
interrupt_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(Error::EventFd)?,
164-
device_activated: false,
163+
device_state: DeviceState::Inactive,
164+
activate_evt: EventFd::new(libc::EFD_NONBLOCK).map_err(Error::EventFd)?,
165165
config_space,
166166
guest_mac,
167167
mmds_ns,
@@ -219,8 +219,13 @@ impl Net {
219219
// if a buffer was used, and false if the frame must be deferred until a buffer
220220
// is made available by the driver.
221221
fn rx_single_frame(&mut self) -> bool {
222+
let mem = match self.device_state {
223+
DeviceState::Activated(ref mem) => mem,
224+
// This should never happen, it's been already validated in the event handler.
225+
DeviceState::Inactive => return false,
226+
};
222227
let rx_queue = &mut self.queues[RX_INDEX];
223-
let mut next_desc = rx_queue.pop(&self.mem);
228+
let mut next_desc = rx_queue.pop(mem);
224229
if next_desc.is_none() {
225230
return false;
226231
}
@@ -239,7 +244,7 @@ impl Net {
239244

240245
let limit = cmp::min(write_count + desc.len as usize, self.rx_bytes_read);
241246
let source_slice = &self.rx_frame_buf[write_count..limit];
242-
let write_result = self.mem.write_slice(source_slice, desc.addr);
247+
let write_result = mem.write_slice(source_slice, desc.addr);
243248

244249
match write_result {
245250
Ok(()) => {
@@ -271,7 +276,7 @@ impl Net {
271276
}
272277
}
273278

274-
rx_queue.add_used(&self.mem, head_index, write_count as u32);
279+
rx_queue.add_used(mem, head_index, write_count as u32);
275280

276281
// Mark that we have at least one pending packet and we need to interrupt the guest.
277282
self.rx_deferred_irqs = true;
@@ -406,6 +411,12 @@ impl Net {
406411
}
407412

408413
fn process_tx(&mut self) -> result::Result<(), DeviceError> {
414+
let mem = match self.device_state {
415+
DeviceState::Activated(ref mem) => mem,
416+
// This should never happen, it's been already validated in the event handler.
417+
DeviceState::Inactive => return Err(DeviceError::SpuriousEvent),
418+
};
419+
409420
// The MMDS network stack works like a state machine, based on synchronous calls, and
410421
// without being added to any event loop. If any frame is accepted by the MMDS, we also
411422
// trigger a process_rx() which checks if there are any new frames to be sent, starting
@@ -414,7 +425,7 @@ impl Net {
414425
let mut raise_irq = false;
415426
let tx_queue = &mut self.queues[TX_INDEX];
416427

417-
while let Some(head) = tx_queue.pop(&self.mem) {
428+
while let Some(head) = tx_queue.pop(mem) {
418429
// If limiter.consume() fails it means there is no more TokenType::Ops
419430
// budget and rate limiting is in effect.
420431
if !self.tx_rate_limiter.consume(1, TokenType::Ops) {
@@ -459,7 +470,7 @@ impl Net {
459470
for (desc_addr, desc_len) in self.tx_iovec.drain(..) {
460471
let limit = cmp::min((read_count + desc_len) as usize, self.tx_frame_buf.len());
461472

462-
let read_result = self.mem.read_slice(
473+
let read_result = mem.read_slice(
463474
&mut self.tx_frame_buf[read_count..limit as usize],
464475
desc_addr,
465476
);
@@ -492,7 +503,7 @@ impl Net {
492503
process_rx_for_mmds = true;
493504
}
494505

495-
tx_queue.add_used(&self.mem, head_index, 0);
506+
tx_queue.add_used(mem, head_index, 0);
496507
raise_irq = true;
497508
}
498509

@@ -541,8 +552,13 @@ impl Net {
541552
}
542553

543554
pub fn process_tap_rx_event(&mut self) {
555+
let mem = match self.device_state {
556+
DeviceState::Activated(ref mem) => mem,
557+
// This should never happen, it's been already validated in the event handler.
558+
DeviceState::Inactive => return,
559+
};
544560
METRICS.net.rx_tap_event_count.inc();
545-
if self.queues[RX_INDEX].is_empty(&self.mem) {
561+
if self.queues[RX_INDEX].is_empty(mem) {
546562
error!("The RX queue is empty, there is no available buffer.");
547563
METRICS.net.event_fails.inc();
548564
return;
@@ -676,20 +692,28 @@ impl VirtioDevice for Net {
676692
}
677693

678694
fn is_activated(&self) -> bool {
679-
self.device_activated
695+
match self.device_state {
696+
DeviceState::Inactive => false,
697+
DeviceState::Activated(_) => true,
698+
}
680699
}
681700

682-
fn activate(&mut self, _: GuestMemoryMmap) -> ActivateResult {
683-
self.device_activated = true;
701+
fn activate(&mut self, mem: GuestMemoryMmap) -> ActivateResult {
702+
if self.activate_evt.write(1).is_err() {
703+
error!("Net: Cannot write to activate_evt");
704+
return Err(super::super::ActivateError::BadActivate);
705+
}
706+
self.device_state = DeviceState::Activated(mem);
684707
Ok(())
685708
}
686709
}
687710

688711
#[cfg(test)]
689-
mod tests {
712+
pub(crate) mod tests {
690713
use std::net::Ipv4Addr;
691714
use std::os::unix::io::AsRawFd;
692715
use std::sync::atomic::{AtomicUsize, Ordering};
716+
use std::sync::Mutex;
693717
use std::time::Duration;
694718
use std::{io, mem, thread};
695719

@@ -741,13 +765,13 @@ mod tests {
741765
}
742766
}
743767

744-
trait TestUtil {
768+
pub(crate) trait TestUtil {
745769
fn default_net(test_mutators: TestMutators) -> Net;
746770
fn default_guest_mac() -> MacAddr;
747771
fn default_guest_memory() -> GuestMemoryMmap;
748772
fn rx_single_frame_no_irq_coalescing(&mut self) -> bool;
749773
fn virtqueues(mem: &GuestMemoryMmap) -> (VirtQueue, VirtQueue);
750-
fn assign_queues(&mut self, rxq: Queue, txq: Queue);
774+
fn assign_queues(&mut self, rxq: Queue, txq: Queue, mem: &GuestMemoryMmap);
751775
fn set_mac(&mut self, mac: MacAddr);
752776
}
753777

@@ -762,7 +786,6 @@ mod tests {
762786
let mut net = Net::new_with_tap(
763787
tap,
764788
Some(&guest_mac),
765-
Net::default_guest_memory(),
766789
RateLimiter::default(),
767790
RateLimiter::default(),
768791
true,
@@ -808,11 +831,11 @@ mod tests {
808831
}
809832

810833
// Assigns "guest virtio driver" activated queues to the net device.
811-
fn assign_queues(&mut self, rxq: Queue, txq: Queue) {
834+
fn assign_queues(&mut self, rxq: Queue, txq: Queue, mem: &GuestMemoryMmap) {
812835
self.queues.clear();
813836
self.queues.push(rxq);
814837
self.queues.push(txq);
815-
self.activate(self.mem.clone()).unwrap();
838+
self.activate(mem.clone()).unwrap();
816839
}
817840
}
818841

@@ -931,12 +954,12 @@ mod tests {
931954
}
932955

933956
#[test]
934-
fn test_event_handling() {
957+
fn test_event_processing() {
935958
let mut event_manager = EventManager::new().unwrap();
936959
let mut net = Net::default_net(TestMutators::default());
937-
let mem_clone = net.mem.clone();
938-
let (rxq, txq) = Net::virtqueues(&mem_clone);
939-
net.assign_queues(rxq.create_queue(), txq.create_queue());
960+
let mem = Net::default_guest_memory();
961+
let (rxq, txq) = Net::virtqueues(&mem);
962+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
940963

941964
let daddr = 0x2000;
942965
assert!(daddr > txq.end().0);
@@ -1213,9 +1236,9 @@ mod tests {
12131236
fn test_process_error_cases() {
12141237
let mut event_manager = EventManager::new().unwrap();
12151238
let mut net = Net::default_net(TestMutators::default());
1216-
let mem_clone = net.mem.clone();
1217-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1218-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1239+
let mem = Net::default_guest_memory();
1240+
let (rxq, txq) = Net::virtqueues(&mem);
1241+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
12191242

12201243
// RX rate limiter events should error since the limiter is not blocked.
12211244
// Validate that the event failed and failure was properly accounted for.
@@ -1242,15 +1265,26 @@ mod tests {
12421265
fn test_invalid_event() {
12431266
let mut event_manager = EventManager::new().unwrap();
12441267
let mut net = Net::default_net(TestMutators::default());
1245-
let mem_clone = net.mem.clone();
1246-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1247-
net.assign_queues(rxq.create_queue(), txq.create_queue());
12481268

1269+
let mem = Net::default_guest_memory();
1270+
let (rxq, txq) = Net::virtqueues(&mem);
1271+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
1272+
1273+
let net = Arc::new(Mutex::new(net));
1274+
event_manager.add_subscriber(net.clone()).unwrap();
1275+
1276+
// Process the activate event.
1277+
let ev_count = event_manager.run_with_timeout(50).unwrap();
1278+
assert_eq!(ev_count, 1);
1279+
1280+
// Inject invalid event.
12491281
let invalid_event = EpollEvent::new(EventSet::IN, 1000);
12501282
check_metric_after_block!(
12511283
&METRICS.net.event_fails,
12521284
1,
1253-
net.process(&invalid_event, &mut event_manager)
1285+
net.lock()
1286+
.unwrap()
1287+
.process(&invalid_event, &mut event_manager)
12541288
);
12551289
}
12561290

@@ -1265,9 +1299,9 @@ mod tests {
12651299
};
12661300

12671301
let mut net = Net::default_net(test_mutators);
1268-
let mem_clone = net.mem.clone();
1269-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1270-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1302+
let mem = Net::default_guest_memory();
1303+
let (rxq, txq) = Net::virtqueues(&mem);
1304+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
12711305

12721306
// The RX queue is empty.
12731307
let tap_event = EpollEvent::new(EventSet::IN, net.tap.as_raw_fd() as u64);
@@ -1290,9 +1324,9 @@ mod tests {
12901324
fn test_rx_rate_limiter_handling() {
12911325
let mut event_manager = EventManager::new().unwrap();
12921326
let mut net = Net::default_net(TestMutators::default());
1293-
let mem_clone = net.mem.clone();
1294-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1295-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1327+
let mem = Net::default_guest_memory();
1328+
let (rxq, txq) = Net::virtqueues(&mem);
1329+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
12961330

12971331
net.rx_rate_limiter = RateLimiter::new(0, None, 0, 0, None, 0).unwrap();
12981332
let rate_limiter_event =
@@ -1308,9 +1342,9 @@ mod tests {
13081342
fn test_tx_rate_limiter_handling() {
13091343
let mut event_manager = EventManager::new().unwrap();
13101344
let mut net = Net::default_net(TestMutators::default());
1311-
let mem_clone = net.mem.clone();
1312-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1313-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1345+
let mem = Net::default_guest_memory();
1346+
let (rxq, txq) = Net::virtqueues(&mem);
1347+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
13141348

13151349
net.tx_rate_limiter = RateLimiter::new(0, None, 0, 0, None, 0).unwrap();
13161350
let rate_limiter_event =
@@ -1327,9 +1361,9 @@ mod tests {
13271361
fn test_bandwidth_rate_limiter() {
13281362
let mut event_manager = EventManager::new().unwrap();
13291363
let mut net = Net::default_net(TestMutators::default());
1330-
let mem_clone = net.mem.clone();
1331-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1332-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1364+
let mem = Net::default_guest_memory();
1365+
let (rxq, txq) = Net::virtqueues(&mem);
1366+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
13331367

13341368
let daddr = 0x2000;
13351369
assert!(daddr > txq.end().0);
@@ -1445,9 +1479,9 @@ mod tests {
14451479
fn test_ops_rate_limiter() {
14461480
let mut event_manager = EventManager::new().unwrap();
14471481
let mut net = Net::default_net(TestMutators::default());
1448-
let mem_clone = net.mem.clone();
1449-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1450-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1482+
let mem = Net::default_guest_memory();
1483+
let (rxq, txq) = Net::virtqueues(&mem);
1484+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
14511485

14521486
let daddr = 0x2000;
14531487
assert!(daddr > txq.end().0);
@@ -1564,9 +1598,9 @@ mod tests {
15641598
#[test]
15651599
fn test_patch_rate_limiters() {
15661600
let mut net = Net::default_net(TestMutators::default());
1567-
let mem_clone = net.mem.clone();
1568-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1569-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1601+
let mem = Net::default_guest_memory();
1602+
let (rxq, txq) = Net::virtqueues(&mem);
1603+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
15701604

15711605
net.rx_rate_limiter = RateLimiter::new(10, None, 10, 2, None, 2).unwrap();
15721606
net.tx_rate_limiter = RateLimiter::new(10, None, 10, 2, None, 2).unwrap();
@@ -1600,9 +1634,9 @@ mod tests {
16001634
// Regression test for https://github.com/firecracker-microvm/firecracker/issues/1436 .
16011635
let mut event_manager = EventManager::new().unwrap();
16021636
let mut net = Net::default_net(TestMutators::default());
1603-
let mem_clone = net.mem.clone();
1604-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1605-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1637+
let mem = Net::default_guest_memory();
1638+
let (rxq, txq) = Net::virtqueues(&mem);
1639+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
16061640

16071641
let daddr = 0x2000;
16081642
assert!(daddr > txq.end().0);
@@ -1626,9 +1660,9 @@ mod tests {
16261660
#[test]
16271661
fn test_virtio_device() {
16281662
let mut net = Net::default_net(TestMutators::default());
1629-
let mem_clone = net.mem.clone();
1630-
let (rxq, txq) = Net::virtqueues(&mem_clone);
1631-
net.assign_queues(rxq.create_queue(), txq.create_queue());
1663+
let mem = Net::default_guest_memory();
1664+
let (rxq, txq) = Net::virtqueues(&mem);
1665+
net.assign_queues(rxq.create_queue(), txq.create_queue(), &mem);
16321666

16331667
// Test queues count (TX and RX).
16341668
let queues = net.queues();

0 commit comments

Comments (0)