// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fmt::{self, Display};
use std::io::{self, Write};
use std::mem;
use std::net::Ipv4Addr;
use std::os::raw::c_uint;
use std::result;
use std::sync::Arc;
use std::thread;

use base::Error as SysError;
use base::{error, warn, AsRawDescriptor, Event, EventType, PollToken, RawDescriptor, WaitContext};
use data_model::{DataInit, Le16, Le64};
use net_util::{Error as TapError, MacAddress, TapT};
use virtio_sys::virtio_net;
use virtio_sys::virtio_net::{
    virtio_net_hdr_v1, VIRTIO_NET_CTRL_GUEST_OFFLOADS, VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
    VIRTIO_NET_CTRL_MQ, VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, VIRTIO_NET_ERR, VIRTIO_NET_OK,
};
use vm_memory::GuestMemory;

use super::{
    copy_config, DescriptorError, Interrupt, Queue, Reader, SignalableInterrupt, VirtioDevice,
    Writer, TYPE_NET,
};

const QUEUE_SIZE: u16 = 256;

#[derive(Debug)]
pub enum NetError {
    /// Creating kill event failed.
    CreateKillEvent(SysError),
    /// Creating WaitContext failed.
    CreateWaitContext(SysError),
    /// Cloning kill event failed.
    CloneKillEvent(SysError),
    /// Descriptor chain was invalid.
    DescriptorChain(DescriptorError),
    /// Removing read event from the tap fd events failed.
    WaitContextDisableTap(SysError),
    /// Adding read event to the tap fd events failed.
    WaitContextEnableTap(SysError),
    /// Error while waiting for events.
    WaitError(SysError),
    /// Error reading data from control queue.
    ReadCtrlData(io::Error),
    /// Error reading header from control queue.
    ReadCtrlHeader(io::Error),
    /// There are no more available descriptors to receive into.
    RxDescriptorsExhausted,
    /// Open tap device failed.
    TapOpen(TapError),
    /// Setting tap IP failed.
    TapSetIp(TapError),
    /// Setting tap netmask failed.
    TapSetNetmask(TapError),
    /// Setting tap mac address failed.
    TapSetMacAddress(TapError),
    /// Setting tap interface offload flags failed.
    TapSetOffload(TapError),
    /// Setting vnet header size failed.
    TapSetVnetHdrSize(TapError),
    /// Enabling tap interface failed.
    TapEnable(TapError),
    /// Validating tap interface failed.
    TapValidate(String),
    /// Failed writing an ack in response to a control message.
    WriteAck(io::Error),
    /// Writing to a buffer in the guest failed.
    WriteBuffer(io::Error),
}

impl Display for NetError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::NetError::*;

        match self {
            CreateKillEvent(e) => write!(f, "failed to create kill event: {}", e),
            CreateWaitContext(e) => write!(f, "failed to create wait context: {}", e),
            CloneKillEvent(e) => write!(f, "failed to clone kill event: {}", e),
            DescriptorChain(e) => write!(f, "failed to validate descriptor chain: {}", e),
            WaitContextDisableTap(e) => write!(f, "failed to disable EPOLLIN on tap fd: {}", e),
            WaitContextEnableTap(e) => write!(f, "failed to enable EPOLLIN on tap fd: {}", e),
            WaitError(e) => write!(f, "error while waiting for events: {}", e),
            ReadCtrlData(e) => write!(f, "failed to read control message data: {}", e),
            ReadCtrlHeader(e) => write!(f, "failed to read control message header: {}", e),
            RxDescriptorsExhausted => write!(f, "no rx descriptors available"),
            TapOpen(e) => write!(f, "failed to open tap device: {}", e),
            TapSetIp(e) => write!(f, "failed to set tap IP: {}", e),
            TapSetNetmask(e) => write!(f, "failed to set tap netmask: {}", e),
            TapSetMacAddress(e) => write!(f, "failed to set tap mac address: {}", e),
            TapSetOffload(e) => write!(f, "failed to set tap interface offload flags: {}", e),
            TapSetVnetHdrSize(e) => write!(f, "failed to set vnet header size: {}", e),
            TapEnable(e) => write!(f, "failed to enable tap interface: {}", e),
            TapValidate(s) => write!(f, "failed to validate tap interface: {}", s),
            WriteAck(e) => write!(f, "failed to write control message ack: {}", e),
            WriteBuffer(e) => write!(f, "failed to write to guest buffer: {}", e),
        }
    }
}

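/// Header of a virtio-net control queue message, matching `struct virtio_net_ctrl_hdr` from the
/// virtio specification: `class` selects the command group and `cmd` the specific command.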
#[repr(C, packed)]
#[derive(Debug, Clone, Copy)]
pub struct virtio_net_ctrl_hdr {
    pub class: u8,
    pub cmd: u8,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for virtio_net_ctrl_hdr {}

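/// Converts acked virtio-net guest offload feature bits (`VIRTIO_NET_F_GUEST_*`) into the
/// corresponding `TUN_F_*` offload flags accepted by the tap device.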
fn virtio_features_to_tap_offload(features: u64) -> c_uint {
    let mut tap_offloads: c_uint = 0;
    if features & (1 << virtio_net::VIRTIO_NET_F_GUEST_CSUM) != 0 {
        tap_offloads |= net_sys::TUN_F_CSUM;
    }
    if features & (1 << virtio_net::VIRTIO_NET_F_GUEST_TSO4) != 0 {
        tap_offloads |= net_sys::TUN_F_TSO4;
    }
    if features & (1 << virtio_net::VIRTIO_NET_F_GUEST_TSO6) != 0 {
        tap_offloads |= net_sys::TUN_F_TSO6;
    }
    if features & (1 << virtio_net::VIRTIO_NET_F_GUEST_ECN) != 0 {
        tap_offloads |= net_sys::TUN_F_TSO_ECN;
    }
    if features & (1 << virtio_net::VIRTIO_NET_F_GUEST_UFO) != 0 {
        tap_offloads |= net_sys::TUN_F_UFO;
    }

    tap_offloads
}

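/// Device configuration space for virtio-net, matching `struct virtio_net_config` from the
/// virtio specification.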
#[derive(Debug, Clone, Copy, Default)]
#[repr(C)]
pub(crate) struct VirtioNetConfig {
    mac: [u8; 6],
    status: Le16,
    max_vq_pairs: Le16,
    mtu: Le16,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VirtioNetConfig {}

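/// Per-queue-pair worker state: each worker owns one tap fd and one rx/tx queue pair, and the
/// worker for the first pair also services the control queue.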
struct Worker<T: TapT> {
    interrupt: Arc<Interrupt>,
    mem: GuestMemory,
    rx_queue: Queue,
    tx_queue: Queue,
    ctrl_queue: Option<Queue>,
    tap: T,
    acked_features: u64,
    vq_pairs: u16,
    kill_evt: Event,
}

impl<T> Worker<T>
where
    T: TapT,
{
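    // Copies frames from the tap into available rx descriptors until the tap has no more data or
    // the rx queue runs out of descriptors.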
    fn process_rx(&mut self) -> result::Result<(), NetError> {
        let mut needs_interrupt = false;
        let mut exhausted_queue = false;

        // Read as many frames as possible.
        loop {
            let desc_chain = match self.rx_queue.peek(&self.mem) {
                Some(desc) => desc,
                None => {
                    exhausted_queue = true;
                    break;
                }
            };

            let index = desc_chain.index;
            let bytes_written = match Writer::new(self.mem.clone(), desc_chain) {
                Ok(mut writer) => {
                    match writer.write_from(&mut self.tap, writer.available_bytes()) {
                        Ok(_) => {}
                        Err(ref e) if e.kind() == io::ErrorKind::WriteZero => {
                            warn!("net: rx: buffer is too small to hold frame");
                            break;
                        }
                        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
                            // No more to read from the tap.
                            break;
                        }
                        Err(e) => {
                            warn!("net: rx: failed to write slice: {}", e);
                            return Err(NetError::WriteBuffer(e));
                        }
                    };

                    writer.bytes_written() as u32
                }
                Err(e) => {
                    error!("net: failed to create Writer: {}", e);
                    0
                }
            };

            if bytes_written > 0 {
                self.rx_queue.pop_peeked(&self.mem);
                self.rx_queue.add_used(&self.mem, index, bytes_written);
                needs_interrupt = true;
            }
        }

        if needs_interrupt {
            self.interrupt.signal_used_queue(self.rx_queue.vector);
        }

        if exhausted_queue {
            Err(NetError::RxDescriptorsExhausted)
        } else {
            Ok(())
        }
    }

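    // Drains the tx queue, writing each frame from the guest to the tap in a single call.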
    fn process_tx(&mut self) {
        while let Some(desc_chain) = self.tx_queue.pop(&self.mem) {
            let index = desc_chain.index;

            match Reader::new(self.mem.clone(), desc_chain) {
                Ok(mut reader) => {
                    let expected_count = reader.available_bytes();
                    match reader.read_to(&mut self.tap, expected_count) {
                        Ok(count) => {
                            // Tap writes must be done in one call. If the entire frame was not
                            // written, it's an error.
                            if count != expected_count {
                                error!(
                                    "net: tx: wrote only {} bytes of {} byte frame",
                                    count, expected_count
                                );
                            }
                        }
                        Err(e) => error!("net: tx: failed to write frame to tap: {}", e),
                    }
                }
                Err(e) => error!("net: failed to create Reader: {}", e),
            }

            self.tx_queue.add_used(&self.mem, index, 0);
        }

        self.interrupt.signal_used_queue(self.tx_queue.vector);
    }

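    // Processes control queue commands (currently guest offload and multiqueue configuration),
    // writing a VIRTIO_NET_OK or VIRTIO_NET_ERR ack for each message.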
    fn process_ctrl(&mut self) -> Result<(), NetError> {
        let ctrl_queue = match self.ctrl_queue.as_mut() {
            Some(queue) => queue,
            None => return Ok(()),
        };

        while let Some(desc_chain) = ctrl_queue.pop(&self.mem) {
            let index = desc_chain.index;

            let mut reader = Reader::new(self.mem.clone(), desc_chain.clone())
                .map_err(NetError::DescriptorChain)?;
            let mut writer =
                Writer::new(self.mem.clone(), desc_chain).map_err(NetError::DescriptorChain)?;
            let ctrl_hdr: virtio_net_ctrl_hdr =
                reader.read_obj().map_err(NetError::ReadCtrlHeader)?;

            match ctrl_hdr.class as c_uint {
                VIRTIO_NET_CTRL_GUEST_OFFLOADS => {
                    if ctrl_hdr.cmd != VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET as u8 {
                        error!(
                            "invalid cmd for VIRTIO_NET_CTRL_GUEST_OFFLOADS: {}",
                            ctrl_hdr.cmd
                        );
                        let ack = VIRTIO_NET_ERR as u8;
                        writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
                        ctrl_queue.add_used(&self.mem, index, 0);
                        continue;
                    }
                    let offloads: Le64 = reader.read_obj().map_err(NetError::ReadCtrlData)?;
                    let tap_offloads = virtio_features_to_tap_offload(offloads.into());
                    self.tap
                        .set_offload(tap_offloads)
                        .map_err(NetError::TapSetOffload)?;
                    let ack = VIRTIO_NET_OK as u8;
                    writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
                }
                VIRTIO_NET_CTRL_MQ => {
                    if ctrl_hdr.cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET as u8 {
                        let pairs: Le16 = reader.read_obj().map_err(NetError::ReadCtrlData)?;
                        // Only the simple case is handled: the driver must have negotiated
                        // VIRTIO_NET_F_MQ and must request exactly the number of queue pairs the
                        // device was created with.
                        if self.acked_features & 1 << virtio_net::VIRTIO_NET_F_MQ == 0
                            || pairs.to_native() != self.vq_pairs
                        {
                            error!(
                                "invalid VQ_PAIRS_SET cmd, driver requested pairs: {}, device vq pairs: {}",
                                pairs.to_native(),
                                self.vq_pairs
                            );
                            let ack = VIRTIO_NET_ERR as u8;
                            writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
                            ctrl_queue.add_used(&self.mem, index, 0);
                            continue;
                        }
                        let ack = VIRTIO_NET_OK as u8;
                        writer.write_all(&[ack]).map_err(NetError::WriteAck)?;
                    }
                }
                _ => warn!("unimplemented control message class: {}", ctrl_hdr.class),
            }

            ctrl_queue.add_used(&self.mem, index, 0);
        }

        self.interrupt.signal_used_queue(ctrl_queue.vector);
        Ok(())
    }

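    // The worker's main event loop: waits on tap readability, the queue events, the kill event,
    // and (for the worker that owns the control queue) the interrupt resample event, dispatching
    // to the process_* handlers above.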
    fn run(
        &mut self,
        rx_queue_evt: Event,
        tx_queue_evt: Event,
        ctrl_queue_evt: Option<Event>,
    ) -> Result<(), NetError> {
        #[derive(PollToken)]
        enum Token {
            // A frame is available for reading from the tap device to receive in the guest.
            RxTap,
            // The guest has made a buffer available to receive a frame into.
            RxQueue,
            // The transmit queue has a frame that is ready to send from the guest.
            TxQueue,
            // The control queue has a message.
            CtrlQueue,
            // Check if any interrupts need to be re-asserted.
            InterruptResample,
            // crosvm has requested the device to shut down.
            Kill,
        }

        let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
            (&self.tap, Token::RxTap),
            (&rx_queue_evt, Token::RxQueue),
            (&tx_queue_evt, Token::TxQueue),
            (&self.kill_evt, Token::Kill),
        ])
        .map_err(NetError::CreateWaitContext)?;

        if let Some(ctrl_evt) = &ctrl_queue_evt {
            wait_ctx
                .add(ctrl_evt, Token::CtrlQueue)
                .map_err(NetError::CreateWaitContext)?;
            // Let CtrlQueue's thread handle InterruptResample also.
            if let Some(resample_evt) = self.interrupt.get_resample_evt() {
                wait_ctx
                    .add(resample_evt, Token::InterruptResample)
                    .map_err(NetError::CreateWaitContext)?;
            }
        }

        let mut tap_polling_enabled = true;
        'wait: loop {
            let events = wait_ctx.wait().map_err(NetError::WaitError)?;
            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    Token::RxTap => match self.process_rx() {
                        Ok(()) => {}
                        Err(NetError::RxDescriptorsExhausted) => {
                            wait_ctx
                                .modify(&self.tap, EventType::None, Token::RxTap)
                                .map_err(NetError::WaitContextDisableTap)?;
                            tap_polling_enabled = false;
                        }
                        Err(e) => return Err(e),
                    },
                    Token::RxQueue => {
                        if let Err(e) = rx_queue_evt.read() {
                            error!("net: error reading rx queue Event: {}", e);
                            break 'wait;
                        }
                        if !tap_polling_enabled {
                            wait_ctx
                                .modify(&self.tap, EventType::Read, Token::RxTap)
                                .map_err(NetError::WaitContextEnableTap)?;
                            tap_polling_enabled = true;
                        }
                    }
                    Token::TxQueue => {
                        if let Err(e) = tx_queue_evt.read() {
                            error!("net: error reading tx queue Event: {}", e);
                            break 'wait;
                        }
                        self.process_tx();
                    }
                    Token::CtrlQueue => {
                        if let Some(ctrl_evt) = &ctrl_queue_evt {
                            if let Err(e) = ctrl_evt.read() {
                                error!("net: error reading ctrl queue Event: {}", e);
                                break 'wait;
                            }
                        } else {
                            break 'wait;
                        }
                        if let Err(e) = self.process_ctrl() {
                            error!("net: failed to process control message: {}", e);
                            break 'wait;
                        }
                    }
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                    Token::Kill => {
                        let _ = self.kill_evt.read();
                        break 'wait;
                    }
                }
            }
        }
        Ok(())
    }
}

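/// Virtio network device that bridges a guest network interface to a host tap device, using one
/// tap and one worker thread per rx/tx queue pair.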
pub struct Net<T: TapT> {
    queue_sizes: Box<[u16]>,
    workers_kill_evt: Vec<Event>,
    kill_evts: Vec<Event>,
    worker_threads: Vec<thread::JoinHandle<Worker<T>>>,
    taps: Vec<T>,
    avail_features: u64,
    acked_features: u64,
}

impl<T> Net<T>
where
    T: TapT,
{
    /// Creates a new virtio network device with the given IP address, netmask, MAC address, and
    /// number of rx/tx queue pairs.
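    ///
    /// # Example
    ///
    /// A minimal usage sketch (assuming `net_util::Tap` as the concrete `TapT` implementation;
    /// the feature bits, addresses, and MAC value are purely illustrative):
    ///
    /// ```ignore
    /// use std::net::Ipv4Addr;
    ///
    /// use net_util::{MacAddress, Tap};
    ///
    /// let net = Net::<Tap>::new(
    ///     0, // base_features, normally provided by the VMM
    ///     Ipv4Addr::new(192, 168, 10, 1),
    ///     Ipv4Addr::new(255, 255, 255, 0),
    ///     "12:34:56:78:9a:bc".parse::<MacAddress>().expect("failed to parse MAC"),
    ///     1, // vq_pairs
    /// )
    /// .expect("failed to create virtio-net device");
    /// ```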
    pub fn new(
        base_features: u64,
        ip_addr: Ipv4Addr,
        netmask: Ipv4Addr,
        mac_addr: MacAddress,
        vq_pairs: u16,
    ) -> Result<Net<T>, NetError> {
        let multi_queue = vq_pairs > 1;
        let tap: T = T::new(true, multi_queue).map_err(NetError::TapOpen)?;
        tap.set_ip_addr(ip_addr).map_err(NetError::TapSetIp)?;
        tap.set_netmask(netmask).map_err(NetError::TapSetNetmask)?;
        tap.set_mac_address(mac_addr)
            .map_err(NetError::TapSetMacAddress)?;

        tap.enable().map_err(NetError::TapEnable)?;

        Net::from(base_features, tap, vq_pairs)
    }

    /// Creates a new virtio network device from a tap device that has already been
    /// configured.
    pub fn from(base_features: u64, tap: T, vq_pairs: u16) -> Result<Net<T>, NetError> {
        let taps = tap.into_mq_taps(vq_pairs).map_err(NetError::TapOpen)?;

        // This would also validate a tap created by Self::new(), but that's a good thing as it
        // would ensure that any changes in the creation procedure are matched in the validation.
        // Plus we still need to set the vnet_hdr_size value.
        for tap in &taps {
            validate_and_configure_tap(tap, vq_pairs)?;
        }

        let mut avail_features = base_features
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_CSUM
            | 1 << virtio_net::VIRTIO_NET_F_CSUM
            | 1 << virtio_net::VIRTIO_NET_F_CTRL_VQ
            | 1 << virtio_net::VIRTIO_NET_F_CTRL_GUEST_OFFLOADS
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_TSO4
            | 1 << virtio_net::VIRTIO_NET_F_GUEST_UFO
            | 1 << virtio_net::VIRTIO_NET_F_HOST_TSO4
            | 1 << virtio_net::VIRTIO_NET_F_HOST_UFO;

        if vq_pairs > 1 {
            avail_features |= 1 << virtio_net::VIRTIO_NET_F_MQ;
        }

        let mut kill_evts: Vec<Event> = Vec::new();
        let mut workers_kill_evt: Vec<Event> = Vec::new();
        for _ in 0..taps.len() {
            let kill_evt = Event::new().map_err(NetError::CreateKillEvent)?;
            let worker_kill_evt = kill_evt.try_clone().map_err(NetError::CloneKillEvent)?;
            kill_evts.push(kill_evt);
            workers_kill_evt.push(worker_kill_evt);
        }

        Ok(Net {
            queue_sizes: vec![QUEUE_SIZE; (vq_pairs * 2 + 1) as usize].into_boxed_slice(),
            workers_kill_evt,
            kill_evts,
            worker_threads: Vec::new(),
            taps,
            avail_features,
            acked_features: 0u64,
        })
    }

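    // Builds the virtio-net config space contents exposed to the guest via read_config().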
    fn build_config(&self) -> VirtioNetConfig {
        let vq_pairs = self.queue_sizes.len() as u16 / 2;

        VirtioNetConfig {
            max_vq_pairs: Le16::from(vq_pairs),
            // The remaining fields are only meaningful when the corresponding features are
            // negotiated, and none of those features are currently supported, so leave them at
            // their default values.
            ..Default::default()
        }
    }
}

// Checks that the tap interface has the flags required by the device (including IFF_MULTI_QUEUE
// when more than one queue pair is used) and sets the vnet header size to match virtio_net_hdr_v1.
fn validate_and_configure_tap<T: TapT>(tap: &T, vq_pairs: u16) -> Result<(), NetError> {
    let flags = tap.if_flags();
    let mut required_flags = vec![
        (net_sys::IFF_TAP, "IFF_TAP"),
        (net_sys::IFF_NO_PI, "IFF_NO_PI"),
        (net_sys::IFF_VNET_HDR, "IFF_VNET_HDR"),
    ];
    if vq_pairs > 1 {
        required_flags.push((net_sys::IFF_MULTI_QUEUE, "IFF_MULTI_QUEUE"));
    }
    let missing_flags = required_flags
        .iter()
        .filter_map(
            |(value, name)| {
                if value & flags == 0 {
                    Some(name)
                } else {
                    None
                }
            },
        )
        .collect::<Vec<_>>();

    if !missing_flags.is_empty() {
        return Err(NetError::TapValidate(format!(
            "Missing flags: {:?}",
            missing_flags
        )));
    }

    let vnet_hdr_size = mem::size_of::<virtio_net_hdr_v1>() as i32;
    tap.set_vnet_hdr_size(vnet_hdr_size)
        .map_err(NetError::TapSetVnetHdrSize)?;

    Ok(())
}

impl<T> Drop for Net<T>
where
    T: TapT,
{
    fn drop(&mut self) {
        let len = self.kill_evts.len();
        for i in 0..len {
            // Only kill the child if it claimed its event.
            if self.workers_kill_evt.get(i).is_none() {
                if let Some(kill_evt) = self.kill_evts.get(i) {
                    // Ignore the result because there is nothing we can do about it.
                    let _ = kill_evt.write(1);
                }
            }
        }

        let len = self.worker_threads.len();
        for _ in 0..len {
            let _ = self.worker_threads.remove(0).join();
        }
    }
}

impl<T> VirtioDevice for Net<T>
where
    T: 'static + TapT,
{
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        for tap in &self.taps {
            keep_rds.push(tap.as_raw_descriptor());
        }

        for worker_kill_evt in &self.workers_kill_evt {
            keep_rds.push(worker_kill_evt.as_raw_descriptor());
        }
        for kill_evt in &self.kill_evts {
            keep_rds.push(kill_evt.as_raw_descriptor());
        }

        keep_rds
    }

    fn device_type(&self) -> u32 {
        TYPE_NET
    }

    fn queue_max_sizes(&self) -> &[u16] {
        &self.queue_sizes
    }

    fn features(&self) -> u64 {
        self.avail_features
    }

    fn ack_features(&mut self, value: u64) {
        let mut v = value;

        // Check if the guest is ACK'ing a feature that we didn't claim to have.
        let unrequested_features = v & !self.avail_features;
        if unrequested_features != 0 {
            warn!("net: virtio net got unknown feature ack: {:x}", v);

            // Don't count these features as acked.
            v &= !unrequested_features;
        }
        self.acked_features |= v;

        // Set offload flags to match acked virtio features.
        if let Some(tap) = self.taps.first() {
            if let Err(e) = tap.set_offload(virtio_features_to_tap_offload(self.acked_features)) {
                warn!(
                    "net: failed to set tap offload to match acked features: {}",
                    e
                );
            }
        }
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        let config_space = self.build_config();
        copy_config(data, 0, config_space.as_slice(), offset);
    }

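    // Spawns one worker thread per rx/tx queue pair; the first worker additionally owns the
    // control queue and the interrupt resample event.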
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<Queue>,
        mut queue_evts: Vec<Event>,
    ) {
        if queues.len() != self.queue_sizes.len() || queue_evts.len() != self.queue_sizes.len() {
            error!(
                "net: expected {} queues, got {}",
                self.queue_sizes.len(),
                queues.len()
            );
            return;
        }

        let vq_pairs = self.queue_sizes.len() / 2;
        if self.taps.len() != vq_pairs {
            error!("net: expected {} taps, got {}", vq_pairs, self.taps.len());
            return;
        }
        if self.workers_kill_evt.len() != vq_pairs {
            error!(
                "net: expected {} worker_kill_evt, got {}",
                vq_pairs,
                self.workers_kill_evt.len()
            );
            return;
        }
        let interrupt_arc = Arc::new(interrupt);
        for i in 0..vq_pairs {
            let tap = self.taps.remove(0);
            let acked_features = self.acked_features;
            let interrupt = interrupt_arc.clone();
            let memory = mem.clone();
            let kill_evt = self.workers_kill_evt.remove(0);
            // Queues alternate between rx0, tx0, rx1, tx1, ..., rxN, txN, ctrl.
            let rx_queue = queues.remove(0);
            let tx_queue = queues.remove(0);
            let ctrl_queue = if i == 0 {
                Some(queues.remove(queues.len() - 1))
            } else {
                None
            };
            let pairs = vq_pairs as u16;
            let rx_queue_evt = queue_evts.remove(0);
            let tx_queue_evt = queue_evts.remove(0);
            let ctrl_queue_evt = if i == 0 {
                Some(queue_evts.remove(queue_evts.len() - 1))
            } else {
                None
            };
            let worker_result = thread::Builder::new()
                .name(format!("virtio_net worker {}", i))
                .spawn(move || {
                    let mut worker = Worker {
                        interrupt,
                        mem: memory,
                        rx_queue,
                        tx_queue,
                        ctrl_queue,
                        tap,
                        acked_features,
                        vq_pairs: pairs,
                        kill_evt,
                    };
                    let result = worker.run(rx_queue_evt, tx_queue_evt, ctrl_queue_evt);
                    if let Err(e) = result {
                        error!("net worker thread exited with error: {}", e);
                    }
                    worker
                });

            match worker_result {
                Err(e) => {
                    error!("failed to spawn virtio_net worker: {}", e);
                    return;
                }
                Ok(join_handle) => self.worker_threads.push(join_handle),
            }
        }
    }

    fn reset(&mut self) -> bool {
        let len = self.kill_evts.len();
        for i in 0..len {
            // Only kill the child if it claimed its event.
            if self.workers_kill_evt.get(i).is_none() {
                if let Some(kill_evt) = self.kill_evts.get(i) {
                    if kill_evt.write(1).is_err() {
                        error!("{}: failed to notify the kill event", self.debug_label());
                        return false;
                    }
                }
            }
        }

        let len = self.worker_threads.len();
        for _ in 0..len {
            match self.worker_threads.remove(0).join() {
                Err(_) => {
                    error!("{}: failed to get back resources", self.debug_label());
                    return false;
                }
                Ok(worker) => {
                    self.taps.push(worker.tap);
                    self.workers_kill_evt.push(worker.kill_evt);
                }
            }
        }

        true
    }
}