// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::collections::hash_map::{Entry, HashMap, VacantEntry};
use std::env::set_var;
use std::fs::File;
use std::io::{IoSlice, Write};
use std::mem::transmute;
use std::os::unix::net::UnixDatagram;
use std::path::Path;
use std::process::Command;
use std::sync::{Arc, RwLock};
use std::thread::JoinHandle;

use net_util::Error as NetError;

use libc::{pid_t, waitpid, EINVAL, ENODATA, ENOTTY, WEXITSTATUS, WIFEXITED, WNOHANG, WTERMSIG};

use protobuf::Message;

use base::{
    error, AsRawDescriptor, Error as SysError, Event, IntoRawDescriptor, Killable,
    MemoryMappingBuilder, RawDescriptor, Result as SysResult, ScmSocket, SharedMemory,
    SharedMemoryUnix, SIGRTMIN,
};
use kvm::{dirty_log_bitmap_size, Datamatch, IoeventAddress, IrqRoute, IrqSource, PicId, Vm};
use kvm_sys::{kvm_clock_data, kvm_ioapic_state, kvm_pic_state, kvm_pit_state2};
use minijail::Minijail;
use protos::plugin::*;
use sync::Mutex;
use vm_memory::GuestAddress;

use super::*;

// Wrapper types to make the kvm state structs DataInit
use data_model::DataInit;
#[derive(Copy, Clone)]
struct VmPicState(kvm_pic_state);
unsafe impl DataInit for VmPicState {}
#[derive(Copy, Clone)]
struct VmIoapicState(kvm_ioapic_state);
unsafe impl DataInit for VmIoapicState {}
#[derive(Copy, Clone)]
struct VmPitState(kvm_pit_state2);
unsafe impl DataInit for VmPitState {}
#[derive(Copy, Clone)]
struct VmClockState(kvm_clock_data);
unsafe impl DataInit for VmClockState {}

fn get_vm_state(vm: &Vm, state_set: MainRequest_StateSet) -> SysResult<Vec<u8>> {
    Ok(match state_set {
        MainRequest_StateSet::PIC0 => VmPicState(vm.get_pic_state(PicId::Primary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::PIC1 => VmPicState(vm.get_pic_state(PicId::Secondary)?)
            .as_slice()
            .to_vec(),
        MainRequest_StateSet::IOAPIC => VmIoapicState(vm.get_ioapic_state()?).as_slice().to_vec(),
        MainRequest_StateSet::PIT => VmPitState(vm.get_pit_state()?).as_slice().to_vec(),
        MainRequest_StateSet::CLOCK => VmClockState(vm.get_clock()?).as_slice().to_vec(),
    })
}

fn set_vm_state(vm: &Vm, state_set: MainRequest_StateSet, state: &[u8]) -> SysResult<()> {
    match state_set {
        MainRequest_StateSet::PIC0 => vm.set_pic_state(
            PicId::Primary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIC1 => vm.set_pic_state(
            PicId::Secondary,
            &VmPicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::IOAPIC => vm.set_ioapic_state(
            &VmIoapicState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::PIT => vm.set_pit_state(
            &VmPitState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
        MainRequest_StateSet::CLOCK => vm.set_clock(
            &VmClockState::from_slice(state)
                .ok_or(SysError::new(EINVAL))?
                .0,
        ),
    }
}

/// The status of a process, either that it is running, or that it exited under some condition.
pub enum ProcessStatus {
    /// The process is running and therefore has no information about its result.
    Running,
    /// The process has exited with a successful code.
    Success,
    /// The process failed with the given exit code.
    Fail(i32),
    /// The process was terminated with the given signal code.
    Signal(i32),
}

/// Creates, owns, and handles messages from a plugin process.
///
/// A plugin process has control over a single VM and a fixed number of VCPUs via a set of pipes
/// and Unix domain socket connections and a protocol defined in `protos::plugin`. The plugin
/// process is run in an unprivileged manner as a child process spawned via a path to an
/// arbitrary executable.
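///
/// # Example
///
/// A rough lifecycle sketch, not taken from the original sources; the plugin path, error
/// handling, and the event loop that drives `handle_socket` are placeholders:
///
/// ```ignore
/// let mut process = Process::new(1, Path::new("/opt/my-plugin"), &[], None)?;
/// let plugin_vcpu = process.create_vcpu(0)?; // one connection per VCPU run loop
/// // ... poll process.sockets() and pass each readable index to process.handle_socket() ...
/// process.signal_kill()?; // ask the plugin to exit
/// while let ProcessStatus::Running = process.try_wait()? { /* back off, then force-kill */ }
/// ```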
pub struct Process {
    started: bool,
    plugin_pid: pid_t,
    request_sockets: Vec<UnixDatagram>,
    objects: HashMap<u32, PluginObject>,
    shared_vcpu_state: Arc<RwLock<SharedVcpuState>>,
    per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>>,

    // Resources sent to the plugin.
    kill_evt: Event,
    vcpu_pipes: Vec<VcpuPipe>,

    // Socket transmission buffers.
    request_buffer: Vec<u8>,
    response_buffer: Vec<u8>,
}

impl Process {
    /// Creates a new plugin process for the given number of vcpus and VM.
    ///
    /// This will immediately spawn the plugin process and wait for the child to signal that it is
    /// ready to start. This call may block indefinitely.
    ///
    /// Set the `jail` argument to spawn the plugin process within the preconfigured jail.
    /// Due to an API limitation in libminijail necessitating that this function set an environment
    /// variable, this function is not thread-safe.
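    ///
    /// # Example
    ///
    /// A sketch of how a plugin child might locate the connection it is handed; the parsing
    /// details are illustrative only, but the `CROSVM_SOCKET` variable and its value (a raw
    /// descriptor number) come from the spawn logic below:
    ///
    /// ```ignore
    /// // Inside the plugin executable:
    /// let fd: i32 = std::env::var("CROSVM_SOCKET")
    ///     .expect("not launched by crosvm")
    ///     .parse()
    ///     .expect("CROSVM_SOCKET is not a descriptor number");
    /// ```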
    pub fn new(
        cpu_count: u32,
        cmd: &Path,
        args: &[&str],
        jail: Option<Minijail>,
    ) -> Result<Process> {
        let (request_socket, child_socket) =
            new_seqpacket_pair().map_err(Error::CreateMainSocket)?;

        let mut vcpu_pipes: Vec<VcpuPipe> = Vec::with_capacity(cpu_count as usize);
        for _ in 0..cpu_count {
            vcpu_pipes.push(new_pipe_pair().map_err(Error::CreateVcpuSocket)?);
        }
        let mut per_vcpu_states: Vec<Arc<Mutex<PerVcpuState>>> =
            Vec::with_capacity(cpu_count as usize);
        // TODO(zachr): replace with `resize_default` when that stabilizes. Using a plain `resize`
        // is incorrect because each element in the `Vec` will contain a shared reference to the
        // same `PerVcpuState` instance. This happens because `resize` fills new slots using clones
        // of the instance given to `resize`.
        for _ in 0..cpu_count {
            per_vcpu_states.push(Default::default());
        }

        let plugin_pid = match jail {
            Some(jail) => {
                set_var(
                    "CROSVM_SOCKET",
                    child_socket.as_raw_descriptor().to_string(),
                );
                jail.run(cmd, &[0, 1, 2, child_socket.as_raw_descriptor()], args)
                    .map_err(Error::PluginRunJail)?
            }
            None => Command::new(cmd)
                .args(args)
                .env(
                    "CROSVM_SOCKET",
                    child_socket.as_raw_descriptor().to_string(),
                )
                .spawn()
                .map_err(Error::PluginSpawn)?
                .id() as pid_t,
        };

        Ok(Process {
            started: false,
            plugin_pid,
            request_sockets: vec![request_socket],
            objects: Default::default(),
            shared_vcpu_state: Default::default(),
            per_vcpu_states,
            kill_evt: Event::new().map_err(Error::CreateEvent)?,
            vcpu_pipes,
            request_buffer: vec![0; MAX_DATAGRAM_SIZE],
            response_buffer: Vec::new(),
        })
    }

    /// Creates a VCPU plugin connection object, used by a VCPU run loop to communicate with the
    /// plugin process.
    ///
    /// While each invocation of `create_vcpu` with the given `cpu_id` will return a unique
    /// `PluginVcpu` object, the underlying resources are shared by each `PluginVcpu` resulting from
    /// the same `cpu_id`.
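    ///
    /// # Example
    ///
    /// A minimal sketch of wiring one connection per VCPU thread; `cpu_count` is assumed to match
    /// the count passed to `new`, and the thread spawning is only hinted at:
    ///
    /// ```ignore
    /// for cpu_id in 0..cpu_count {
    ///     let plugin_vcpu = process.create_vcpu(cpu_id)?;
    ///     // Move `plugin_vcpu` into the run loop thread for this VCPU.
    /// }
    /// ```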
    pub fn create_vcpu(&self, cpu_id: u32) -> Result<PluginVcpu> {
        let vcpu_pipe_read = self.vcpu_pipes[cpu_id as usize]
            .crosvm_read
            .try_clone()
            .map_err(Error::CloneVcpuPipe)?;
        let vcpu_pipe_write = self.vcpu_pipes[cpu_id as usize]
            .crosvm_write
            .try_clone()
            .map_err(Error::CloneVcpuPipe)?;
        Ok(PluginVcpu::new(
            self.shared_vcpu_state.clone(),
            self.per_vcpu_states[cpu_id as usize].clone(),
            vcpu_pipe_read,
            vcpu_pipe_write,
        ))
    }

    /// Returns whether the plugin process has indicated the VM is ready to start.
    pub fn is_started(&self) -> bool {
        self.started
    }

    /// Returns the process ID of the plugin process.
    pub fn pid(&self) -> pid_t {
        self.plugin_pid
    }

    /// Returns a slice of each socket that should be polled.
    ///
    /// If any socket in this slice becomes readable, `handle_socket` should be called with the
    /// index of that socket. If any socket becomes closed, its index should be passed to
    /// `drop_sockets`.
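    ///
    /// # Example
    ///
    /// A simplified polling sketch; `wait_readable` is a placeholder for whatever poll/epoll
    /// wrapper the caller uses, the `kvm`, `vm`, `vcpu_handles`, and `taps` bindings are assumed
    /// to be in scope, and any `handle_socket` error is treated as a hang-up for brevity:
    ///
    /// ```ignore
    /// loop {
    ///     let readable: Vec<usize> = wait_readable(process.sockets());
    ///     let mut hungup = Vec::new();
    ///     for index in readable {
    ///         if process
    ///             .handle_socket(index, &kvm, &mut vm, &vcpu_handles, &taps)
    ///             .is_err()
    ///         {
    ///             hungup.push(index);
    ///         }
    ///     }
    ///     if !hungup.is_empty() {
    ///         process.drop_sockets(&mut hungup);
    ///     }
    /// }
    /// ```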
    pub fn sockets(&self) -> &[UnixDatagram] {
        &self.request_sockets
    }

    /// Drops each socket identified by its index in the slice returned by `sockets`.
    ///
    /// The given `socket_idxs` slice will be modified in an arbitrary way for efficient removal of
    /// the sockets from internal data structures.
    pub fn drop_sockets(&mut self, socket_idxs: &mut [usize]) {
        // Takes a mutable slice so that the indices can be sorted for efficient removal from
        // `request_sockets`.
        socket_idxs.sort_unstable_by(|a, b| b.cmp(a));
        let old_len = self.request_sockets.len();
        for &socket_index in socket_idxs.iter() {
            // swap_remove changes the index of the last element, but we already know that one
            // doesn't need to be removed because we are removing sockets in descending order thanks
            // to the above sort.
            self.request_sockets.swap_remove(socket_index);
        }
        assert_eq!(old_len - socket_idxs.len(), self.request_sockets.len());
    }

    /// Gently requests that the plugin process exit cleanly, and ends handling of all VCPU
    /// connections.
    ///
    /// The plugin process can ignore the given signal, and so some timeout should be used before
    /// forcefully terminating the process.
    ///
    /// Any blocked VCPU connections will get interrupted so that the VCPU threads can exit cleanly.
    /// Any subsequent attempt to use the VCPU connections will fail.
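    ///
    /// # Example
    ///
    /// A sketch of the "signal, wait, then force" pattern described above; the one-second budget
    /// and the `SIGKILL` fallback are illustrative choices, not taken from the original code:
    ///
    /// ```ignore
    /// use std::time::{Duration, Instant};
    ///
    /// process.signal_kill()?;
    /// let deadline = Instant::now() + Duration::from_secs(1);
    /// while let ProcessStatus::Running = process.try_wait()? {
    ///     if Instant::now() > deadline {
    ///         // Give up on a clean exit and terminate the plugin outright.
    ///         unsafe { libc::kill(process.pid(), libc::SIGKILL) };
    ///         break;
    ///     }
    ///     std::thread::sleep(Duration::from_millis(10));
    /// }
    /// ```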
    pub fn signal_kill(&mut self) -> SysResult<()> {
        self.kill_evt.write(1)?;
        // Normally we'd get any blocked recv() calls in the VCPU threads
        // to unblock by calling shutdown().  However, we're using pipes
        // (for improved performance), and pipes don't have shutdown so
        // instead we'll write a shutdown message to ourselves using the
        // writable side of the pipe (normally used by the plugin).
        for pipe in self.vcpu_pipes.iter_mut() {
            let mut shutdown_request = VcpuRequest::new();
            shutdown_request.set_shutdown(VcpuRequest_Shutdown::new());
            let mut buffer = Vec::new();
            shutdown_request
                .write_to_vec(&mut buffer)
                .map_err(proto_to_sys_err)?;
            pipe.plugin_write
                .write(&buffer[..])
                .map_err(io_to_sys_err)?;
        }
        Ok(())
    }

    /// Waits without blocking for the plugin process to exit and returns the status.
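    ///
    /// # Example
    ///
    /// A sketch of interpreting the result; the log lines are placeholders:
    ///
    /// ```ignore
    /// match process.try_wait()? {
    ///     ProcessStatus::Running => println!("plugin still running"),
    ///     ProcessStatus::Success => println!("plugin exited cleanly"),
    ///     ProcessStatus::Fail(code) => println!("plugin exited with code {}", code),
    ///     ProcessStatus::Signal(signo) => println!("plugin terminated by signal {}", signo),
    /// }
    /// ```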
    pub fn try_wait(&mut self) -> SysResult<ProcessStatus> {
        let mut status = 0;
        // Safe because waitpid is given a valid pointer of correct size and mutability, and the
        // return value is checked.
        let ret = unsafe { waitpid(self.plugin_pid, &mut status, WNOHANG) };
        match ret {
            -1 => Err(SysError::last()),
            0 => Ok(ProcessStatus::Running),
            _ => {
                if WIFEXITED(status) {
                    match WEXITSTATUS(status) {
                        0 => Ok(ProcessStatus::Success),
                        code => Ok(ProcessStatus::Fail(code)),
                    }
                } else {
                    // Plugin terminated but has no exit status, so it must have been signaled.
                    Ok(ProcessStatus::Signal(WTERMSIG(status)))
                }
            }
        }
    }

    fn handle_io_event(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        io_event: &MainRequest_Create_IoEvent,
    ) -> SysResult<RawDescriptor> {
        let evt = Event::new()?;
        let addr = match io_event.space {
            AddressSpace::IOPORT => IoeventAddress::Pio(io_event.address),
            AddressSpace::MMIO => IoeventAddress::Mmio(io_event.address),
        };
        match io_event.length {
            0 => vm.register_ioevent(&evt, addr, Datamatch::AnyLength)?,
            1 => vm.register_ioevent(&evt, addr, Datamatch::U8(Some(io_event.datamatch as u8)))?,
            2 => {
                vm.register_ioevent(&evt, addr, Datamatch::U16(Some(io_event.datamatch as u16)))?
            }
            4 => {
                vm.register_ioevent(&evt, addr, Datamatch::U32(Some(io_event.datamatch as u32)))?
            }
            8 => {
                vm.register_ioevent(&evt, addr, Datamatch::U64(Some(io_event.datamatch as u64)))?
            }
            _ => return Err(SysError::new(EINVAL)),
        };

        let fd = evt.as_raw_descriptor();
        entry.insert(PluginObject::IoEvent {
            evt,
            addr,
            length: io_event.length,
            datamatch: io_event.datamatch,
        });
        Ok(fd)
    }

    fn handle_memory(
        entry: VacantEntry<u32, PluginObject>,
        vm: &mut Vm,
        memfd: File,
        offset: u64,
        start: u64,
        length: u64,
        read_only: bool,
        dirty_log: bool,
    ) -> SysResult<()> {
        let shm = SharedMemory::from_file(memfd)?;
        // Checking the seals ensures the plugin process won't shrink the mmapped file, which
        // would cause us to SIGBUS in the future.
        let seals = shm.get_seals()?;
        if !seals.shrink_seal() {
            return Err(SysError::new(EPERM));
        }
        // Check to make sure we don't mmap areas beyond the end of the memfd.
        match length.checked_add(offset) {
            Some(end) if end > shm.size() => return Err(SysError::new(EINVAL)),
            None => return Err(SysError::new(EOVERFLOW)),
            _ => {}
        }
        let mem = MemoryMappingBuilder::new(length as usize)
            .from_shared_memory(&shm)
            .offset(offset)
            .build()
            .map_err(mmap_to_sys_err)?;
        let slot =
            vm.add_memory_region(GuestAddress(start), Box::new(mem), read_only, dirty_log)?;
        entry.insert(PluginObject::Memory {
            slot,
            length: length as usize,
        });
        Ok(())
    }

    fn handle_reserve_range(&mut self, reserve_range: &MainRequest_ReserveRange) -> SysResult<()> {
        match self.shared_vcpu_state.write() {
            Ok(mut lock) => {
                let space = match reserve_range.space {
                    AddressSpace::IOPORT => IoSpace::Ioport,
                    AddressSpace::MMIO => IoSpace::Mmio,
                };
                match reserve_range.length {
                    0 => lock.unreserve_range(space, reserve_range.start),
                    _ => lock.reserve_range(
                        space,
                        reserve_range.start,
                        reserve_range.length,
                        reserve_range.async_write,
                    ),
                }
            }
            Err(_) => Err(SysError::new(EDEADLK)),
        }
    }

    fn handle_set_irq_routing(
        vm: &mut Vm,
        irq_routing: &MainRequest_SetIrqRouting,
    ) -> SysResult<()> {
        let mut routes = Vec::with_capacity(irq_routing.routes.len());
        for route in &irq_routing.routes {
            routes.push(IrqRoute {
                gsi: route.irq_id,
                source: if route.has_irqchip() {
                    let irqchip = route.get_irqchip();
                    IrqSource::Irqchip {
                        chip: irqchip.irqchip,
                        pin: irqchip.pin,
                    }
                } else if route.has_msi() {
                    let msi = route.get_msi();
                    IrqSource::Msi {
                        address: msi.address,
                        data: msi.data,
                    }
                } else {
                    // Because route is a oneof field in the proto definition, this should
                    // only happen if a new variant gets added without updating this chained
                    // if block.
                    return Err(SysError::new(EINVAL));
                },
            });
        }
        vm.set_gsi_routing(&routes[..])
    }

    fn handle_set_call_hint(&mut self, hints: &MainRequest_SetCallHint) -> SysResult<()> {
        let mut regs: Vec<CallHintDetails> = vec![];
        for hint in &hints.hints {
            regs.push(CallHintDetails {
                match_rax: hint.match_rax,
                match_rbx: hint.match_rbx,
                match_rcx: hint.match_rcx,
                match_rdx: hint.match_rdx,
                rax: hint.rax,
                rbx: hint.rbx,
                rcx: hint.rcx,
                rdx: hint.rdx,
                send_sregs: hint.send_sregs,
                send_debugregs: hint.send_debugregs,
            });
        }
        match self.shared_vcpu_state.write() {
            Ok(mut lock) => {
                let space = match hints.space {
                    AddressSpace::IOPORT => IoSpace::Ioport,
                    AddressSpace::MMIO => IoSpace::Mmio,
                };
                lock.set_hint(space, hints.address, hints.on_write, regs);
                Ok(())
            }
            Err(_) => Err(SysError::new(EDEADLK)),
        }
    }

    fn handle_pause_vcpus(&self, vcpu_handles: &[JoinHandle<()>], cpu_mask: u64, user_data: u64) {
        for (cpu_id, (handle, per_cpu_state)) in
            vcpu_handles.iter().zip(&self.per_vcpu_states).enumerate()
        {
            if cpu_mask & (1 << cpu_id) != 0 {
                per_cpu_state.lock().request_pause(user_data);
                if let Err(e) = handle.kill(SIGRTMIN() + 0) {
                    error!("failed to interrupt vcpu {}: {}", cpu_id, e);
                }
            }
        }
    }

    fn handle_get_net_config(
        tap: &net_util::Tap,
        config: &mut MainResponse_GetNetConfig,
    ) -> SysResult<()> {
        // Log any NetError so that the cause can be found later, but extract and return the
        // underlying errno for the client as well.
        fn map_net_error(s: &str, e: NetError) -> SysError {
            error!("failed to get {}: {}", s, e);
            e.sys_error()
        }

        let ip_addr = tap.ip_addr().map_err(|e| map_net_error("IP address", e))?;
        config.set_host_ipv4_address(u32::from(ip_addr));

        let netmask = tap.netmask().map_err(|e| map_net_error("netmask", e))?;
        config.set_netmask(u32::from(netmask));

        let result_mac_addr = config.mut_host_mac_address();
        let mac_addr_octets = tap
            .mac_address()
            .map_err(|e| map_net_error("mac address", e))?
            .octets();
        result_mac_addr.resize(mac_addr_octets.len(), 0);
        result_mac_addr.clone_from_slice(&mac_addr_octets);

        Ok(())
    }

    /// Handles a request on a readable socket identified by its index in the slice returned by
    /// `sockets`.
    ///
    /// The `vm` is used to service requests that affect the VM. The `vcpu_handles` slice is used
    /// to interrupt a VCPU thread currently running in the VM if the socket requests it.
    pub fn handle_socket(
        &mut self,
        index: usize,
        kvm: &Kvm,
        vm: &mut Vm,
        vcpu_handles: &[JoinHandle<()>],
        taps: &[Tap],
    ) -> Result<()> {
        let (msg_size, request_file) = self.request_sockets[index]
            .recv_with_fd(&mut self.request_buffer)
            .map_err(Error::PluginSocketRecv)?;

        if msg_size == 0 {
            return Err(Error::PluginSocketHup);
        }

        let request = protobuf::parse_from_bytes::<MainRequest>(&self.request_buffer[..msg_size])
            .map_err(Error::DecodeRequest)?;

        /// Use this to make it easier to stuff various kinds of File-like objects into the
        /// `boxed_fds` list.
        fn box_owned_fd<F: IntoRawDescriptor + 'static>(f: F) -> Box<dyn IntoRawDescriptor> {
            Box::new(f)
        }

        // This vec is used to extend ownership of certain FDs until the end of this function.
        let mut boxed_fds = Vec::new();
        let mut response_fds = Vec::new();
        let mut response = MainResponse::new();
        let res = if request.has_create() {
            response.mut_create();
            let create = request.get_create();
            match self.objects.entry(create.id) {
                Entry::Vacant(entry) => {
                    if create.has_io_event() {
                        match Self::handle_io_event(entry, vm, create.get_io_event()) {
                            Ok(fd) => {
                                response_fds.push(fd);
                                Ok(())
                            }
                            Err(e) => Err(e),
                        }
                    } else if create.has_memory() {
                        let memory = create.get_memory();
                        match request_file {
                            Some(memfd) => Self::handle_memory(
                                entry,
                                vm,
                                memfd,
                                memory.offset,
                                memory.start,
                                memory.length,
                                memory.read_only,
                                memory.dirty_log,
                            ),
                            None => Err(SysError::new(EBADF)),
                        }
                    } else if create.has_irq_event() {
                        let irq_event = create.get_irq_event();
                        match (Event::new(), Event::new()) {
                            (Ok(evt), Ok(resample_evt)) => match vm.register_irqfd_resample(
                                &evt,
                                &resample_evt,
                                irq_event.irq_id,
                            ) {
                                Ok(()) => {
                                    response_fds.push(evt.as_raw_descriptor());
                                    response_fds.push(resample_evt.as_raw_descriptor());
                                    boxed_fds.push(box_owned_fd(resample_evt));
                                    entry.insert(PluginObject::IrqEvent {
                                        irq_id: irq_event.irq_id,
                                        evt,
                                    });
                                    Ok(())
                                }
                                Err(e) => Err(e),
                            },
                            (Err(e), _) | (_, Err(e)) => Err(e),
                        }
                    } else {
                        Err(SysError::new(ENOTTY))
                    }
                }
                Entry::Occupied(_) => Err(SysError::new(EEXIST)),
            }
        } else if request.has_destroy() {
            response.mut_destroy();
            match self.objects.entry(request.get_destroy().id) {
                Entry::Occupied(entry) => entry.remove().destroy(vm),
                Entry::Vacant(_) => Err(SysError::new(ENOENT)),
            }
        } else if request.has_new_connection() {
            response.mut_new_connection();
            match new_seqpacket_pair() {
                Ok((request_socket, child_socket)) => {
                    self.request_sockets.push(request_socket);
                    response_fds.push(child_socket.as_raw_descriptor());
                    boxed_fds.push(box_owned_fd(child_socket));
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_shutdown_eventfd() {
            response.mut_get_shutdown_eventfd();
            response_fds.push(self.kill_evt.as_raw_descriptor());
            Ok(())
        } else if request.has_check_extension() {
            // Safe because the Cap enum is not read by the check_extension method. In that method,
            // cap is cast back to an integer and fed to an ioctl. If the extension name is actually
            // invalid, the kernel will safely reject the extension under the assumption that the
            // capability is legitimately unsupported.
            let cap = unsafe { transmute(request.get_check_extension().extension) };
            response.mut_check_extension().has_extension = vm.check_extension(cap);
            Ok(())
        } else if request.has_reserve_range() {
            response.mut_reserve_range();
            self.handle_reserve_range(request.get_reserve_range())
        } else if request.has_set_irq() {
            response.mut_set_irq();
            let irq = request.get_set_irq();
            vm.set_irq_line(irq.irq_id, irq.active)
        } else if request.has_set_irq_routing() {
            response.mut_set_irq_routing();
            Self::handle_set_irq_routing(vm, request.get_set_irq_routing())
        } else if request.has_get_state() {
            let response_state = response.mut_get_state();
            match get_vm_state(vm, request.get_get_state().set) {
                Ok(state) => {
                    response_state.state = state;
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_set_state() {
            response.mut_set_state();
            let set_state = request.get_set_state();
            set_vm_state(vm, set_state.set, set_state.get_state())
        } else if request.has_set_identity_map_addr() {
            response.mut_set_identity_map_addr();
            let addr = request.get_set_identity_map_addr().address;
            vm.set_identity_map_addr(GuestAddress(addr as u64))
        } else if request.has_pause_vcpus() {
            response.mut_pause_vcpus();
            let pause_vcpus = request.get_pause_vcpus();
            self.handle_pause_vcpus(vcpu_handles, pause_vcpus.cpu_mask, pause_vcpus.user);
            Ok(())
        } else if request.has_get_vcpus() {
            response.mut_get_vcpus();
            for pipe in self.vcpu_pipes.iter() {
                response_fds.push(pipe.plugin_write.as_raw_descriptor());
                response_fds.push(pipe.plugin_read.as_raw_descriptor());
            }
            Ok(())
        } else if request.has_start() {
            response.mut_start();
            if self.started {
                Err(SysError::new(EINVAL))
            } else {
                self.started = true;
                Ok(())
            }
        } else if request.has_get_net_config() {
            match taps.first() {
                Some(tap) => {
                    match Self::handle_get_net_config(tap, response.mut_get_net_config()) {
                        Ok(_) => {
                            response_fds.push(tap.as_raw_descriptor());
                            Ok(())
                        }
                        Err(e) => Err(e),
                    }
                }
                None => Err(SysError::new(ENODATA)),
            }
        } else if request.has_set_call_hint() {
            response.mut_set_call_hint();
            self.handle_set_call_hint(request.get_set_call_hint())
        } else if request.has_dirty_log() {
            let dirty_log_response = response.mut_dirty_log();
            match self.objects.get(&request.get_dirty_log().id) {
                Some(&PluginObject::Memory { slot, length }) => {
                    let dirty_log = dirty_log_response.mut_bitmap();
                    dirty_log.resize(dirty_log_bitmap_size(length), 0);
                    vm.get_dirty_log(slot, &mut dirty_log[..])
                }
                _ => Err(SysError::new(ENOENT)),
            }
        } else if request.has_get_supported_cpuid() {
            let cpuid_response = &mut response.mut_get_supported_cpuid().entries;
            match kvm.get_supported_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_emulated_cpuid() {
            let cpuid_response = &mut response.mut_get_emulated_cpuid().entries;
            match kvm.get_emulated_cpuid() {
                Ok(mut cpuid) => {
                    for entry in cpuid.mut_entries_slice() {
                        cpuid_response.push(cpuid_kvm_to_proto(entry));
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else if request.has_get_msr_index_list() {
            let msr_list_response = &mut response.mut_get_msr_index_list().indices;
            match kvm.get_msr_index_list() {
                Ok(indices) => {
                    for entry in indices {
                        msr_list_response.push(entry);
                    }
                    Ok(())
                }
                Err(e) => Err(e),
            }
        } else {
            Err(SysError::new(ENOTTY))
        };

        if let Err(e) = res {
            response.errno = e.errno();
        }

        self.response_buffer.clear();
        response
            .write_to_vec(&mut self.response_buffer)
            .map_err(Error::EncodeResponse)?;
        assert_ne!(self.response_buffer.len(), 0);
        self.request_sockets[index]
            .send_with_fds(&[IoSlice::new(&self.response_buffer[..])], &response_fds)
            .map_err(Error::PluginSocketSend)?;

        Ok(())
    }
}

impl Drop for Process {
    fn drop(&mut self) {
        // Log the error, because there is nothing else we can do about it here.
        if let Err(e) = self.signal_kill() {
            error!("failed to signal kill event for plugin: {}", e);
        }
    }
}