1 // Copyright 2017 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #![allow(non_camel_case_types)]
6
7 //! This module implements the dynamically loaded client library API used by a crosvm plugin,
8 //! defined in `crosvm.h`. It implements the client half of the plugin protocol, which is defined in
9 //! the `protos::plugin` module.
10 //!
11 //! To implement the `crosvm.h` C API, each function and struct definition is repeated here, with
12 //! concrete definitions for each struct. Most functions are thin shims over methods of the
13 //! underlying object-oriented Rust implementation. Most methods require a request over the crosvm connection,
14 //! which is done by creating a `MainRequest` or `VcpuRequest` protobuf and sending it over the
15 //! connection's socket. Then, that socket is read for a `MainResponse` or `VcpuResponse`, which is
16 //! translated to the appropriate return type for the C API.
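//!
//! A typical round trip on the main socket looks like the following minimal sketch (it mirrors
//! the body of `crosvm::get_shutdown_event` below):
//!
//! ```ignore
//! let mut r = MainRequest::new();
//! r.mut_get_shutdown_eventfd();
//! // Serializes the request, sends it as one datagram, and parses the MainResponse datagram,
//! // returning any file descriptors that accompanied it.
//! let (response, files) = self.main_transaction(&r, &[])?;
//! ```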
17
18 use std::env;
19 use std::fs::File;
20 use std::io::{IoSlice, Read, Write};
21 use std::mem::{size_of, swap};
22 use std::os::raw::{c_int, c_void};
23 use std::os::unix::io::{AsRawFd, FromRawFd, IntoRawFd, RawFd};
24 use std::os::unix::net::UnixDatagram;
25 use std::ptr::{self, null_mut};
26 use std::result;
27 use std::slice;
28 use std::slice::{from_raw_parts, from_raw_parts_mut};
29 use std::sync::atomic::{AtomicUsize, Ordering};
30 use std::sync::Arc;
31
32 use libc::{E2BIG, EINVAL, ENOENT, ENOTCONN, EPROTO};
33
34 use protobuf::{parse_from_bytes, Message, ProtobufEnum, RepeatedField};
35
36 use base::ScmSocket;
37
38 use kvm::dirty_log_bitmap_size;
39
40 use kvm_sys::{
41 kvm_clock_data, kvm_cpuid_entry2, kvm_debugregs, kvm_fpu, kvm_ioapic_state, kvm_lapic_state,
42 kvm_mp_state, kvm_msr_entry, kvm_pic_state, kvm_pit_state2, kvm_regs, kvm_sregs,
43 kvm_vcpu_events, kvm_xcrs,
44 };
45
46 use protos::plugin::*;
47
48 #[cfg(feature = "stats")]
49 mod stats;
50
51 // Needs to be large enough to receive all the VCPU sockets.
52 const MAX_DATAGRAM_FD: usize = 32;
53 // Needs to be large enough for a sizable dirty log.
54 const MAX_DATAGRAM_SIZE: usize = 0x40000;
55
56 const CROSVM_IRQ_ROUTE_IRQCHIP: u32 = 0;
57 const CROSVM_IRQ_ROUTE_MSI: u32 = 1;
58
59 const CROSVM_VCPU_EVENT_KIND_INIT: u32 = 0;
60 const CROSVM_VCPU_EVENT_KIND_IO_ACCESS: u32 = 1;
61 const CROSVM_VCPU_EVENT_KIND_PAUSED: u32 = 2;
62 const CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL: u32 = 3;
63 const CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC: u32 = 4;
64
65 #[repr(C)]
66 #[derive(Copy, Clone)]
67 pub struct crosvm_net_config {
68 tap_fd: c_int,
69 host_ipv4_address: u32,
70 netmask: u32,
71 host_mac_address: [u8; 6],
72 _reserved: [u8; 2],
73 }
74
75 #[repr(C)]
76 #[derive(Copy, Clone)]
77 pub struct anon_irqchip {
78 irqchip: u32,
79 pin: u32,
80 }
81
82 #[repr(C)]
83 #[derive(Copy, Clone)]
84 pub struct anon_msi {
85 address: u64,
86 data: u32,
87 }
88
89 #[repr(C)]
90 pub union anon_route {
91 irqchip: anon_irqchip,
92 msi: anon_msi,
93 reserved: [u8; 16],
94 }
95
96 #[repr(C)]
97 pub struct crosvm_irq_route {
98 irq_id: u32,
99 kind: u32,
100 route: anon_route,
101 }
102
103 const CROSVM_MAX_HINT_COUNT: u32 = 1;
104 const CROSVM_MAX_HINT_DETAIL_COUNT: u32 = 32;
105 const CROSVM_HINT_ON_WRITE: u16 = 1;
106
107 #[repr(C)]
108 pub struct crosvm_hint {
109 hint_version: u32,
110 reserved: u32,
111 address_space: u32,
112 address_flags: u16,
113 details_count: u16,
114 address: u64,
115 details: *const crosvm_hint_detail,
116 }
117
118 #[repr(C)]
119 pub struct crosvm_hint_detail {
120 match_rax: bool,
121 match_rbx: bool,
122 match_rcx: bool,
123 match_rdx: bool,
124 reserved1: [u8; 4],
125 rax: u64,
126 rbx: u64,
127 rcx: u64,
128 rdx: u64,
129 send_sregs: bool,
130 send_debugregs: bool,
131 reserved2: [u8; 6],
132 }
133
134 fn proto_error_to_int(e: protobuf::ProtobufError) -> c_int {
135 match e {
136 protobuf::ProtobufError::IoError(e) => e.raw_os_error().unwrap_or(EINVAL),
137 _ => EINVAL,
138 }
139 }
140
141 fn fd_cast<F: FromRawFd>(f: File) -> F {
142 // Safe because we are transferring unique ownership.
143 unsafe { F::from_raw_fd(f.into_raw_fd()) }
144 }
145
146 #[derive(Default)]
147 struct IdAllocator(AtomicUsize);
148
149 impl IdAllocator {
150 fn alloc(&self) -> u32 {
151 self.0.fetch_add(1, Ordering::Relaxed) as u32
152 }
153
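// Best-effort free: the counter is only rolled back when `id` was the most recently allocated
// value (i.e. it still reads `id + 1`); otherwise the id is simply never reused.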
154 fn free(&self, id: u32) {
155 let _ = self.0.compare_exchange(
156 id as usize + 1,
157 id as usize,
158 Ordering::Relaxed,
159 Ordering::Relaxed,
160 );
161 }
162 }
163
164 #[repr(u8)]
165 #[derive(Debug, Clone, Copy)]
166 pub enum Stat {
167 IoEvent,
168 MemoryGetDirtyLog,
169 IrqEventGetFd,
170 IrqEventGetResampleFd,
171 Connect,
172 DestroyConnection,
173 GetShutdownEvent,
174 CheckExtension,
175 EnableVmCapability,
176 EnableVcpuCapability,
177 GetSupportedCpuid,
178 GetEmulatedCpuid,
179 GetHypervCpuid,
180 GetMsrIndexList,
181 NetGetConfig,
182 ReserveRange,
183 ReserveAsyncWriteRange,
184 SetIrq,
185 SetIrqRouting,
186 GetPicState,
187 SetPicState,
188 GetIoapicState,
189 SetIoapicState,
190 GetPitState,
191 SetPitState,
192 GetClock,
193 SetClock,
194 SetIdentityMapAddr,
195 PauseVcpus,
196 Start,
197 GetVcpu,
198 VcpuWait,
199 VcpuResume,
200 VcpuGetRegs,
201 VcpuSetRegs,
202 VcpuGetSregs,
203 VcpuSetSregs,
204 GetFpu,
205 SetFpu,
206 GetDebugRegs,
207 SetDebugRegs,
208 GetXCRegs,
209 SetXCRegs,
210 VcpuGetMsrs,
211 VcpuSetMsrs,
212 VcpuSetCpuid,
213 VcpuGetLapicState,
214 VcpuSetLapicState,
215 VcpuGetMpState,
216 VcpuSetMpState,
217 VcpuGetVcpuEvents,
218 VcpuSetVcpuEvents,
219 NewConnection,
220 SetHypercallHint,
221
222 Count,
223 }
224
225 #[cfg(feature = "stats")]
226 fn record(a: Stat) -> stats::StatUpdater {
227 unsafe { stats::STATS.record(a) }
228 }
229
230 #[cfg(not(feature = "stats"))]
231 fn record(_a: Stat) -> u32 {
232 0
233 }
234
235 #[cfg(feature = "stats")]
236 fn printstats() {
237 // Unsafe due to racy access - OK for stats
238 if std::env::var("CROSVM_STATS").is_ok() {
239 unsafe {
240 stats::STATS.print();
241 }
242 }
243 }
244
245 #[cfg(not(feature = "stats"))]
246 fn printstats() {}
247
248 pub struct crosvm {
249 id_allocator: Arc<IdAllocator>,
250 socket: UnixDatagram,
251 request_buffer: Vec<u8>,
252 response_buffer: Vec<u8>,
253 vcpus: Arc<[crosvm_vcpu]>,
254 }
255
256 impl crosvm {
257 fn from_connection(socket: UnixDatagram) -> result::Result<crosvm, c_int> {
258 let mut crosvm = crosvm {
259 id_allocator: Default::default(),
260 socket,
261 request_buffer: Vec::new(),
262 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
263 vcpus: Arc::new([]),
264 };
265 crosvm.load_all_vcpus()?;
266 Ok(crosvm)
267 }
268
269 fn new(
270 id_allocator: Arc<IdAllocator>,
271 socket: UnixDatagram,
272 vcpus: Arc<[crosvm_vcpu]>,
273 ) -> crosvm {
274 crosvm {
275 id_allocator,
276 socket,
277 request_buffer: Vec::new(),
278 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
279 vcpus,
280 }
281 }
282
283 fn get_id_allocator(&self) -> &IdAllocator {
284 &*self.id_allocator
285 }
286
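// Sends `request` (with any `fds` attached) as a single datagram on the main socket and blocks
// for the matching `MainResponse` datagram; file descriptors attached to the response are
// returned as owned `File`s.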
287 fn main_transaction(
288 &mut self,
289 request: &MainRequest,
290 fds: &[RawFd],
291 ) -> result::Result<(MainResponse, Vec<File>), c_int> {
292 self.request_buffer.clear();
293 request
294 .write_to_vec(&mut self.request_buffer)
295 .map_err(proto_error_to_int)?;
296 self.socket
297 .send_with_fds(&[IoSlice::new(self.request_buffer.as_slice())], fds)
298 .map_err(|e| -e.errno())?;
299
300 let mut datagram_fds = [0; MAX_DATAGRAM_FD];
301 let (msg_size, fd_count) = self
302 .socket
303 .recv_with_fds(&mut self.response_buffer, &mut datagram_fds)
304 .map_err(|e| -e.errno())?;
305 // Safe because the first fd_count fds from recv_with_fds are owned by us and valid.
306 let datagram_files = datagram_fds[..fd_count]
307 .iter()
308 .map(|&fd| unsafe { File::from_raw_fd(fd) })
309 .collect();
310
311 let response: MainResponse =
312 parse_from_bytes(&self.response_buffer[..msg_size]).map_err(proto_error_to_int)?;
313 if response.errno != 0 {
314 return Err(response.errno);
315 }
316 Ok((response, datagram_files))
317 }
318
319 fn try_clone(&mut self) -> result::Result<crosvm, c_int> {
320 let mut r = MainRequest::new();
321 r.mut_new_connection();
322 let mut files = self.main_transaction(&r, &[])?.1;
323 match files.pop() {
324 Some(new_socket) => Ok(crosvm::new(
325 self.id_allocator.clone(),
326 fd_cast(new_socket),
327 self.vcpus.clone(),
328 )),
329 None => Err(EPROTO),
330 }
331 }
332
333 fn destroy(&mut self, id: u32) -> result::Result<(), c_int> {
334 let mut r = MainRequest::new();
335 r.mut_destroy().id = id;
336 self.main_transaction(&r, &[])?;
337 self.get_id_allocator().free(id);
338 printstats();
339 Ok(())
340 }
341
342 // Only call this from the `from_connection` function.
343 fn load_all_vcpus(&mut self) -> result::Result<(), c_int> {
344 let mut r = MainRequest::new();
345 r.mut_get_vcpus();
346 let (_, mut files) = self.main_transaction(&r, &[])?;
347 if files.is_empty() || files.len() % 2 != 0 {
348 return Err(EPROTO);
349 }
350
351 let mut vcpus = Vec::with_capacity(files.len() / 2);
352 while files.len() > 1 {
353 let write_pipe = files.remove(0);
354 let read_pipe = files.remove(0);
355 vcpus.push(crosvm_vcpu::new(fd_cast(read_pipe), fd_cast(write_pipe)));
356 }
357 self.vcpus = Arc::from(vcpus);
358 Ok(())
359 }
360
361 fn get_shutdown_event(&mut self) -> result::Result<File, c_int> {
362 let mut r = MainRequest::new();
363 r.mut_get_shutdown_eventfd();
364 let (_, mut files) = self.main_transaction(&r, &[])?;
365 match files.pop() {
366 Some(f) => Ok(f),
367 None => Err(EPROTO),
368 }
369 }
370
371 fn check_extension(&mut self, extension: u32) -> result::Result<bool, c_int> {
372 let mut r = MainRequest::new();
373 r.mut_check_extension().extension = extension;
374 let (response, _) = self.main_transaction(&r, &[])?;
375 if !response.has_check_extension() {
376 return Err(EPROTO);
377 }
378 Ok(response.get_check_extension().has_extension)
379 }
380
381 fn get_supported_cpuid(
382 &mut self,
383 cpuid_entries: &mut [kvm_cpuid_entry2],
384 cpuid_count: &mut usize,
385 ) -> result::Result<(), c_int> {
386 *cpuid_count = 0;
387
388 let mut r = MainRequest::new();
389 r.mut_get_supported_cpuid();
390
391 let (response, _) = self.main_transaction(&r, &[])?;
392 if !response.has_get_supported_cpuid() {
393 return Err(EPROTO);
394 }
395
396 let supported_cpuids: &MainResponse_CpuidResponse = response.get_get_supported_cpuid();
397
398 *cpuid_count = supported_cpuids.get_entries().len();
399 if *cpuid_count > cpuid_entries.len() {
400 return Err(E2BIG);
401 }
402
403 for (proto_entry, kvm_entry) in supported_cpuids
404 .get_entries()
405 .iter()
406 .zip(cpuid_entries.iter_mut())
407 {
408 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
409 }
410
411 Ok(())
412 }
413
414 fn get_emulated_cpuid(
415 &mut self,
416 cpuid_entries: &mut [kvm_cpuid_entry2],
417 cpuid_count: &mut usize,
418 ) -> result::Result<(), c_int> {
419 *cpuid_count = 0;
420
421 let mut r = MainRequest::new();
422 r.mut_get_emulated_cpuid();
423
424 let (response, _) = self.main_transaction(&r, &[])?;
425 if !response.has_get_emulated_cpuid() {
426 return Err(EPROTO);
427 }
428
429 let emulated_cpuids: &MainResponse_CpuidResponse = response.get_get_emulated_cpuid();
430
431 *cpuid_count = emulated_cpuids.get_entries().len();
432 if *cpuid_count > cpuid_entries.len() {
433 return Err(E2BIG);
434 }
435
436 for (proto_entry, kvm_entry) in emulated_cpuids
437 .get_entries()
438 .iter()
439 .zip(cpuid_entries.iter_mut())
440 {
441 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
442 }
443
444 Ok(())
445 }
446
447 fn get_msr_index_list(
448 &mut self,
449 msr_indices: &mut [u32],
450 msr_count: &mut usize,
451 ) -> result::Result<(), c_int> {
452 *msr_count = 0;
453
454 let mut r = MainRequest::new();
455 r.mut_get_msr_index_list();
456
457 let (response, _) = self.main_transaction(&r, &[])?;
458 if !response.has_get_msr_index_list() {
459 return Err(EPROTO);
460 }
461
462 let msr_list: &MainResponse_MsrListResponse = response.get_get_msr_index_list();
463
464 *msr_count = msr_list.get_indices().len();
465 if *msr_count > msr_indices.len() {
466 return Err(E2BIG);
467 }
468
469 for (proto_entry, kvm_entry) in msr_list.get_indices().iter().zip(msr_indices.iter_mut()) {
470 *kvm_entry = *proto_entry;
471 }
472
473 Ok(())
474 }
475
476 fn reserve_range(
477 &mut self,
478 space: u32,
479 start: u64,
480 length: u64,
481 async_write: bool,
482 ) -> result::Result<(), c_int> {
483 let mut r = MainRequest::new();
484 let reserve: &mut MainRequest_ReserveRange = r.mut_reserve_range();
485 reserve.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
486 reserve.start = start;
487 reserve.length = length;
488 reserve.async_write = async_write;
489
490 self.main_transaction(&r, &[])?;
491 Ok(())
492 }
493
494 fn set_irq(&mut self, irq_id: u32, active: bool) -> result::Result<(), c_int> {
495 let mut r = MainRequest::new();
496 let set_irq: &mut MainRequest_SetIrq = r.mut_set_irq();
497 set_irq.irq_id = irq_id;
498 set_irq.active = active;
499
500 self.main_transaction(&r, &[])?;
501 Ok(())
502 }
503
504 fn set_irq_routing(&mut self, routing: &[crosvm_irq_route]) -> result::Result<(), c_int> {
505 let mut r = MainRequest::new();
506 let set_irq_routing: &mut RepeatedField<MainRequest_SetIrqRouting_Route> =
507 r.mut_set_irq_routing().mut_routes();
508 for route in routing {
509 let mut entry = MainRequest_SetIrqRouting_Route::new();
510 entry.irq_id = route.irq_id;
511 match route.kind {
512 CROSVM_IRQ_ROUTE_IRQCHIP => {
513 let irqchip: &mut MainRequest_SetIrqRouting_Route_Irqchip;
514 irqchip = entry.mut_irqchip();
515 // Safe because route.kind indicates which union field is valid.
516 irqchip.irqchip = unsafe { route.route.irqchip }.irqchip;
517 irqchip.pin = unsafe { route.route.irqchip }.pin;
518 }
519 CROSVM_IRQ_ROUTE_MSI => {
520 let msi: &mut MainRequest_SetIrqRouting_Route_Msi = entry.mut_msi();
521 // Safe because route.kind indicates which union field is valid.
522 msi.address = unsafe { route.route.msi }.address;
523 msi.data = unsafe { route.route.msi }.data;
524 }
525 _ => return Err(EINVAL),
526 }
527 set_irq_routing.push(entry);
528 }
529
530 self.main_transaction(&r, &[])?;
531 Ok(())
532 }
533
534 fn set_hint(
535 &mut self,
536 space: u32,
537 addr: u64,
538 on_write: bool,
539 hints: &[crosvm_hint_detail],
540 ) -> result::Result<(), c_int> {
541 let mut r = MainRequest::new();
542 let req: &mut MainRequest_SetCallHint = r.mut_set_call_hint();
543 let set_hints: &mut RepeatedField<MainRequest_SetCallHint_RegHint> = req.mut_hints();
544 for hint in hints {
545 let mut entry = MainRequest_SetCallHint_RegHint::new();
546 entry.match_rax = hint.match_rax;
547 entry.match_rbx = hint.match_rbx;
548 entry.match_rcx = hint.match_rcx;
549 entry.match_rdx = hint.match_rdx;
550 entry.rax = hint.rax;
551 entry.rbx = hint.rbx;
552 entry.rcx = hint.rcx;
553 entry.rdx = hint.rdx;
554 entry.send_sregs = hint.send_sregs;
555 entry.send_debugregs = hint.send_debugregs;
556 set_hints.push(entry);
557 }
558 req.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
559 req.address = addr;
560 req.on_write = on_write;
561
562 self.main_transaction(&r, &[])?;
563 Ok(())
564 }
565
566 fn get_state(
567 &mut self,
568 state_set: MainRequest_StateSet,
569 out: &mut [u8],
570 ) -> result::Result<(), c_int> {
571 let mut r = MainRequest::new();
572 r.mut_get_state().set = state_set;
573 let (response, _) = self.main_transaction(&r, &[])?;
574 if !response.has_get_state() {
575 return Err(EPROTO);
576 }
577 let get_state: &MainResponse_GetState = response.get_get_state();
578 if get_state.state.len() != out.len() {
579 return Err(EPROTO);
580 }
581 out.copy_from_slice(&get_state.state);
582 Ok(())
583 }
584
585 fn set_state(
586 &mut self,
587 state_set: MainRequest_StateSet,
588 new_state: &[u8],
589 ) -> result::Result<(), c_int> {
590 let mut r = MainRequest::new();
591 let set_state: &mut MainRequest_SetState = r.mut_set_state();
592 set_state.set = state_set;
593 set_state.state = new_state.to_vec();
594
595 self.main_transaction(&r, &[])?;
596 Ok(())
597 }
598
599 fn set_identity_map_addr(&mut self, addr: u32) -> result::Result<(), c_int> {
600 let mut r = MainRequest::new();
601 r.mut_set_identity_map_addr().address = addr;
602
603 self.main_transaction(&r, &[])?;
604 Ok(())
605 }
606
607 fn pause_vcpus(&mut self, cpu_mask: u64, user: *mut c_void) -> result::Result<(), c_int> {
608 let mut r = MainRequest::new();
609 let pause_vcpus: &mut MainRequest_PauseVcpus = r.mut_pause_vcpus();
610 pause_vcpus.cpu_mask = cpu_mask;
611 pause_vcpus.user = user as u64;
612 self.main_transaction(&r, &[])?;
613 Ok(())
614 }
615
616 fn start(&mut self) -> result::Result<(), c_int> {
617 let mut r = MainRequest::new();
618 r.mut_start();
619 self.main_transaction(&r, &[])?;
620 Ok(())
621 }
622
623 fn get_vcpu(&mut self, cpu_id: u32) -> Result<*mut crosvm_vcpu, c_int> {
624 if let Some(vcpu) = self.vcpus.get(cpu_id as usize) {
625 Ok(vcpu as *const crosvm_vcpu as *mut crosvm_vcpu)
626 } else {
627 Err(ENOENT)
628 }
629 }
630
631 fn get_net_config(&mut self) -> result::Result<crosvm_net_config, c_int> {
632 let mut r = MainRequest::new();
633 r.mut_get_net_config();
634
635 let (response, mut files) = self.main_transaction(&r, &[])?;
636 if !response.has_get_net_config() {
637 return Err(EPROTO);
638 }
639 let config = response.get_get_net_config();
640
641 match files.pop() {
642 Some(f) => {
643 let mut net_config = crosvm_net_config {
644 tap_fd: f.into_raw_fd(),
645 host_ipv4_address: config.host_ipv4_address,
646 netmask: config.netmask,
647 host_mac_address: [0; 6],
648 _reserved: [0; 2],
649 };
650
651 let mac_addr = config.get_host_mac_address();
652 if mac_addr.len() != net_config.host_mac_address.len() {
653 return Err(EPROTO);
654 }
655 net_config.host_mac_address.copy_from_slice(mac_addr);
656
657 Ok(net_config)
658 }
659 None => Err(EPROTO),
660 }
661 }
662 }
663
664 /// This helper macro implements the C API's constructor/destructor for a given type. Because they
665 /// all follow the same pattern and include lots of boilerplate unsafe code, it makes sense to write
666 /// it once with this helper macro.
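///
/// For example, the invocation
///
/// ```ignore
/// impl_ctor_dtor!(
///     crosvm_io_event,
///     crosvm_create_io_event(space: u32, addr: u64, len: u32, datamatch: *const u8),
///     crosvm_destroy_io_event,
/// );
/// ```
///
/// expands to `#[no_mangle]` definitions of `crosvm_create_io_event` and
/// `crosvm_destroy_io_event` that forward to `crosvm_io_event::create` and `crosvm::destroy`.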
667 macro_rules! impl_ctor_dtor {
668 (
669 $t:ident,
670 $ctor:ident ( $( $x:ident: $y:ty ),* ),
671 $dtor:ident,
672 ) => {
673 #[allow(unused_unsafe)]
674 #[no_mangle]
675 pub unsafe extern fn $ctor(self_: *mut crosvm, $($x: $y,)* obj_ptr: *mut *mut $t) -> c_int {
676 let self_ = &mut (*self_);
677 match $t::create(self_, $($x,)*) {
678 Ok(obj) => {
679 *obj_ptr = Box::into_raw(Box::new(obj));
680 0
681 }
682 Err(e) => -e,
683 }
684 }
685 #[no_mangle]
686 pub unsafe extern fn $dtor(self_: *mut crosvm, obj_ptr: *mut *mut $t) -> c_int {
687 let self_ = &mut (*self_);
688 let obj = Box::from_raw(*obj_ptr);
689 match self_.destroy(obj.id) {
690 Ok(_) => {
691 *obj_ptr = null_mut();
692 0
693 }
694 Err(e) => {
695 Box::into_raw(obj);
696 -e
697 }
698 }
699 }
700 }
701 }
702
703 pub struct crosvm_io_event {
704 id: u32,
705 evt: File,
706 }
707
708 impl crosvm_io_event {
709 // Clippy: we use ptr::read_unaligned to read from pointers that may be
710 // underaligned. Dereferencing such a pointer is always undefined behavior
711 // in Rust.
712 //
713 // Lint can be unsuppressed once Clippy recognizes this pattern as correct.
714 // https://github.com/rust-lang/rust-clippy/issues/2881
715 #[allow(clippy::cast_ptr_alignment)]
716 unsafe fn create(
717 crosvm: &mut crosvm,
718 space: u32,
719 addr: u64,
720 length: u32,
721 datamatch: *const u8,
722 ) -> result::Result<crosvm_io_event, c_int> {
723 let datamatch = match length {
724 0 => 0,
725 1 => ptr::read_unaligned(datamatch as *const u8) as u64,
726 2 => ptr::read_unaligned(datamatch as *const u16) as u64,
727 4 => ptr::read_unaligned(datamatch as *const u32) as u64,
728 8 => ptr::read_unaligned(datamatch as *const u64),
729 _ => return Err(EINVAL),
730 };
731 Self::safe_create(crosvm, space, addr, length, datamatch)
732 }
733
734 fn safe_create(
735 crosvm: &mut crosvm,
736 space: u32,
737 addr: u64,
738 length: u32,
739 datamatch: u64,
740 ) -> result::Result<crosvm_io_event, c_int> {
741 let id = crosvm.get_id_allocator().alloc();
742
743 let mut r = MainRequest::new();
744 let create: &mut MainRequest_Create = r.mut_create();
745 create.id = id;
746 let io_event: &mut MainRequest_Create_IoEvent = create.mut_io_event();
747 io_event.space = AddressSpace::from_i32(space as i32).ok_or(EINVAL)?;
748 io_event.address = addr;
749 io_event.length = length;
750 io_event.datamatch = datamatch;
751
752 let ret = match crosvm.main_transaction(&r, &[]) {
753 Ok((_, mut files)) => match files.pop() {
754 Some(evt) => return Ok(crosvm_io_event { id, evt }),
755 None => EPROTO,
756 },
757 Err(e) => e,
758 };
759 crosvm.get_id_allocator().free(id);
760 Err(ret)
761 }
762 }
763
764 impl_ctor_dtor!(
765 crosvm_io_event,
766 crosvm_create_io_event(space: u32, addr: u64, len: u32, datamatch: *const u8),
767 crosvm_destroy_io_event,
768 );
769
770 #[no_mangle]
771 pub unsafe extern "C" fn crosvm_io_event_fd(this: *mut crosvm_io_event) -> c_int {
772 let _u = record(Stat::IoEvent);
773 (*this).evt.as_raw_fd()
774 }
775
776 pub struct crosvm_memory {
777 id: u32,
778 length: u64,
779 }
780
781 impl crosvm_memory {
782 fn create(
783 crosvm: &mut crosvm,
784 fd: c_int,
785 offset: u64,
786 length: u64,
787 start: u64,
788 read_only: bool,
789 dirty_log: bool,
790 ) -> result::Result<crosvm_memory, c_int> {
791 const PAGE_MASK: u64 = 0x0fff;
792 if offset & PAGE_MASK != 0 || length & PAGE_MASK != 0 {
793 return Err(EINVAL);
794 }
795 let id = crosvm.get_id_allocator().alloc();
796
797 let mut r = MainRequest::new();
798 let create: &mut MainRequest_Create = r.mut_create();
799 create.id = id;
800 let memory: &mut MainRequest_Create_Memory = create.mut_memory();
801 memory.offset = offset;
802 memory.start = start;
803 memory.length = length;
804 memory.read_only = read_only;
805 memory.dirty_log = dirty_log;
806
807 let ret = match crosvm.main_transaction(&r, &[fd]) {
808 Ok(_) => return Ok(crosvm_memory { id, length }),
809 Err(e) => e,
810 };
811 crosvm.get_id_allocator().free(id);
812 Err(ret)
813 }
814
815 fn get_dirty_log(&mut self, crosvm: &mut crosvm) -> result::Result<Vec<u8>, c_int> {
816 let mut r = MainRequest::new();
817 r.mut_dirty_log().id = self.id;
818 let (mut response, _) = crosvm.main_transaction(&r, &[])?;
819 if !response.has_dirty_log() {
820 return Err(EPROTO);
821 }
822 Ok(response.take_dirty_log().bitmap)
823 }
824 }
825
826 impl_ctor_dtor!(
827 crosvm_memory,
828 crosvm_create_memory(
829 fd: c_int,
830 offset: u64,
831 length: u64,
832 start: u64,
833 read_only: bool,
834 dirty_log: bool
835 ),
836 crosvm_destroy_memory,
837 );
838
839 #[no_mangle]
840 pub unsafe extern "C" fn crosvm_memory_get_dirty_log(
841 crosvm: *mut crosvm,
842 this: *mut crosvm_memory,
843 log: *mut u8,
844 ) -> c_int {
845 let _u = record(Stat::MemoryGetDirtyLog);
846 let crosvm = &mut *crosvm;
847 let this = &mut *this;
848 let log_slice = slice::from_raw_parts_mut(log, dirty_log_bitmap_size(this.length as usize));
849 match this.get_dirty_log(crosvm) {
850 Ok(bitmap) => {
851 if bitmap.len() == log_slice.len() {
852 log_slice.copy_from_slice(&bitmap);
853 0
854 } else {
855 -EPROTO
856 }
857 }
858 Err(e) => -e,
859 }
860 }
861
862 pub struct crosvm_irq_event {
863 id: u32,
864 trigger_evt: File,
865 resample_evt: File,
866 }
867
868 impl crosvm_irq_event {
869 fn create(crosvm: &mut crosvm, irq_id: u32) -> result::Result<crosvm_irq_event, c_int> {
870 let id = crosvm.get_id_allocator().alloc();
871
872 let mut r = MainRequest::new();
873 let create: &mut MainRequest_Create = r.mut_create();
874 create.id = id;
875 let irq_event: &mut MainRequest_Create_IrqEvent = create.mut_irq_event();
876 irq_event.irq_id = irq_id;
877 irq_event.resample = true;
878
879 let ret = match crosvm.main_transaction(&r, &[]) {
880 Ok((_, mut files)) => {
881 if files.len() >= 2 {
882 let resample_evt = files.pop().unwrap();
883 let trigger_evt = files.pop().unwrap();
884 return Ok(crosvm_irq_event {
885 id,
886 trigger_evt,
887 resample_evt,
888 });
889 }
890 EPROTO
891 }
892 Err(e) => e,
893 };
894 crosvm.get_id_allocator().free(id);
895 Err(ret)
896 }
897 }
898
899 impl_ctor_dtor!(
900 crosvm_irq_event,
901 crosvm_create_irq_event(irq_id: u32),
902 crosvm_destroy_irq_event,
903 );
904
905 #[no_mangle]
906 pub unsafe extern "C" fn crosvm_irq_event_get_fd(this: *mut crosvm_irq_event) -> c_int {
907 let _u = record(Stat::IrqEventGetFd);
908 (*this).trigger_evt.as_raw_fd()
909 }
910
911 #[no_mangle]
912 pub unsafe extern "C" fn crosvm_irq_event_get_resample_fd(this: *mut crosvm_irq_event) -> c_int {
913 let _u = record(Stat::IrqEventGetResampleFd);
914 (*this).resample_evt.as_raw_fd()
915 }
916
917 #[allow(dead_code)]
918 #[derive(Copy, Clone)]
919 #[repr(C)]
920 struct anon_io_access {
921 address_space: u32,
922 __reserved0: [u8; 4],
923 address: u64,
924 data: *mut u8,
925 length: u32,
926 is_write: u8,
927 no_resume: u8,
928 __reserved1: [u8; 2],
929 }
930
931 #[derive(Copy, Clone)]
932 #[repr(C)]
933 struct anon_hyperv_call {
934 input: u64,
935 result: *mut u8,
936 params: [u64; 2],
937 }
938
939 #[derive(Copy, Clone)]
940 #[repr(C)]
941 struct anon_hyperv_synic {
942 msr: u32,
943 reserved: u32,
944 control: u64,
945 evt_page: u64,
946 msg_page: u64,
947 }
948
949 #[repr(C)]
950 union anon_vcpu_event {
951 io_access: anon_io_access,
952 user: *mut c_void,
953 hyperv_call: anon_hyperv_call,
954 hyperv_synic: anon_hyperv_synic,
955 #[allow(dead_code)]
956 __reserved: [u8; 64],
957 }
958
959 #[repr(C)]
960 pub struct crosvm_vcpu_event {
961 kind: u32,
962 __reserved: [u8; 4],
963 event: anon_vcpu_event,
964 }
965
966 // |get| tracks if the |cache| contains a cached value that can service get()
967 // requests. A set() call will populate |cache| and |set| to true to record
968 // that the next resume() should apply the state. We've got two choices on
969 // what to do about |get| on a set(): 1) leave it as true, or 2) clear it and
970 // have any call to get() first apply any pending set. Currently #2 is used
971 // to favor correctness over performance (it gives KVM a chance to
972 // modify/massage the values input to the set call). A plugin will rarely
973 // (if ever) issue a get() after a set() on the same vcpu exit, so opting for
974 // #1 is unlikely to provide a tangible performance gain.
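// Concretely (see crosvm_vcpu_set_regs()/crosvm_vcpu_get_regs() below): set_regs() fills |cache|,
// sets |set| and clears |get|; a get_regs() issued later on the same exit first flushes the
// pending set via set_state_from_cache() and then fetches fresh state from crosvm.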
975 pub struct crosvm_vcpu_reg_cache {
976 get: bool,
977 set: bool,
978 cache: Vec<u8>,
979 }
980
981 pub struct crosvm_vcpu {
982 read_pipe: File,
983 write_pipe: File,
984 send_init: bool,
985 request_buffer: Vec<u8>,
986 response_buffer: Vec<u8>,
987 response_base: usize,
988 response_length: usize,
989 resume_data: Vec<u8>,
990
991 regs: crosvm_vcpu_reg_cache,
992 sregs: crosvm_vcpu_reg_cache,
993 debugregs: crosvm_vcpu_reg_cache,
994 }
995
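// Each response read from a VCPU pipe is prefixed with its length encoded as a protobuf varint.
// Returns the decoded value and the number of bytes the varint occupied, or (0, 0) if the input
// is truncated or the value would overflow 32 bits.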
996 fn read_varint32(data: &[u8]) -> (u32, usize) {
997 let mut value: u32 = 0;
998 let mut shift: u32 = 0;
999 for (i, &b) in data.iter().enumerate() {
1000 if b < 0x80 {
1001 return match (b as u32).checked_shl(shift) {
1002 None => (0, 0),
1003 Some(b) => (value | b, i + 1),
1004 };
1005 }
1006 match ((b as u32) & 0x7F).checked_shl(shift) {
1007 None => return (0, 0),
1008 Some(b) => value |= b,
1009 }
1010 shift += 7;
1011 }
1012 (0, 0)
1013 }
1014
1015 impl crosvm_vcpu {
1016 fn new(read_pipe: File, write_pipe: File) -> crosvm_vcpu {
1017 crosvm_vcpu {
1018 read_pipe,
1019 write_pipe,
1020 send_init: true,
1021 request_buffer: Vec::new(),
1022 response_buffer: vec![0; MAX_DATAGRAM_SIZE],
1023 response_base: 0,
1024 response_length: 0,
1025 resume_data: Vec::new(),
1026 regs: crosvm_vcpu_reg_cache {
1027 get: false,
1028 set: false,
1029 cache: vec![],
1030 },
1031 sregs: crosvm_vcpu_reg_cache {
1032 get: false,
1033 set: false,
1034 cache: vec![],
1035 },
1036 debugregs: crosvm_vcpu_reg_cache {
1037 get: false,
1038 set: false,
1039 cache: vec![],
1040 },
1041 }
1042 }
1043 fn vcpu_send(&mut self, request: &VcpuRequest) -> result::Result<(), c_int> {
1044 self.request_buffer.clear();
1045 request
1046 .write_to_vec(&mut self.request_buffer)
1047 .map_err(proto_error_to_int)?;
1048 self.write_pipe
1049 .write(self.request_buffer.as_slice())
1050 .map_err(|e| -e.raw_os_error().unwrap_or(EINVAL))?;
1051 Ok(())
1052 }
1053
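// A single read from the pipe may contain several length-delimited responses; leftover bytes
// remain in `response_buffer` (tracked by `response_base`/`response_length`) for later calls.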
1054 fn vcpu_recv(&mut self) -> result::Result<VcpuResponse, c_int> {
1055 if self.response_length == 0 {
1056 let msg_size = self
1057 .read_pipe
1058 .read(&mut self.response_buffer)
1059 .map_err(|e| -e.raw_os_error().unwrap_or(EINVAL))?;
1060 self.response_base = 0;
1061 self.response_length = msg_size;
1062 }
1063 if self.response_length == 0 {
1064 return Err(EINVAL);
1065 }
1066 let (value, bytes) = read_varint32(
1067 &self.response_buffer[self.response_base..self.response_base + self.response_length],
1068 );
1069 let total_size: usize = bytes + value as usize;
1070 if bytes == 0 || total_size > self.response_length {
1071 return Err(EINVAL);
1072 }
1073 let response: VcpuResponse = parse_from_bytes(
1074 &self.response_buffer[self.response_base + bytes..self.response_base + total_size],
1075 )
1076 .map_err(proto_error_to_int)?;
1077 self.response_base += total_size;
1078 self.response_length -= total_size;
1079 if response.errno != 0 {
1080 return Err(response.errno);
1081 }
1082 Ok(response)
1083 }
1084
1085 fn vcpu_transaction(&mut self, request: &VcpuRequest) -> result::Result<VcpuResponse, c_int> {
1086 self.vcpu_send(request)?;
1087 let response: VcpuResponse = self.vcpu_recv()?;
1088 Ok(response)
1089 }
1090
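// Only the first call sends an explicit `wait` request (tracked by `send_init`); later calls
// just read the next response from the pipe.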
1091 fn wait(&mut self, event: &mut crosvm_vcpu_event) -> result::Result<(), c_int> {
1092 if self.send_init {
1093 self.send_init = false;
1094 let mut r = VcpuRequest::new();
1095 r.mut_wait();
1096 self.vcpu_send(&r)?;
1097 }
1098 let mut response: VcpuResponse = self.vcpu_recv()?;
1099 if !response.has_wait() {
1100 return Err(EPROTO);
1101 }
1102 let wait: &mut VcpuResponse_Wait = response.mut_wait();
1103 if wait.has_init() {
1104 event.kind = CROSVM_VCPU_EVENT_KIND_INIT;
1105 self.regs.get = false;
1106 self.sregs.get = false;
1107 self.debugregs.get = false;
1108 Ok(())
1109 } else if wait.has_io() {
1110 let mut io: VcpuResponse_Wait_Io = wait.take_io();
1111 event.kind = CROSVM_VCPU_EVENT_KIND_IO_ACCESS;
1112 event.event.io_access = anon_io_access {
1113 address_space: io.space.value() as u32,
1114 __reserved0: Default::default(),
1115 address: io.address,
1116 data: io.data.as_mut_ptr(),
1117 length: io.data.len() as u32,
1118 is_write: io.is_write as u8,
1119 no_resume: io.no_resume as u8,
1120 __reserved1: Default::default(),
1121 };
1122 self.resume_data = io.data;
1123 self.regs.get = !io.regs.is_empty();
1124 if self.regs.get {
1125 swap(&mut self.regs.cache, &mut io.regs);
1126 }
1127 self.sregs.get = !io.sregs.is_empty();
1128 if self.sregs.get {
1129 swap(&mut self.sregs.cache, &mut io.sregs);
1130 }
1131 self.debugregs.get = !io.debugregs.is_empty();
1132 if self.debugregs.get {
1133 swap(&mut self.debugregs.cache, &mut io.debugregs);
1134 }
1135 Ok(())
1136 } else if wait.has_user() {
1137 let user: &VcpuResponse_Wait_User = wait.get_user();
1138 event.kind = CROSVM_VCPU_EVENT_KIND_PAUSED;
1139 event.event.user = user.user as *mut c_void;
1140 self.regs.get = false;
1141 self.sregs.get = false;
1142 self.debugregs.get = false;
1143 Ok(())
1144 } else if wait.has_hyperv_call() {
1145 let hv: &VcpuResponse_Wait_HypervCall = wait.get_hyperv_call();
1146 event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_HCALL;
1147 self.resume_data = vec![0; 8];
1148 event.event.hyperv_call = anon_hyperv_call {
1149 input: hv.input,
1150 result: self.resume_data.as_mut_ptr(),
1151 params: [hv.params0, hv.params1],
1152 };
1153 self.regs.get = false;
1154 self.sregs.get = false;
1155 self.debugregs.get = false;
1156 Ok(())
1157 } else if wait.has_hyperv_synic() {
1158 let hv: &VcpuResponse_Wait_HypervSynic = wait.get_hyperv_synic();
1159 event.kind = CROSVM_VCPU_EVENT_KIND_HYPERV_SYNIC;
1160 event.event.hyperv_synic = anon_hyperv_synic {
1161 msr: hv.msr,
1162 reserved: 0,
1163 control: hv.control,
1164 evt_page: hv.evt_page,
1165 msg_page: hv.msg_page,
1166 };
1167 self.regs.get = false;
1168 self.sregs.get = false;
1169 self.debugregs.get = false;
1170 Ok(())
1171 } else {
1172 Err(EPROTO)
1173 }
1174 }
1175
1176 fn resume(&mut self) -> result::Result<(), c_int> {
1177 let mut r = VcpuRequest::new();
1178 let resume: &mut VcpuRequest_Resume = r.mut_resume();
1179 swap(&mut resume.data, &mut self.resume_data);
1180
1181 if self.regs.set {
1182 swap(&mut resume.regs, &mut self.regs.cache);
1183 self.regs.set = false;
1184 }
1185 if self.sregs.set {
1186 swap(&mut resume.sregs, &mut self.sregs.cache);
1187 self.sregs.set = false;
1188 }
1189 if self.debugregs.set {
1190 swap(&mut resume.debugregs, &mut self.debugregs.cache);
1191 self.debugregs.set = false;
1192 }
1193
1194 self.vcpu_send(&r)?;
1195 Ok(())
1196 }
1197
1198 fn get_state(
1199 &mut self,
1200 state_set: VcpuRequest_StateSet,
1201 out: &mut [u8],
1202 ) -> result::Result<(), c_int> {
1203 let mut r = VcpuRequest::new();
1204 r.mut_get_state().set = state_set;
1205 let response = self.vcpu_transaction(&r)?;
1206 if !response.has_get_state() {
1207 return Err(EPROTO);
1208 }
1209 let get_state: &VcpuResponse_GetState = response.get_get_state();
1210 if get_state.state.len() != out.len() {
1211 return Err(EPROTO);
1212 }
1213 out.copy_from_slice(&get_state.state);
1214 Ok(())
1215 }
1216
1217 fn set_state(
1218 &mut self,
1219 state_set: VcpuRequest_StateSet,
1220 new_state: &[u8],
1221 ) -> result::Result<(), c_int> {
1222 let mut r = VcpuRequest::new();
1223 let set_state: &mut VcpuRequest_SetState = r.mut_set_state();
1224 set_state.set = state_set;
1225 set_state.state = new_state.to_vec();
1226
1227 self.vcpu_transaction(&r)?;
1228 Ok(())
1229 }
1230
1231 fn set_state_from_cache(
1232 &mut self,
1233 state_set: VcpuRequest_StateSet,
1234 ) -> result::Result<(), c_int> {
1235 let mut r = VcpuRequest::new();
1236 let set_state: &mut VcpuRequest_SetState = r.mut_set_state();
1237 set_state.set = state_set;
1238 match state_set {
1239 VcpuRequest_StateSet::REGS => {
1240 swap(&mut set_state.state, &mut self.regs.cache);
1241 self.regs.set = false;
1242 }
1243 VcpuRequest_StateSet::SREGS => {
1244 swap(&mut set_state.state, &mut self.sregs.cache);
1245 self.sregs.set = false;
1246 }
1247 VcpuRequest_StateSet::DEBUGREGS => {
1248 swap(&mut set_state.state, &mut self.debugregs.cache);
1249 self.debugregs.set = false;
1250 }
1251 _ => return Err(EINVAL),
1252 }
1253
1254 self.vcpu_transaction(&r)?;
1255 Ok(())
1256 }
1257
1258 fn get_hyperv_cpuid(
1259 &mut self,
1260 cpuid_entries: &mut [kvm_cpuid_entry2],
1261 cpuid_count: &mut usize,
1262 ) -> result::Result<(), c_int> {
1263 *cpuid_count = 0;
1264
1265 let mut r = VcpuRequest::new();
1266 r.mut_get_hyperv_cpuid();
1267
1268 let response = self.vcpu_transaction(&r)?;
1269 if !response.has_get_hyperv_cpuid() {
1270 return Err(EPROTO);
1271 }
1272
1273 let hyperv_cpuids: &VcpuResponse_CpuidResponse = response.get_get_hyperv_cpuid();
1274
1275 *cpuid_count = hyperv_cpuids.get_entries().len();
1276 if *cpuid_count > cpuid_entries.len() {
1277 return Err(E2BIG);
1278 }
1279
1280 for (proto_entry, kvm_entry) in hyperv_cpuids
1281 .get_entries()
1282 .iter()
1283 .zip(cpuid_entries.iter_mut())
1284 {
1285 *kvm_entry = cpuid_proto_to_kvm(proto_entry);
1286 }
1287
1288 Ok(())
1289 }
1290
1291 fn get_msrs(
1292 &mut self,
1293 msr_entries: &mut [kvm_msr_entry],
1294 msr_count: &mut usize,
1295 ) -> result::Result<(), c_int> {
1296 *msr_count = 0;
1297
1298 let mut r = VcpuRequest::new();
1299 let entry_indices: &mut Vec<u32> = r.mut_get_msrs().mut_entry_indices();
1300 for entry in msr_entries.iter() {
1301 entry_indices.push(entry.index);
1302 }
1303
1304 let response = self.vcpu_transaction(&r)?;
1305 if !response.has_get_msrs() {
1306 return Err(EPROTO);
1307 }
1308 let get_msrs: &VcpuResponse_GetMsrs = response.get_get_msrs();
1309 *msr_count = get_msrs.get_entry_data().len();
1310 if *msr_count > msr_entries.len() {
1311 return Err(E2BIG);
1312 }
1313 for (&msr_data, msr_entry) in get_msrs.get_entry_data().iter().zip(msr_entries) {
1314 msr_entry.data = msr_data;
1315 }
1316 Ok(())
1317 }
1318
1319 fn set_msrs(&mut self, msr_entries: &[kvm_msr_entry]) -> result::Result<(), c_int> {
1320 let mut r = VcpuRequest::new();
1321 let set_msrs_entries: &mut RepeatedField<VcpuRequest_MsrEntry> =
1322 r.mut_set_msrs().mut_entries();
1323 for msr_entry in msr_entries {
1324 let mut entry = VcpuRequest_MsrEntry::new();
1325 entry.index = msr_entry.index;
1326 entry.data = msr_entry.data;
1327 set_msrs_entries.push(entry);
1328 }
1329
1330 self.vcpu_transaction(&r)?;
1331 Ok(())
1332 }
1333
1334 fn set_cpuid(&mut self, cpuid_entries: &[kvm_cpuid_entry2]) -> result::Result<(), c_int> {
1335 let mut r = VcpuRequest::new();
1336 let set_cpuid_entries: &mut RepeatedField<CpuidEntry> = r.mut_set_cpuid().mut_entries();
1337 for cpuid_entry in cpuid_entries {
1338 set_cpuid_entries.push(cpuid_kvm_to_proto(cpuid_entry));
1339 }
1340
1341 self.vcpu_transaction(&r)?;
1342 Ok(())
1343 }
1344
1345 fn enable_capability(&mut self, capability: u32) -> result::Result<(), c_int> {
1346 let mut r = VcpuRequest::new();
1347 r.mut_enable_capability().capability = capability;
1348 self.vcpu_transaction(&r)?;
1349 Ok(())
1350 }
1351 }
1352
1353 // crosvm API signals success as 0 and errors as negative values
1354 // derived from `errno`.
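// For example, an internal Err(EINVAL) is reported to the C caller as -EINVAL.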
1355 fn to_crosvm_rc<T>(r: result::Result<T, c_int>) -> c_int {
1356 match r {
1357 Ok(_) => 0,
1358 Err(e) => -e,
1359 }
1360 }
1361
1362 #[no_mangle]
1363 pub unsafe extern "C" fn crosvm_connect(out: *mut *mut crosvm) -> c_int {
1364 let _u = record(Stat::Connect);
1365 let socket_name = match env::var("CROSVM_SOCKET") {
1366 Ok(v) => v,
1367 _ => return -ENOTCONN,
1368 };
1369
1370 let socket = match socket_name.parse() {
1371 Ok(v) if v < 0 => return -EINVAL,
1372 Ok(v) => v,
1373 _ => return -EINVAL,
1374 };
1375
1376 let socket = UnixDatagram::from_raw_fd(socket);
1377 let crosvm = match crosvm::from_connection(socket) {
1378 Ok(c) => c,
1379 Err(e) => return -e,
1380 };
1381 *out = Box::into_raw(Box::new(crosvm));
1382 0
1383 }
1384
1385 #[no_mangle]
1386 pub unsafe extern "C" fn crosvm_new_connection(self_: *mut crosvm, out: *mut *mut crosvm) -> c_int {
1387 let _u = record(Stat::NewConnection);
1388 let self_ = &mut (*self_);
1389 match self_.try_clone() {
1390 Ok(cloned) => {
1391 *out = Box::into_raw(Box::new(cloned));
1392 0
1393 }
1394 Err(e) => -e,
1395 }
1396 }
1397
1398 #[no_mangle]
1399 pub unsafe extern "C" fn crosvm_destroy_connection(self_: *mut *mut crosvm) -> c_int {
1400 let _u = record(Stat::DestroyConnection);
1401 Box::from_raw(*self_);
1402 *self_ = null_mut();
1403 0
1404 }
1405
1406 #[no_mangle]
1407 pub unsafe extern "C" fn crosvm_get_shutdown_eventfd(self_: *mut crosvm) -> c_int {
1408 let _u = record(Stat::GetShutdownEvent);
1409 let self_ = &mut (*self_);
1410 match self_.get_shutdown_event() {
1411 Ok(f) => f.into_raw_fd(),
1412 Err(e) => -e,
1413 }
1414 }
1415
1416 #[no_mangle]
1417 pub unsafe extern "C" fn crosvm_check_extension(
1418 self_: *mut crosvm,
1419 extension: u32,
1420 has_extension: *mut bool,
1421 ) -> c_int {
1422 let _u = record(Stat::CheckExtension);
1423 let self_ = &mut (*self_);
1424 let ret = self_.check_extension(extension);
1425
1426 if let Ok(supported) = ret {
1427 *has_extension = supported;
1428 }
1429 to_crosvm_rc(ret)
1430 }
1431
1432 #[no_mangle]
1433 pub unsafe extern "C" fn crosvm_enable_capability(
1434 _self_: *mut crosvm,
1435 _capability: u32,
1436 _flags: u32,
1437 _args: *const u64,
1438 ) -> c_int {
1439 let _u = record(Stat::EnableVmCapability);
1440 -EINVAL
1441 }
1442
1443 #[no_mangle]
1444 pub unsafe extern "C" fn crosvm_get_supported_cpuid(
1445 this: *mut crosvm,
1446 entry_count: u32,
1447 cpuid_entries: *mut kvm_cpuid_entry2,
1448 out_count: *mut u32,
1449 ) -> c_int {
1450 let _u = record(Stat::GetSupportedCpuid);
1451 let this = &mut *this;
1452 let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
1453 let mut cpuid_count: usize = 0;
1454 let ret = this.get_supported_cpuid(cpuid_entries, &mut cpuid_count);
1455 *out_count = cpuid_count as u32;
1456 to_crosvm_rc(ret)
1457 }
1458
1459 #[no_mangle]
1460 pub unsafe extern "C" fn crosvm_get_emulated_cpuid(
1461 this: *mut crosvm,
1462 entry_count: u32,
1463 cpuid_entries: *mut kvm_cpuid_entry2,
1464 out_count: *mut u32,
1465 ) -> c_int {
1466 let _u = record(Stat::GetEmulatedCpuid);
1467 let this = &mut *this;
1468 let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
1469 let mut cpuid_count: usize = 0;
1470 let ret = this.get_emulated_cpuid(cpuid_entries, &mut cpuid_count);
1471 *out_count = cpuid_count as u32;
1472 to_crosvm_rc(ret)
1473 }
1474
1475 #[no_mangle]
1476 pub unsafe extern "C" fn crosvm_get_msr_index_list(
1477 this: *mut crosvm,
1478 entry_count: u32,
1479 msr_indices: *mut u32,
1480 out_count: *mut u32,
1481 ) -> c_int {
1482 let _u = record(Stat::GetMsrIndexList);
1483 let this = &mut *this;
1484 let msr_indices = from_raw_parts_mut(msr_indices, entry_count as usize);
1485 let mut msr_count: usize = 0;
1486 let ret = this.get_msr_index_list(msr_indices, &mut msr_count);
1487 *out_count = msr_count as u32;
1488 to_crosvm_rc(ret)
1489 }
1490
1491 #[no_mangle]
1492 pub unsafe extern "C" fn crosvm_net_get_config(
1493 self_: *mut crosvm,
1494 config: *mut crosvm_net_config,
1495 ) -> c_int {
1496 let _u = record(Stat::NetGetConfig);
1497 let self_ = &mut (*self_);
1498 let ret = self_.get_net_config();
1499
1500 if let Ok(c) = ret {
1501 *config = c;
1502 }
1503
1504 to_crosvm_rc(ret)
1505 }
1506
1507 #[no_mangle]
1508 pub unsafe extern "C" fn crosvm_reserve_range(
1509 self_: *mut crosvm,
1510 space: u32,
1511 start: u64,
1512 length: u64,
1513 ) -> c_int {
1514 let _u = record(Stat::ReserveRange);
1515 let self_ = &mut (*self_);
1516 let ret = self_.reserve_range(space, start, length, false);
1517 to_crosvm_rc(ret)
1518 }
1519
1520 #[no_mangle]
1521 pub unsafe extern "C" fn crosvm_reserve_async_write_range(
1522 self_: *mut crosvm,
1523 space: u32,
1524 start: u64,
1525 length: u64,
1526 ) -> c_int {
1527 let _u = record(Stat::ReserveAsyncWriteRange);
1528 let self_ = &mut (*self_);
1529 let ret = self_.reserve_range(space, start, length, true);
1530 to_crosvm_rc(ret)
1531 }
1532
1533 #[no_mangle]
1534 pub unsafe extern "C" fn crosvm_set_irq(self_: *mut crosvm, irq_id: u32, active: bool) -> c_int {
1535 let _u = record(Stat::SetIrq);
1536 let self_ = &mut (*self_);
1537 let ret = self_.set_irq(irq_id, active);
1538 to_crosvm_rc(ret)
1539 }
1540
1541 #[no_mangle]
1542 pub unsafe extern "C" fn crosvm_set_irq_routing(
1543 self_: *mut crosvm,
1544 route_count: u32,
1545 routes: *const crosvm_irq_route,
1546 ) -> c_int {
1547 let _u = record(Stat::SetIrqRouting);
1548 let self_ = &mut (*self_);
1549 let ret = self_.set_irq_routing(slice::from_raw_parts(routes, route_count as usize));
1550 to_crosvm_rc(ret)
1551 }
1552
1553 #[no_mangle]
1554 pub unsafe extern "C" fn crosvm_set_hypercall_hint(
1555 self_: *mut crosvm,
1556 hints_count: u32,
1557 hints: *const crosvm_hint,
1558 ) -> c_int {
1559 let _u = record(Stat::SetHypercallHint);
1560 let self_ = &mut (*self_);
1561
1562 if hints_count < 1 {
1563 let ret = self_.set_hint(0, 0, false, &[]);
1564 return to_crosvm_rc(ret);
1565 }
1566 if hints_count > CROSVM_MAX_HINT_COUNT {
1567 return -EINVAL;
1568 }
1569 let hints = slice::from_raw_parts(hints, hints_count as usize);
1570 let hint = &hints[0];
1571 if hint.hint_version != 0
1572 || hint.reserved != 0
1573 || hint.address == 0
1574 || (hint.address_flags != 0 && hint.address_flags != CROSVM_HINT_ON_WRITE)
1575 || hint.details_count > CROSVM_MAX_HINT_DETAIL_COUNT as u16
1576 {
1577 return -EINVAL;
1578 }
1579 let ret = self_.set_hint(
1580 hint.address_space,
1581 hint.address,
1582 hint.address_flags == CROSVM_HINT_ON_WRITE,
1583 slice::from_raw_parts(hint.details, hint.details_count as usize),
1584 );
1585 to_crosvm_rc(ret)
1586 }
1587
1588 #[no_mangle]
1589 pub unsafe extern "C" fn crosvm_get_pic_state(
1590 this: *mut crosvm,
1591 primary: bool,
1592 state: *mut kvm_pic_state,
1593 ) -> c_int {
1594 let _u = record(Stat::GetPicState);
1595 let this = &mut *this;
1596 let state_set = if primary {
1597 MainRequest_StateSet::PIC0
1598 } else {
1599 MainRequest_StateSet::PIC1
1600 };
1601 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_pic_state>());
1602 let ret = this.get_state(state_set, state);
1603 to_crosvm_rc(ret)
1604 }
1605
1606 #[no_mangle]
1607 pub unsafe extern "C" fn crosvm_set_pic_state(
1608 this: *mut crosvm,
1609 primary: bool,
1610 state: *mut kvm_pic_state,
1611 ) -> c_int {
1612 let _u = record(Stat::SetPicState);
1613 let this = &mut *this;
1614 let state_set = if primary {
1615 MainRequest_StateSet::PIC0
1616 } else {
1617 MainRequest_StateSet::PIC1
1618 };
1619 let state = from_raw_parts(state as *mut u8, size_of::<kvm_pic_state>());
1620 let ret = this.set_state(state_set, state);
1621 to_crosvm_rc(ret)
1622 }
1623
1624 #[no_mangle]
1625 pub unsafe extern "C" fn crosvm_get_ioapic_state(
1626 this: *mut crosvm,
1627 state: *mut kvm_ioapic_state,
1628 ) -> c_int {
1629 let _u = record(Stat::GetIoapicState);
1630 let this = &mut *this;
1631 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_ioapic_state>());
1632 let ret = this.get_state(MainRequest_StateSet::IOAPIC, state);
1633 to_crosvm_rc(ret)
1634 }
1635
1636 #[no_mangle]
1637 pub unsafe extern "C" fn crosvm_set_ioapic_state(
1638 this: *mut crosvm,
1639 state: *const kvm_ioapic_state,
1640 ) -> c_int {
1641 let _u = record(Stat::SetIoapicState);
1642 let this = &mut *this;
1643 let state = from_raw_parts(state as *mut u8, size_of::<kvm_ioapic_state>());
1644 let ret = this.set_state(MainRequest_StateSet::IOAPIC, state);
1645 to_crosvm_rc(ret)
1646 }
1647
1648 #[no_mangle]
1649 pub unsafe extern "C" fn crosvm_get_pit_state(
1650 this: *mut crosvm,
1651 state: *mut kvm_pit_state2,
1652 ) -> c_int {
1653 let _u = record(Stat::GetPitState);
1654 let this = &mut *this;
1655 let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_pit_state2>());
1656 let ret = this.get_state(MainRequest_StateSet::PIT, state);
1657 to_crosvm_rc(ret)
1658 }
1659
1660 #[no_mangle]
1661 pub unsafe extern "C" fn crosvm_set_pit_state(
1662 this: *mut crosvm,
1663 state: *const kvm_pit_state2,
1664 ) -> c_int {
1665 let _u = record(Stat::SetPitState);
1666 let this = &mut *this;
1667 let state = from_raw_parts(state as *mut u8, size_of::<kvm_pit_state2>());
1668 let ret = this.set_state(MainRequest_StateSet::PIT, state);
1669 to_crosvm_rc(ret)
1670 }
1671
1672 #[no_mangle]
1673 pub unsafe extern "C" fn crosvm_get_clock(
1674 this: *mut crosvm,
1675 clock_data: *mut kvm_clock_data,
1676 ) -> c_int {
1677 let _u = record(Stat::GetClock);
1678 let this = &mut *this;
1679 let state = from_raw_parts_mut(clock_data as *mut u8, size_of::<kvm_clock_data>());
1680 let ret = this.get_state(MainRequest_StateSet::CLOCK, state);
1681 to_crosvm_rc(ret)
1682 }
1683
1684 #[no_mangle]
1685 pub unsafe extern "C" fn crosvm_set_clock(
1686 this: *mut crosvm,
1687 clock_data: *const kvm_clock_data,
1688 ) -> c_int {
1689 let _u = record(Stat::SetClock);
1690 let this = &mut *this;
1691 let state = from_raw_parts(clock_data as *mut u8, size_of::<kvm_clock_data>());
1692 let ret = this.set_state(MainRequest_StateSet::CLOCK, state);
1693 to_crosvm_rc(ret)
1694 }
1695
1696 #[no_mangle]
1697 pub unsafe extern "C" fn crosvm_set_identity_map_addr(self_: *mut crosvm, addr: u32) -> c_int {
1698 let _u = record(Stat::SetIdentityMapAddr);
1699 let self_ = &mut (*self_);
1700 let ret = self_.set_identity_map_addr(addr);
1701 to_crosvm_rc(ret)
1702 }
1703
1704 #[no_mangle]
1705 pub unsafe extern "C" fn crosvm_pause_vcpus(
1706 self_: *mut crosvm,
1707 cpu_mask: u64,
1708 user: *mut c_void,
1709 ) -> c_int {
1710 let _u = record(Stat::PauseVcpus);
1711 let self_ = &mut (*self_);
1712 let ret = self_.pause_vcpus(cpu_mask, user);
1713 to_crosvm_rc(ret)
1714 }
1715
1716 #[no_mangle]
1717 pub unsafe extern "C" fn crosvm_start(self_: *mut crosvm) -> c_int {
1718 let _u = record(Stat::Start);
1719 let self_ = &mut (*self_);
1720 let ret = self_.start();
1721 to_crosvm_rc(ret)
1722 }
1723
1724 #[no_mangle]
1725 pub unsafe extern "C" fn crosvm_get_vcpu(
1726 self_: *mut crosvm,
1727 cpu_id: u32,
1728 out: *mut *mut crosvm_vcpu,
1729 ) -> c_int {
1730 let _u = record(Stat::GetVcpu);
1731 let self_ = &mut (*self_);
1732 let ret = self_.get_vcpu(cpu_id);
1733
1734 if let Ok(vcpu) = ret {
1735 *out = vcpu;
1736 }
1737 to_crosvm_rc(ret)
1738 }
1739
1740 #[no_mangle]
1741 pub unsafe extern "C" fn crosvm_vcpu_wait(
1742 this: *mut crosvm_vcpu,
1743 event: *mut crosvm_vcpu_event,
1744 ) -> c_int {
1745 let _u = record(Stat::VcpuWait);
1746 let this = &mut *this;
1747 let event = &mut *event;
1748 let ret = this.wait(event);
1749 to_crosvm_rc(ret)
1750 }
1751
1752 #[no_mangle]
1753 pub unsafe extern "C" fn crosvm_vcpu_resume(this: *mut crosvm_vcpu) -> c_int {
1754 let _u = record(Stat::VcpuResume);
1755 let this = &mut *this;
1756 let ret = this.resume();
1757 to_crosvm_rc(ret)
1758 }
1759
1760 #[no_mangle]
1761 pub unsafe extern "C" fn crosvm_vcpu_get_regs(
1762 this: *mut crosvm_vcpu,
1763 regs: *mut kvm_regs,
1764 ) -> c_int {
1765 let _u = record(Stat::VcpuGetRegs);
1766 let this = &mut *this;
1767 if this.regs.set {
1768 if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::REGS) {
1769 return -e;
1770 }
1771 }
1772 let regs = from_raw_parts_mut(regs as *mut u8, size_of::<kvm_regs>());
1773 if this.regs.get {
1774 regs.copy_from_slice(&this.regs.cache);
1775 0
1776 } else {
1777 let ret = this.get_state(VcpuRequest_StateSet::REGS, regs);
1778 to_crosvm_rc(ret)
1779 }
1780 }
1781
1782 #[no_mangle]
1783 pub unsafe extern "C" fn crosvm_vcpu_set_regs(
1784 this: *mut crosvm_vcpu,
1785 regs: *const kvm_regs,
1786 ) -> c_int {
1787 let _u = record(Stat::VcpuSetRegs);
1788 let this = &mut *this;
1789 this.regs.get = false;
1790 let regs = from_raw_parts(regs as *mut u8, size_of::<kvm_regs>());
1791 this.regs.set = true;
1792 this.regs.cache = regs.to_vec();
1793 0
1794 }
1795
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_sregs(
    this: *mut crosvm_vcpu,
    sregs: *mut kvm_sregs,
) -> c_int {
    let _u = record(Stat::VcpuGetSregs);
    let this = &mut *this;
    if this.sregs.set {
        if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::SREGS) {
            return -e;
        }
    }
    let sregs = from_raw_parts_mut(sregs as *mut u8, size_of::<kvm_sregs>());
    if this.sregs.get {
        sregs.copy_from_slice(&this.sregs.cache);
        0
    } else {
        let ret = this.get_state(VcpuRequest_StateSet::SREGS, sregs);
        to_crosvm_rc(ret)
    }
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_sregs(
    this: *mut crosvm_vcpu,
    sregs: *const kvm_sregs,
) -> c_int {
    let _u = record(Stat::VcpuSetSregs);
    let this = &mut *this;
    this.sregs.get = false;
    let sregs = from_raw_parts(sregs as *mut u8, size_of::<kvm_sregs>());
    this.sregs.set = true;
    this.sregs.cache = sregs.to_vec();
    0
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_fpu(this: *mut crosvm_vcpu, fpu: *mut kvm_fpu) -> c_int {
    let _u = record(Stat::GetFpu);
    let this = &mut *this;
    let fpu = from_raw_parts_mut(fpu as *mut u8, size_of::<kvm_fpu>());
    let ret = this.get_state(VcpuRequest_StateSet::FPU, fpu);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_fpu(this: *mut crosvm_vcpu, fpu: *const kvm_fpu) -> c_int {
    let _u = record(Stat::SetFpu);
    let this = &mut *this;
    let fpu = from_raw_parts(fpu as *mut u8, size_of::<kvm_fpu>());
    let ret = this.set_state(VcpuRequest_StateSet::FPU, fpu);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_debugregs(
    this: *mut crosvm_vcpu,
    dregs: *mut kvm_debugregs,
) -> c_int {
    let _u = record(Stat::GetDebugRegs);
    let this = &mut *this;
    if this.debugregs.set {
        if let Err(e) = this.set_state_from_cache(VcpuRequest_StateSet::DEBUGREGS) {
            return -e;
        }
    }
    let dregs = from_raw_parts_mut(dregs as *mut u8, size_of::<kvm_debugregs>());
    if this.debugregs.get {
        dregs.copy_from_slice(&this.debugregs.cache);
        0
    } else {
        let ret = this.get_state(VcpuRequest_StateSet::DEBUGREGS, dregs);
        to_crosvm_rc(ret)
    }
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_debugregs(
    this: *mut crosvm_vcpu,
    dregs: *const kvm_debugregs,
) -> c_int {
    let _u = record(Stat::SetDebugRegs);
    let this = &mut *this;
    this.debugregs.get = false;
    let dregs = from_raw_parts(dregs as *mut u8, size_of::<kvm_debugregs>());
    this.debugregs.set = true;
    this.debugregs.cache = dregs.to_vec();
    0
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_xcrs(
    this: *mut crosvm_vcpu,
    xcrs: *mut kvm_xcrs,
) -> c_int {
    let _u = record(Stat::GetXCRegs);
    let this = &mut *this;
    let xcrs = from_raw_parts_mut(xcrs as *mut u8, size_of::<kvm_xcrs>());
    let ret = this.get_state(VcpuRequest_StateSet::XCREGS, xcrs);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_xcrs(
    this: *mut crosvm_vcpu,
    xcrs: *const kvm_xcrs,
) -> c_int {
    let _u = record(Stat::SetXCRegs);
    let this = &mut *this;
    let xcrs = from_raw_parts(xcrs as *mut u8, size_of::<kvm_xcrs>());
    let ret = this.set_state(VcpuRequest_StateSet::XCREGS, xcrs);
    to_crosvm_rc(ret)
}

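/// Fills `cpuid_entries` with up to `entry_count` Hyper-V CPUID entries for this VCPU and
/// stores the number of entries actually produced in `*out_count`.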
#[no_mangle]
pub unsafe extern "C" fn crosvm_get_hyperv_cpuid(
    this: *mut crosvm_vcpu,
    entry_count: u32,
    cpuid_entries: *mut kvm_cpuid_entry2,
    out_count: *mut u32,
) -> c_int {
    let _u = record(Stat::GetHypervCpuid);
    let this = &mut *this;
    let cpuid_entries = from_raw_parts_mut(cpuid_entries, entry_count as usize);
    let mut cpuid_count: usize = 0;
    let ret = this.get_hyperv_cpuid(cpuid_entries, &mut cpuid_count);
    *out_count = cpuid_count as u32;
    to_crosvm_rc(ret)
}

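/// Reads up to `msr_count` MSRs for this VCPU into `msr_entries` and stores the number of
/// entries actually returned in `*out_count`.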
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_msrs(
    this: *mut crosvm_vcpu,
    msr_count: u32,
    msr_entries: *mut kvm_msr_entry,
    out_count: *mut u32,
) -> c_int {
    let _u = record(Stat::VcpuGetMsrs);
    let this = &mut *this;
    let msr_entries = from_raw_parts_mut(msr_entries, msr_count as usize);
    let mut count: usize = 0;
    let ret = this.get_msrs(msr_entries, &mut count);
    *out_count = count as u32;
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_msrs(
    this: *mut crosvm_vcpu,
    msr_count: u32,
    msr_entries: *const kvm_msr_entry,
) -> c_int {
    let _u = record(Stat::VcpuSetMsrs);
    let this = &mut *this;
    let msr_entries = from_raw_parts(msr_entries, msr_count as usize);
    let ret = this.set_msrs(msr_entries);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_cpuid(
    this: *mut crosvm_vcpu,
    cpuid_count: u32,
    cpuid_entries: *const kvm_cpuid_entry2,
) -> c_int {
    let _u = record(Stat::VcpuSetCpuid);
    let this = &mut *this;
    let cpuid_entries = from_raw_parts(cpuid_entries, cpuid_count as usize);
    let ret = this.set_cpuid(cpuid_entries);
    to_crosvm_rc(ret)
}

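/// Enables a capability on this VCPU. `flags` and all four `args` elements must be zero;
/// anything else is rejected with `-EINVAL`.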
#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_enable_capability(
    this: *mut crosvm_vcpu,
    capability: u32,
    flags: u32,
    args: *const u64,
) -> c_int {
    let _u = record(Stat::EnableVcpuCapability);
    let this = &mut *this;
    let args = slice::from_raw_parts(args, 4);

    if flags != 0 || args.iter().any(|v| *v != 0) {
        return -EINVAL;
    }

    let ret = this.enable_capability(capability);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_lapic_state(
    this: *mut crosvm_vcpu,
    state: *mut kvm_lapic_state,
) -> c_int {
    let _u = record(Stat::VcpuGetLapicState);
    let this = &mut *this;
    let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_lapic_state>());
    let ret = this.get_state(VcpuRequest_StateSet::LAPIC, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_lapic_state(
    this: *mut crosvm_vcpu,
    state: *const kvm_lapic_state,
) -> c_int {
    let _u = record(Stat::VcpuSetLapicState);
    let this = &mut *this;
    let state = from_raw_parts(state as *mut u8, size_of::<kvm_lapic_state>());
    let ret = this.set_state(VcpuRequest_StateSet::LAPIC, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_mp_state(
    this: *mut crosvm_vcpu,
    state: *mut kvm_mp_state,
) -> c_int {
    let _u = record(Stat::VcpuGetMpState);
    let this = &mut *this;
    let state = from_raw_parts_mut(state as *mut u8, size_of::<kvm_mp_state>());
    let ret = this.get_state(VcpuRequest_StateSet::MP, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_mp_state(
    this: *mut crosvm_vcpu,
    state: *const kvm_mp_state,
) -> c_int {
    let _u = record(Stat::VcpuSetMpState);
    let this = &mut *this;
    let state = from_raw_parts(state as *mut u8, size_of::<kvm_mp_state>());
    let ret = this.set_state(VcpuRequest_StateSet::MP, state);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_get_vcpu_events(
    this: *mut crosvm_vcpu,
    events: *mut kvm_vcpu_events,
) -> c_int {
    let _u = record(Stat::VcpuGetVcpuEvents);
    let this = &mut *this;
    let events = from_raw_parts_mut(events as *mut u8, size_of::<kvm_vcpu_events>());
    let ret = this.get_state(VcpuRequest_StateSet::EVENTS, events);
    to_crosvm_rc(ret)
}

#[no_mangle]
pub unsafe extern "C" fn crosvm_vcpu_set_vcpu_events(
    this: *mut crosvm_vcpu,
    events: *const kvm_vcpu_events,
) -> c_int {
    let _u = record(Stat::VcpuSetVcpuEvents);
    let this = &mut *this;
    let events = from_raw_parts(events as *mut u8, size_of::<kvm_vcpu_events>());
    let ret = this.set_state(VcpuRequest_StateSet::EVENTS, events);
    to_crosvm_rc(ret)
}
