1
2 /*--------------------------------------------------------------------*/
3 /*--- Xen Hypercalls syswrap-xen.c ---*/
4 /*--------------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2012 Citrix Systems
11 ian.campbell@citrix.com
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29 */
30
31 #include "pub_core_basics.h"
32 #include "pub_core_vki.h"
33
34 #if defined(ENABLE_XEN)
35
36 #include "pub_core_vkiscnums.h"
37 #include "pub_core_threadstate.h"
38 #include "pub_core_aspacemgr.h"
39 #include "pub_core_debuginfo.h" // VG_(di_notify_*)
40 #include "pub_core_transtab.h" // VG_(discard_translations)
41 #include "pub_core_xarray.h"
42 #include "pub_core_clientstate.h"
43 #include "pub_core_debuglog.h"
44 #include "pub_core_libcbase.h"
45 #include "pub_core_libcassert.h"
46 #include "pub_core_libcfile.h"
47 #include "pub_core_libcprint.h"
48 #include "pub_core_libcproc.h"
49 #include "pub_core_libcsignal.h"
50 #include "pub_core_mallocfree.h"
51 #include "pub_core_tooliface.h"
52 #include "pub_core_options.h"
53 #include "pub_core_scheduler.h"
54 #include "pub_core_signals.h"
55 #include "pub_core_syscall.h"
56 #include "pub_core_syswrap.h"
57 #include "pub_core_stacktrace.h" // For VG_(get_and_pp_StackTrace)()
58
59 #include "priv_types_n_macros.h"
60 #include "priv_syswrap-generic.h"
61 #include "priv_syswrap-xen.h"
62
63 #include <inttypes.h>
64
/* Shorthand for declaring the PRE()/POST() syscall-wrapper handlers of
   the "xen" hypercall family via the generic handler templates. */
#define PRE(name) static DEFN_PRE_TEMPLATE(xen, name)
#define POST(name) static DEFN_POST_TEMPLATE(xen, name)
67
/* Report an unimplemented subop of a hypercall and fail the call with
   ENOSYS.  Every PRE() handler's default: branch funnels here.
   'hypercall' names the top-level hypercall, 'subop' is the unhandled
   command value taken from the guest. */
static void bad_subop ( ThreadId tid,
                        SyscallArgLayout* layout,
                        /*MOD*/SyscallArgs* args,
                        /*OUT*/SyscallStatus* status,
                        /*OUT*/UWord* flags,
                        const HChar* hypercall,
                        UWord subop)
{
   /* BUG FIX: subop is an unsigned UWord, so print with %lu; the old
      %ld showed large subop values as negative numbers. */
   VG_(dmsg)("WARNING: unhandled %s subop: %lu\n",
             hypercall, subop);
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
89
/* PRE handler for __HYPERVISOR_memory_op.  ARG1 selects the subop,
   ARG2 points at that subop's argument block; each case marks the
   fields Xen will read as "must be defined" before the call runs. */
PRE(memory_op)
{
   PRINT("__HYPERVISOR_memory_op ( %ld, %lx )", ARG1, ARG2);

   switch (ARG1) {

   case VKI_XENMEM_maximum_ram_page:
      /* No inputs */
      break;

   case VKI_XENMEM_maximum_gpfn:
      /* ARG2 points directly at a domid, not at a struct. */
      PRE_MEM_READ("XENMEM_maximum_gpfn domid",
                   (Addr)ARG2, sizeof(vki_xen_domid_t));
      break;

   case VKI_XENMEM_machphys_mfn_list: {
      struct vki_xen_machphys_mfn_list *arg =
         (struct vki_xen_machphys_mfn_list *)ARG2;
      PRE_MEM_READ("XENMEM_machphys_mfn_list max_extents",
                   (Addr)&arg->max_extents, sizeof(arg->max_extents));
      /* Only the guest handle itself is checked here; the array it
         points at is an output (see POST(memory_op)). */
      PRE_MEM_READ("XENMEM_machphys_mfn_list extent_start",
                   (Addr)&arg->extent_start, sizeof(arg->extent_start));
      break;
   }

   case VKI_XENMEM_set_memory_map: {
      struct vki_xen_foreign_memory_map *arg =
         (struct vki_xen_foreign_memory_map *)ARG2;
      PRE_MEM_READ("XENMEM_set_memory_map domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_set_memory_map map",
                   (Addr)&arg->map, sizeof(arg->map));
      break;
   }
   case VKI_XENMEM_increase_reservation:
   case VKI_XENMEM_decrease_reservation:
   case VKI_XENMEM_populate_physmap:
   case VKI_XENMEM_claim_pages: {
      /* These four share xen_memory_reservation.  Only
         decrease_reservation and populate_physmap additionally hand
         Xen a pre-filled extent_start array to read. */
      struct xen_memory_reservation *memory_reservation =
         (struct xen_memory_reservation *)ARG2;
      const HChar *which;

      switch (ARG1) {
      case VKI_XENMEM_increase_reservation:
         which = "XENMEM_increase_reservation";
         break;
      case VKI_XENMEM_decrease_reservation:
         which = "XENMEM_decrease_reservation";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_populate_physmap:
         which = "XENMEM_populate_physmap";
         PRE_MEM_READ(which,
                      (Addr)memory_reservation->extent_start.p,
                      sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
         break;
      case VKI_XENMEM_claim_pages:
         which = "XENMEM_claim_pages";
         break;
      default:
         which = "XENMEM_unknown";
         break;
      }

      /* Header fields common to all four subops. */
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_start,
                   sizeof(memory_reservation->extent_start));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->nr_extents,
                   sizeof(memory_reservation->nr_extents));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->extent_order,
                   sizeof(memory_reservation->extent_order));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->mem_flags,
                   sizeof(memory_reservation->mem_flags));
      PRE_MEM_READ(which,
                   (Addr)&memory_reservation->domid,
                   sizeof(memory_reservation->domid));
      break;
   }

   case VKI_XENMEM_add_to_physmap: {
      struct vki_xen_add_to_physmap *arg =
         (struct vki_xen_add_to_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_add_to_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_add_to_physmap size",
                   (Addr)&arg->size, sizeof(arg->size));
      PRE_MEM_READ("XENMEM_add_to_physmap space",
                   (Addr)&arg->space, sizeof(arg->space));
      PRE_MEM_READ("XENMEM_add_to_physmap idx",
                   (Addr)&arg->idx, sizeof(arg->idx));
      PRE_MEM_READ("XENMEM_add_to_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_remove_from_physmap: {
      struct vki_xen_remove_from_physmap *arg =
         (struct vki_xen_remove_from_physmap *)ARG2;
      PRE_MEM_READ("XENMEM_remove_from_physmap domid",
                   (Addr)&arg->domid, sizeof(arg->domid));
      PRE_MEM_READ("XENMEM_remove_from_physmap gpfn",
                   (Addr)&arg->gpfn, sizeof(arg->gpfn));
      break;
   }

   case VKI_XENMEM_get_sharing_freed_pages:
   case VKI_XENMEM_get_sharing_shared_pages:
      /* No inputs marked for these. */
      break;

   case VKI_XENMEM_access_op: {
      struct vki_xen_mem_event_op *arg =
         (struct vki_xen_mem_event_op *)ARG2;
      PRE_MEM_READ("XENMEM_access_op domid",
                   (Addr)&arg->domain, sizeof(arg->domain));
      PRE_MEM_READ("XENMEM_access_op op",
                   (Addr)&arg->op, sizeof(arg->op));
      PRE_MEM_READ("XENMEM_access_op gfn",
                   (Addr)&arg->gfn, sizeof(arg->gfn));
      break;
   }
   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_memory_op", ARG1);
      break;
   }
}
221
PRE(mmuext_op)222 PRE(mmuext_op)
223 {
224 struct vki_xen_mmuext_op *ops = (struct vki_xen_mmuext_op *)ARG1;
225 unsigned int i, nr = ARG2;
226
227 for (i=0; i<nr; i++) {
228 struct vki_xen_mmuext_op *op = ops + i;
229 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP cmd",
230 (Addr)&op->cmd, sizeof(op->cmd));
231 switch(op->cmd) {
232 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
233 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
234 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
235 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
236 case VKI_XEN_MMUEXT_UNPIN_TABLE:
237 case VKI_XEN_MMUEXT_NEW_BASEPTR:
238 case VKI_XEN_MMUEXT_CLEAR_PAGE:
239 case VKI_XEN_MMUEXT_COPY_PAGE:
240 case VKI_XEN_MMUEXT_MARK_SUPER:
241 case VKI_XEN_MMUEXT_UNMARK_SUPER:
242 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
243 (Addr)&op->arg1.mfn,
244 sizeof(op->arg1.mfn));
245 break;
246
247 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
248 case VKI_XEN_MMUEXT_INVLPG_ALL:
249 case VKI_XEN_MMUEXT_SET_LDT:
250 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg1.mfn",
251 (Addr)&op->arg1.linear_addr,
252 sizeof(op->arg1.linear_addr));
253 break;
254
255 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
256 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
257 case VKI_XEN_MMUEXT_INVLPG_MULTI:
258 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
259 case VKI_XEN_MMUEXT_FLUSH_CACHE:
260 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
261 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
262 /* None */
263 break;
264 }
265
266 switch(op->cmd) {
267 case VKI_XEN_MMUEXT_SET_LDT:
268 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.nr_ents",
269 (Addr)&op->arg2.nr_ents,
270 sizeof(op->arg2.nr_ents));
271 break;
272
273 case VKI_XEN_MMUEXT_TLB_FLUSH_MULTI:
274 case VKI_XEN_MMUEXT_INVLPG_MULTI:
275 /* How many??? */
276 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.vcpumask",
277 (Addr)&op->arg2.vcpumask,
278 sizeof(op->arg2.vcpumask));
279 break;
280
281 case VKI_XEN_MMUEXT_COPY_PAGE:
282 PRE_MEM_READ("__HYPERVISOR_MMUEXT_OP arg2.src_mfn",
283 (Addr)&op->arg2.src_mfn,
284 sizeof(op->arg2.src_mfn));
285 break;
286
287 case VKI_XEN_MMUEXT_PIN_L1_TABLE:
288 case VKI_XEN_MMUEXT_PIN_L2_TABLE:
289 case VKI_XEN_MMUEXT_PIN_L3_TABLE:
290 case VKI_XEN_MMUEXT_PIN_L4_TABLE:
291 case VKI_XEN_MMUEXT_UNPIN_TABLE:
292 case VKI_XEN_MMUEXT_NEW_BASEPTR:
293 case VKI_XEN_MMUEXT_TLB_FLUSH_LOCAL:
294 case VKI_XEN_MMUEXT_INVLPG_LOCAL:
295 case VKI_XEN_MMUEXT_TLB_FLUSH_ALL:
296 case VKI_XEN_MMUEXT_INVLPG_ALL:
297 case VKI_XEN_MMUEXT_FLUSH_CACHE:
298 case VKI_XEN_MMUEXT_NEW_USER_BASEPTR:
299 case VKI_XEN_MMUEXT_CLEAR_PAGE:
300 case VKI_XEN_MMUEXT_FLUSH_CACHE_GLOBAL:
301 case VKI_XEN_MMUEXT_MARK_SUPER:
302 case VKI_XEN_MMUEXT_UNMARK_SUPER:
303 /* None */
304 break;
305 }
306 }
307 }
308
/* Shared PRE logic for both the current and the compat forms of the
   event-channel hypercall.  Marks the fields that the given cmd reads;
   anything unrecognised is reported as an unhandled subop. */
static void pre_evtchn_op(ThreadId tid,
                          SyscallArgLayout* layout,
                          /*MOD*/SyscallArgs* arrghs,
                          /*OUT*/SyscallStatus* status,
                          /*OUT*/UWord* flags,
                          __vki_u32 cmd, void *arg, int compat)
{
   PRINT("__HYPERVISOR_event_channel_op%s ( %d, %p )",
         compat ? "_compat" : "", cmd, arg);

   if (cmd == VKI_XEN_EVTCHNOP_alloc_unbound) {
      struct vki_xen_evtchn_alloc_unbound *unbound = arg;
      PRE_MEM_READ("EVTCHNOP_alloc_unbound dom",
                   (Addr)&unbound->dom, sizeof(unbound->dom));
      PRE_MEM_READ("EVTCHNOP_alloc_unbound remote_dom",
                   (Addr)&unbound->remote_dom,
                   sizeof(unbound->remote_dom));
      return;
   }

   /* Anything else is unhandled; name the entry point used. */
   bad_subop(tid, layout, arrghs, status, flags,
             compat ? "__HYPERVISOR_event_channel_op_compat"
                    : "__HYPERVISOR_event_channel_op",
             cmd);
}
339
PRE(evtchn_op)340 PRE(evtchn_op)
341 {
342 pre_evtchn_op(tid, layout, arrghs, status, flags,
343 ARG1, (void *)ARG2, 0);
344 }
345
PRE(evtchn_op_compat)346 PRE(evtchn_op_compat)
347 {
348 struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
349 PRE_MEM_READ("__HYPERVISOR_event_channel_op_compat",
350 ARG1, sizeof(*evtchn));
351
352 pre_evtchn_op(tid, layout, arrghs, status, flags,
353 evtchn->cmd, &evtchn->u, 1);
354 }
355
PRE(xen_version)356 PRE(xen_version)
357 {
358 PRINT("__HYPERVISOR_xen_version ( %ld, %lx )", ARG1, ARG2);
359
360 switch (ARG1) {
361 case VKI_XENVER_version:
362 case VKI_XENVER_extraversion:
363 case VKI_XENVER_compile_info:
364 case VKI_XENVER_capabilities:
365 case VKI_XENVER_changeset:
366 case VKI_XENVER_platform_parameters:
367 case VKI_XENVER_get_features:
368 case VKI_XENVER_pagesize:
369 case VKI_XENVER_guest_handle:
370 case VKI_XENVER_commandline:
371 /* No inputs */
372 break;
373
374 default:
375 bad_subop(tid, layout, arrghs, status, flags,
376 "__HYPERVISOR_xen_version", ARG1);
377 break;
378 }
379 }
380
PRE(grant_table_op)381 PRE(grant_table_op)
382 {
383 PRINT("__HYPERVISOR_grant_table_op ( %ld, 0x%lx, %ld )", ARG1, ARG2, ARG3);
384 switch (ARG1) {
385 case VKI_XEN_GNTTABOP_setup_table: {
386 struct vki_xen_gnttab_setup_table *gst =
387 (struct vki_xen_gnttab_setup_table*)ARG2;
388 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table dom",
389 (Addr)&gst->dom, sizeof(gst->dom));
390 PRE_MEM_READ("VKI_XEN_GNTTABOP_setup_table nr_frames",
391 (Addr)&gst->nr_frames, sizeof(gst->nr_frames));
392 break;
393 }
394 default:
395 bad_subop(tid, layout, arrghs, status, flags,
396 "__HYPERVISOR_grant_table_op", ARG1);
397 break;
398 }
399 }
400
PRE(sysctl)401 PRE(sysctl) {
402 struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;
403
404 PRINT("__HYPERVISOR_sysctl ( %d )", sysctl->cmd);
405
406 /*
407 * Common part of xen_sysctl:
408 * uint32_t cmd;
409 * uint32_t interface_version;
410 */
411 PRE_MEM_READ("__HYPERVISOR_sysctl", ARG1,
412 sizeof(vki_uint32_t) + sizeof(vki_uint32_t));
413
414 if (!sysctl)
415 return;
416
417 switch (sysctl->interface_version)
418 {
419 case 0x00000008:
420 case 0x00000009:
421 case 0x0000000a:
422 break;
423 default:
424 VG_(dmsg)("WARNING: sysctl version %"PRIx32" not supported\n",
425 sysctl->interface_version);
426 if (VG_(clo_verbosity) > 1) {
427 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
428 }
429 VG_(dmsg)("You may be able to write your own handler.\n");
430 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
431 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
432 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
433 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
434
435 SET_STATUS_Failure(VKI_EINVAL);
436 return;
437 }
438
439 #define __PRE_XEN_SYSCTL_READ(_sysctl, _union, _field) \
440 PRE_MEM_READ("XEN_SYSCTL_" #_sysctl " u." #_union "." #_field, \
441 (Addr)&sysctl->u._union._field, \
442 sizeof(sysctl->u._union._field))
443 #define PRE_XEN_SYSCTL_READ(_sysctl, _field) \
444 __PRE_XEN_SYSCTL_READ(_sysctl, _sysctl, _field)
445
446 switch (sysctl->cmd) {
447 case VKI_XEN_SYSCTL_readconsole:
448 /* These are all unconditionally read */
449 PRE_XEN_SYSCTL_READ(readconsole, clear);
450 PRE_XEN_SYSCTL_READ(readconsole, incremental);
451 PRE_XEN_SYSCTL_READ(readconsole, buffer);
452 PRE_XEN_SYSCTL_READ(readconsole, count);
453
454 /* 'index' only read if 'incremental' is nonzero */
455 if (sysctl->u.readconsole.incremental)
456 PRE_XEN_SYSCTL_READ(readconsole, index);
457 break;
458
459 case VKI_XEN_SYSCTL_getdomaininfolist:
460 switch (sysctl->interface_version)
461 {
462 case 0x00000008:
463 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, first_domain);
464 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, max_domains);
465 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000008, buffer);
466 break;
467 case 0x00000009:
468 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, first_domain);
469 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, max_domains);
470 PRE_XEN_SYSCTL_READ(getdomaininfolist_00000009, buffer);
471 break;
472 case 0x0000000a:
473 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, first_domain);
474 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, max_domains);
475 PRE_XEN_SYSCTL_READ(getdomaininfolist_0000000a, buffer);
476 break;
477 default:
478 VG_(dmsg)("WARNING: XEN_SYSCTL_getdomaininfolist for sysctl version "
479 "%"PRIx32" not implemented yet\n",
480 sysctl->interface_version);
481 SET_STATUS_Failure(VKI_EINVAL);
482 return;
483 }
484 break;
485
486 case VKI_XEN_SYSCTL_debug_keys:
487 PRE_XEN_SYSCTL_READ(debug_keys, keys);
488 PRE_XEN_SYSCTL_READ(debug_keys, nr_keys);
489 PRE_MEM_READ("XEN_SYSCTL_debug_keys *keys",
490 (Addr)sysctl->u.debug_keys.keys.p,
491 sysctl->u.debug_keys.nr_keys * sizeof(char));
492 break;
493
494 case VKI_XEN_SYSCTL_sched_id:
495 /* No inputs */
496 break;
497
498 case VKI_XEN_SYSCTL_cpupool_op:
499 PRE_XEN_SYSCTL_READ(cpupool_op, op);
500
501 switch(sysctl->u.cpupool_op.op) {
502 case VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE:
503 case VKI_XEN_SYSCTL_CPUPOOL_OP_DESTROY:
504 case VKI_XEN_SYSCTL_CPUPOOL_OP_INFO:
505 case VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU:
506 case VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU:
507 case VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN:
508 PRE_XEN_SYSCTL_READ(cpupool_op, cpupool_id);
509 }
510
511 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE)
512 PRE_XEN_SYSCTL_READ(cpupool_op, sched_id);
513
514 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_MOVEDOMAIN)
515 PRE_XEN_SYSCTL_READ(cpupool_op, domid);
516
517 if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_ADDCPU ||
518 sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_RMCPU)
519 PRE_XEN_SYSCTL_READ(cpupool_op, cpu);
520
521 break;
522
523 case VKI_XEN_SYSCTL_physinfo:
524 /* No input params */
525 break;
526
527 case VKI_XEN_SYSCTL_topologyinfo:
528 PRE_XEN_SYSCTL_READ(topologyinfo, max_cpu_index);
529 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_core);
530 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_socket);
531 PRE_XEN_SYSCTL_READ(topologyinfo, cpu_to_node);
532 break;
533
534 case VKI_XEN_SYSCTL_numainfo:
535 PRE_XEN_SYSCTL_READ(numainfo, max_node_index);
536 PRE_XEN_SYSCTL_READ(numainfo, node_to_memsize);
537 PRE_XEN_SYSCTL_READ(numainfo, node_to_memfree);
538 PRE_XEN_SYSCTL_READ(numainfo, node_to_node_distance);
539 break;
540
541 default:
542 bad_subop(tid, layout, arrghs, status, flags,
543 "__HYPERVISOR_sysctl", sysctl->cmd);
544 break;
545 }
546 #undef PRE_XEN_SYSCTL_READ
547 #undef __PRE_XEN_SYSCTL_READ
548 }
549
PRE(domctl)550 PRE(domctl)
551 {
552 struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;
553
554 PRINT("__HYPERVISOR_domctl ( %d ) on dom%d", domctl->cmd, domctl->domain);
555
556 /*
557 * Common part of xen_domctl:
558 * vki_uint32_t cmd;
559 * vki_uint32_t interface_version;
560 * vki_xen_domid_t domain;
561 */
562 PRE_MEM_READ("__HYPERVISOR_domctl", ARG1,
563 sizeof(vki_uint32_t) + sizeof(vki_uint32_t)
564 + sizeof(vki_xen_domid_t));
565
566 if (!domctl)
567 return;
568
569 switch (domctl->interface_version)
570 {
571 case 0x00000007:
572 case 0x00000008:
573 case 0x00000009:
574 break;
575 default:
576 VG_(dmsg)("WARNING: domctl version %"PRIx32" not supported\n",
577 domctl->interface_version);
578 if (VG_(clo_verbosity) > 1) {
579 VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
580 }
581 VG_(dmsg)("You may be able to write your own handler.\n");
582 VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
583 VG_(dmsg)("Nevertheless we consider this a bug. Please report\n");
584 VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
585 VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");
586
587 SET_STATUS_Failure(VKI_EINVAL);
588 return;
589 }
590
591 #define __PRE_XEN_DOMCTL_READ(_domctl, _union, _field) \
592 PRE_MEM_READ("XEN_DOMCTL_" #_domctl " u." #_union "." #_field, \
593 (Addr)&domctl->u._union._field, \
594 sizeof(domctl->u._union._field))
595 #define PRE_XEN_DOMCTL_READ(_domctl, _field) \
596 __PRE_XEN_DOMCTL_READ(_domctl, _domctl, _field)
597
598 switch (domctl->cmd) {
599 case VKI_XEN_DOMCTL_destroydomain:
600 case VKI_XEN_DOMCTL_pausedomain:
601 case VKI_XEN_DOMCTL_max_vcpus:
602 case VKI_XEN_DOMCTL_get_address_size:
603 case VKI_XEN_DOMCTL_gettscinfo:
604 case VKI_XEN_DOMCTL_getdomaininfo:
605 case VKI_XEN_DOMCTL_unpausedomain:
606 case VKI_XEN_DOMCTL_resumedomain:
607 /* No input fields. */
608 break;
609
610 case VKI_XEN_DOMCTL_createdomain:
611 PRE_XEN_DOMCTL_READ(createdomain, ssidref);
612 PRE_XEN_DOMCTL_READ(createdomain, handle);
613 PRE_XEN_DOMCTL_READ(createdomain, flags);
614 break;
615
616 case VKI_XEN_DOMCTL_gethvmcontext:
617 /* Xen unconditionally reads the 'buffer' pointer */
618 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, buffer);
619 /* Xen only consumes 'size' if 'buffer' is non NULL. A NULL
620 * buffer is a request for the required size. */
621 if ( domctl->u.hvmcontext.buffer.p )
622 __PRE_XEN_DOMCTL_READ(gethvmcontext, hvmcontext, size);
623 break;
624
625 case VKI_XEN_DOMCTL_sethvmcontext:
626 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, size);
627 __PRE_XEN_DOMCTL_READ(sethvmcontext, hvmcontext, buffer);
628 PRE_MEM_READ("XEN_DOMCTL_sethvmcontext *buffer",
629 (Addr)domctl->u.hvmcontext.buffer.p,
630 domctl->u.hvmcontext.size);
631 break;
632
633 case VKI_XEN_DOMCTL_gethvmcontext_partial:
634 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, type);
635 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, instance);
636 __PRE_XEN_DOMCTL_READ(gethvmcontext_partial, hvmcontext_partial, buffer);
637
638 switch (domctl->u.hvmcontext_partial.type) {
639 case VKI_HVM_SAVE_CODE(CPU):
640 if ( domctl->u.hvmcontext_partial.buffer.p )
641 PRE_MEM_WRITE("XEN_DOMCTL_gethvmcontext_partial *buffer",
642 (Addr)domctl->u.hvmcontext_partial.buffer.p,
643 VKI_HVM_SAVE_LENGTH(CPU));
644 break;
645 default:
646 bad_subop(tid, layout, arrghs, status, flags,
647 "__HYPERVISOR_domctl_gethvmcontext_partial type",
648 domctl->u.hvmcontext_partial.type);
649 break;
650 }
651 break;
652
653 case VKI_XEN_DOMCTL_max_mem:
654 PRE_XEN_DOMCTL_READ(max_mem, max_memkb);
655 break;
656
657 case VKI_XEN_DOMCTL_set_address_size:
658 __PRE_XEN_DOMCTL_READ(set_address_size, address_size, size);
659 break;
660
661 case VKI_XEN_DOMCTL_settscinfo:
662 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.tsc_mode);
663 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.gtsc_khz);
664 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.incarnation);
665 __PRE_XEN_DOMCTL_READ(settscinfo, tsc_info, info.elapsed_nsec);
666 break;
667
668 case VKI_XEN_DOMCTL_ioport_permission:
669 PRE_XEN_DOMCTL_READ(ioport_permission, first_port);
670 PRE_XEN_DOMCTL_READ(ioport_permission, nr_ports);
671 PRE_XEN_DOMCTL_READ(ioport_permission, allow_access);
672 break;
673
674 case VKI_XEN_DOMCTL_hypercall_init:
675 PRE_XEN_DOMCTL_READ(hypercall_init, gmfn);
676 break;
677
678 case VKI_XEN_DOMCTL_settimeoffset:
679 PRE_XEN_DOMCTL_READ(settimeoffset, time_offset_seconds);
680 break;
681
682 case VKI_XEN_DOMCTL_getvcpuinfo:
683 PRE_XEN_DOMCTL_READ(getvcpuinfo, vcpu);
684 break;
685
686 case VKI_XEN_DOMCTL_scheduler_op:
687 PRE_XEN_DOMCTL_READ(scheduler_op, sched_id);
688 PRE_XEN_DOMCTL_READ(scheduler_op, cmd);
689 if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_putinfo ) {
690 switch(domctl->u.scheduler_op.sched_id) {
691 case VKI_XEN_SCHEDULER_SEDF:
692 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.period);
693 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.slice);
694 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.latency);
695 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.extratime);
696 PRE_XEN_DOMCTL_READ(scheduler_op, u.sedf.weight);
697 break;
698 case VKI_XEN_SCHEDULER_CREDIT:
699 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.weight);
700 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit.cap);
701 break;
702 case VKI_XEN_SCHEDULER_CREDIT2:
703 PRE_XEN_DOMCTL_READ(scheduler_op, u.credit2.weight);
704 break;
705 case VKI_XEN_SCHEDULER_ARINC653:
706 break;
707 }
708 }
709 break;
710
711 case VKI_XEN_DOMCTL_getvcpuaffinity:
712 __PRE_XEN_DOMCTL_READ(getvcpuaffinity, vcpuaffinity, vcpu);
713 break;
714
715 case VKI_XEN_DOMCTL_setvcpuaffinity:
716 __PRE_XEN_DOMCTL_READ(setvcpuaffinity, vcpuaffinity, vcpu);
717 PRE_MEM_READ("XEN_DOMCTL_setvcpuaffinity u.vcpuaffinity.cpumap.bitmap",
718 (Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
719 domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
720 break;
721
722 case VKI_XEN_DOMCTL_getnodeaffinity:
723 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
724 break;
725 case VKI_XEN_DOMCTL_setnodeaffinity:
726 __PRE_XEN_DOMCTL_READ(nodeaffinity, nodeaffinity, nodemap.nr_bits);
727 PRE_MEM_READ("XEN_DOMCTL_setnodeaffinity u.nodeaffinity.cpumap.bitmap",
728 (Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
729 domctl->u.nodeaffinity.nodemap.nr_bits / 8);
730 break;
731
732 case VKI_XEN_DOMCTL_getvcpucontext:
733 __PRE_XEN_DOMCTL_READ(getvcpucontext, vcpucontext, vcpu);
734 break;
735
736 case VKI_XEN_DOMCTL_setvcpucontext:
737 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, vcpu);
738 __PRE_XEN_DOMCTL_READ(setvcpucontext, vcpucontext, ctxt.p);
739 break;
740
741 case VKI_XEN_DOMCTL_set_cpuid:
742 PRE_MEM_READ("XEN_DOMCTL_set_cpuid u.cpuid",
743 (Addr)&domctl->u.cpuid, sizeof(domctl->u.cpuid));
744 break;
745
746 case VKI_XEN_DOMCTL_getpageframeinfo3:
747 PRE_XEN_DOMCTL_READ(getpageframeinfo3, num);
748 PRE_XEN_DOMCTL_READ(getpageframeinfo3, array.p);
749 PRE_MEM_READ("XEN_DOMCTL_getpageframeinfo3 *u.getpageframeinfo3.array.p",
750 (Addr)domctl->u.getpageframeinfo3.array.p,
751 domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
752 break;
753
754 case VKI_XEN_DOMCTL_getvcpuextstate:
755 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, vcpu);
756 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, xfeature_mask);
757 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, size);
758 __PRE_XEN_DOMCTL_READ(getvcpuextstate, vcpuextstate, buffer);
759 break;
760
761 case VKI_XEN_DOMCTL_shadow_op:
762 PRE_XEN_DOMCTL_READ(shadow_op, op);
763
764 switch(domctl->u.shadow_op.op)
765 {
766 case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
767 /* No further inputs */
768 break;
769
770 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE:
771 PRE_XEN_DOMCTL_READ(shadow_op, mode);
772 switch(domctl->u.shadow_op.mode)
773 {
774 case XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY:
775 goto domctl_shadow_op_enable_logdirty;
776
777
778 default:
779 bad_subop(tid, layout, arrghs, status, flags,
780 "__HYPERVISOR_domctl shadowop mode",
781 domctl->u.shadow_op.mode);
782 break;
783 }
784
785 case VKI_XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY:
786 domctl_shadow_op_enable_logdirty:
787 /* No further inputs */
788 break;
789
790 case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
791 case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
792 PRE_XEN_DOMCTL_READ(shadow_op, dirty_bitmap);
793 PRE_XEN_DOMCTL_READ(shadow_op, pages);
794 break;
795
796 default:
797 bad_subop(tid, layout, arrghs, status, flags,
798 "__HYPERVISOR_domctl shadow(10)",
799 domctl->u.shadow_op.op);
800 break;
801 }
802 break;
803
804 case VKI_XEN_DOMCTL_set_max_evtchn:
805 PRE_XEN_DOMCTL_READ(set_max_evtchn, max_port);
806 break;
807
808 case VKI_XEN_DOMCTL_cacheflush:
809 PRE_XEN_DOMCTL_READ(cacheflush, start_pfn);
810 PRE_XEN_DOMCTL_READ(cacheflush, nr_pfns);
811 break;
812
813 case VKI_XEN_DOMCTL_set_access_required:
814 PRE_XEN_DOMCTL_READ(access_required, access_required);
815 break;
816
817 case VKI_XEN_DOMCTL_mem_event_op:
818 PRE_XEN_DOMCTL_READ(mem_event_op, op);
819 PRE_XEN_DOMCTL_READ(mem_event_op, mode);
820 break;
821
822 case VKI_XEN_DOMCTL_debug_op:
823 PRE_XEN_DOMCTL_READ(debug_op, op);
824 PRE_XEN_DOMCTL_READ(debug_op, vcpu);
825 break;
826
827 default:
828 bad_subop(tid, layout, arrghs, status, flags,
829 "__HYPERVISOR_domctl", domctl->cmd);
830 break;
831 }
832 #undef PRE_XEN_DOMCTL_READ
833 #undef __PRE_XEN_DOMCTL_READ
834 }
835
/* PRE handler for __HYPERVISOR_hvm_op.  ARG1 selects the subop, ARG2
   points at its argument struct; each case marks the fields Xen reads
   (and, for get_mem_access, the output field it writes). */
PRE(hvm_op)
{
   unsigned long op = ARG1;
   void *arg = (void *)(unsigned long)ARG2;

   PRINT("__HYPERVISOR_hvm_op ( %ld, %p )", op, arg);

/* Mark one field of the given argument struct type as read by Xen. */
#define __PRE_XEN_HVMOP_READ(_hvm_op, _type, _field)    \
   PRE_MEM_READ("XEN_HVMOP_" # _hvm_op " " #_field,     \
                (Addr)&((_type*)arg)->_field,           \
                sizeof(((_type*)arg)->_field))
/* Derives the struct type name by token-pasting on the subop name. */
#define PRE_XEN_HVMOP_READ(_hvm_op, _field)                             \
   __PRE_XEN_HVMOP_READ(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)

   switch (op) {
   case VKI_XEN_HVMOP_set_param:
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, index);
      __PRE_XEN_HVMOP_READ(set_param, struct vki_xen_hvm_param, value);
      break;

   case VKI_XEN_HVMOP_get_param:
      /* 'value' is the output for get_param, so not marked here. */
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, domid);
      __PRE_XEN_HVMOP_READ(get_param, struct vki_xen_hvm_param, index);
      break;

   case VKI_XEN_HVMOP_set_isa_irq_level:
      PRE_XEN_HVMOP_READ(set_isa_irq_level, domid);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, isa_irq);
      PRE_XEN_HVMOP_READ(set_isa_irq_level, level);
      break;

   case VKI_XEN_HVMOP_set_pci_link_route:
      PRE_XEN_HVMOP_READ(set_pci_link_route, domid);
      PRE_XEN_HVMOP_READ(set_pci_link_route, link);
      PRE_XEN_HVMOP_READ(set_pci_link_route, isa_irq);
      break;

   case VKI_XEN_HVMOP_set_mem_type:
      PRE_XEN_HVMOP_READ(set_mem_type, domid);
      PRE_XEN_HVMOP_READ(set_mem_type, hvmmem_type);
      PRE_XEN_HVMOP_READ(set_mem_type, nr);
      PRE_XEN_HVMOP_READ(set_mem_type, first_pfn);
      break;

   case VKI_XEN_HVMOP_set_mem_access:
      PRE_XEN_HVMOP_READ(set_mem_access, domid);
      PRE_XEN_HVMOP_READ(set_mem_access, hvmmem_access);
      PRE_XEN_HVMOP_READ(set_mem_access, first_pfn);
      /* if default access */
      /* 'nr' is only consumed when first_pfn is not the ~0ULL
         "set default access" sentinel. */
      if ( ((vki_xen_hvm_set_mem_access_t*)arg)->first_pfn != ~0ULL)
         PRE_XEN_HVMOP_READ(set_mem_access, nr);
      break;

   case VKI_XEN_HVMOP_get_mem_access:
      PRE_XEN_HVMOP_READ(get_mem_access, domid);
      PRE_XEN_HVMOP_READ(get_mem_access, pfn);

      /* hvmmem_access is the output slot; check it is writable. */
      PRE_MEM_WRITE("XEN_HVMOP_get_mem_access *hvmmem_access",
                    (Addr)&(((vki_xen_hvm_get_mem_access_t*)arg)->hvmmem_access),
                    sizeof(vki_uint16_t));
      break;

   case VKI_XEN_HVMOP_inject_trap:
      PRE_XEN_HVMOP_READ(inject_trap, domid);
      PRE_XEN_HVMOP_READ(inject_trap, vcpuid);
      PRE_XEN_HVMOP_READ(inject_trap, vector);
      PRE_XEN_HVMOP_READ(inject_trap, type);
      PRE_XEN_HVMOP_READ(inject_trap, error_code);
      PRE_XEN_HVMOP_READ(inject_trap, insn_len);
      PRE_XEN_HVMOP_READ(inject_trap, cr2);
      break;

   default:
      bad_subop(tid, layout, arrghs, status, flags,
                "__HYPERVISOR_hvm_op", op);
      break;
   }
#undef __PRE_XEN_HVMOP_READ
#undef PRE_XEN_HVMOP_READ
}
917
PRE(tmem_op)918 PRE(tmem_op)
919 {
920 struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;
921
922 PRINT("__HYPERVISOR_tmem_op ( %d )", tmem->cmd);
923
924 /* Common part for xen_tmem_op:
925 * vki_uint32_t cmd;
926 */
927 PRE_MEM_READ("__HYPERVISOR_tmem_op cmd", ARG1, sizeof(vki_uint32_t));
928
929
930 #define __PRE_XEN_TMEMOP_READ(_tmem, _union, _field) \
931 PRE_MEM_READ("XEN_tmem_op_" #_tmem " u." #_union "." #_field, \
932 (Addr)&tmem->u._union._field, \
933 sizeof(tmem->u._union._field))
934 #define PRE_XEN_TMEMOP_READ(_tmem, _field) \
935 __PRE_XEN_TMEMOP_READ(_tmem, _tmem, _field)
936
937 switch(tmem->cmd) {
938
939 case VKI_XEN_TMEM_control:
940
941 /* Common part for control hypercall:
942 * vki_int32_t pool_id;
943 * vki_uint32_t subop;
944 */
945 PRE_MEM_READ("__HYPERVISOR_tmem_op pool_id",
946 (Addr)&tmem->pool_id, sizeof(&tmem->pool_id));
947 PRE_XEN_TMEMOP_READ(ctrl, subop);
948
949 switch (tmem->u.ctrl.subop) {
950
951 case VKI_XEN_TMEMC_save_begin:
952 PRE_XEN_TMEMOP_READ(ctrl, cli_id);
953 PRE_XEN_TMEMOP_READ(ctrl, arg1);
954 PRE_XEN_TMEMOP_READ(ctrl, buf);
955 break;
956
957 default:
958 bad_subop(tid, layout, arrghs, status, flags,
959 "__HYPERVISOR_tmem_op_control", tmem->u.ctrl.subop);
960 }
961
962 break;
963
964 default:
965 bad_subop(tid, layout, arrghs, status, flags,
966 "__HYPERVISOR_tmem_op", ARG1);
967 }
968
969 #undef PRE_XEN_TMEMOP_READ
970 #undef __PRE_XEN_TMEMOP_READ
971 }
972
POST(memory_op)973 POST(memory_op)
974 {
975 switch (ARG1) {
976 case VKI_XENMEM_maximum_ram_page:
977 case VKI_XENMEM_set_memory_map:
978 case VKI_XENMEM_decrease_reservation:
979 case VKI_XENMEM_claim_pages:
980 case VKI_XENMEM_maximum_gpfn:
981 case VKI_XENMEM_remove_from_physmap:
982 case VKI_XENMEM_access_op:
983 /* No outputs */
984 break;
985 case VKI_XENMEM_increase_reservation:
986 case VKI_XENMEM_populate_physmap: {
987 struct xen_memory_reservation *memory_reservation =
988 (struct xen_memory_reservation *)ARG2;
989
990 POST_MEM_WRITE((Addr)memory_reservation->extent_start.p,
991 sizeof(vki_xen_pfn_t) * memory_reservation->nr_extents);
992 break;
993 }
994
995 case VKI_XENMEM_machphys_mfn_list: {
996 struct vki_xen_machphys_mfn_list *arg =
997 (struct vki_xen_machphys_mfn_list *)ARG2;
998 POST_MEM_WRITE((Addr)&arg->nr_extents, sizeof(arg->nr_extents));
999 POST_MEM_WRITE((Addr)arg->extent_start.p,
1000 sizeof(vki_xen_pfn_t) * arg->nr_extents);
1001 break;
1002 }
1003
1004 case VKI_XENMEM_add_to_physmap: {
1005 struct vki_xen_add_to_physmap *arg =
1006 (struct vki_xen_add_to_physmap *)ARG2;
1007 if (arg->space == VKI_XENMAPSPACE_gmfn_range)
1008 POST_MEM_WRITE(ARG2, sizeof(*arg));
1009 }
1010
1011 case VKI_XENMEM_get_sharing_freed_pages:
1012 case VKI_XENMEM_get_sharing_shared_pages:
1013 /* No outputs */
1014 break;
1015 }
1016 }
1017
POST(mmuext_op)1018 POST(mmuext_op)
1019 {
1020 unsigned int *pdone = (unsigned int *)ARG3;
1021 /* simplistic */
1022 POST_MEM_WRITE((Addr)pdone, sizeof(*pdone));
1023 }
1024
/* Shared post-handler for the native and compat event channel
   hypercalls.  Only alloc_unbound has an output field we track;
   'compat' is currently unused but distinguishes the two entry points. */
static void post_evtchn_op(ThreadId tid, __vki_u32 cmd, void *arg, int compat)
{
   if (cmd == VKI_XEN_EVTCHNOP_alloc_unbound) {
      struct vki_xen_evtchn_alloc_unbound *unbound = arg;
      /* Xen fills in the newly allocated port number. */
      POST_MEM_WRITE((Addr)&unbound->port, sizeof(unbound->port));
   }
}
1035
POST(evtchn_op)1036 POST(evtchn_op)
1037 {
1038 post_evtchn_op(tid, ARG1, (void *)ARG2, 0);
1039 }
1040
POST(evtchn_op_compat)1041 POST(evtchn_op_compat)
1042 {
1043 struct vki_xen_evtchn_op *evtchn = (struct vki_xen_evtchn_op *)ARG1;
1044 post_evtchn_op(tid, evtchn->cmd, &evtchn->u, 1);
1045 }
1046
POST(xen_version)1047 POST(xen_version)
1048 {
1049 switch (ARG1) {
1050 case VKI_XENVER_version:
1051 /* No outputs */
1052 break;
1053 case VKI_XENVER_extraversion:
1054 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_extraversion_t));
1055 break;
1056 case VKI_XENVER_compile_info:
1057 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_compile_info));
1058 break;
1059 case VKI_XENVER_capabilities:
1060 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_capabilities_info_t));
1061 break;
1062 case VKI_XENVER_changeset:
1063 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_changeset_info_t));
1064 break;
1065 case VKI_XENVER_platform_parameters:
1066 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_platform_parameters));
1067 break;
1068 case VKI_XENVER_get_features:
1069 POST_MEM_WRITE((Addr)ARG2, sizeof(struct vki_xen_feature_info));
1070 break;
1071 case VKI_XENVER_pagesize:
1072 /* No outputs */
1073 break;
1074 case VKI_XENVER_guest_handle:
1075 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_domain_handle_t));
1076 break;
1077 case VKI_XENVER_commandline:
1078 POST_MEM_WRITE((Addr)ARG2, sizeof(vki_xen_commandline_t));
1079 break;
1080 }
1081 }
1082
POST(grant_table_op)1083 POST(grant_table_op)
1084 {
1085 switch (ARG1) {
1086 case VKI_XEN_GNTTABOP_setup_table: {
1087 struct vki_xen_gnttab_setup_table *gst =
1088 (struct vki_xen_gnttab_setup_table*)ARG2;
1089 PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
1090 (Addr)&gst->status, sizeof(gst->status));
1091 PRE_MEM_WRITE("VKI_XEN_GNTTABOP_setup_table",
1092 (Addr)gst->frame_list.p,
1093 sizeof(*gst->frame_list.p) & gst->nr_frames);
1094 break;
1095 }
1096 }
1097 }
1098
/* Post-handler for __HYPERVISOR_sysctl: mark the output fields of the
   version-specific sysctl union as defined after a successful call. */
POST(sysctl)
{
   struct vki_xen_sysctl *sysctl = (struct vki_xen_sysctl *)ARG1;

   /* Bail out unless this is an interface version whose union layouts
      these wrappers were written against. */
   switch (sysctl->interface_version)
   {
   case 0x00000008:
   case 0x00000009:
   case 0x0000000a:
      break;
   default:
      return;
   }

/* Mark one field of sysctl->u.<_union> as written by the hypervisor. */
#define __POST_XEN_SYSCTL_WRITE(_sysctl, _union, _field) \
      POST_MEM_WRITE((Addr)&sysctl->u._union._field,        \
                     sizeof(sysctl->u._union._field))
/* Common case: the union member is named after the sysctl itself. */
#define POST_XEN_SYSCTL_WRITE(_sysctl, _field) \
      __POST_XEN_SYSCTL_WRITE(_sysctl, _sysctl, _field)

   switch (sysctl->cmd) {
   case VKI_XEN_SYSCTL_readconsole:
      /* Xen filled in 'count' bytes of console output. */
      POST_MEM_WRITE((Addr)sysctl->u.readconsole.buffer.p,
                     sysctl->u.readconsole.count * sizeof(char));
      break;

   case VKI_XEN_SYSCTL_getdomaininfolist:
      /* Layout of the getdomaininfolist struct differs per interface
         version, hence the versioned union members. */
      switch (sysctl->interface_version)
      {
      case 0x00000008:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000008, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000008.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000008.buffer.p)
                        * sysctl->u.getdomaininfolist_00000008.num_domains);
         break;
      case 0x00000009:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_00000009, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_00000009.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_00000009.buffer.p)
                        * sysctl->u.getdomaininfolist_00000009.num_domains);
         break;
      case 0x0000000a:
         POST_XEN_SYSCTL_WRITE(getdomaininfolist_0000000a, num_domains);
         POST_MEM_WRITE((Addr)sysctl->u.getdomaininfolist_0000000a.buffer.p,
                        sizeof(*sysctl->u.getdomaininfolist_0000000a.buffer.p)
                        * sysctl->u.getdomaininfolist_0000000a.num_domains);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_sched_id:
      POST_XEN_SYSCTL_WRITE(sched_id, sched_id);
      break;

   case VKI_XEN_SYSCTL_cpupool_op:
      /* Which fields are written depends on the cpupool sub-op. */
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_CREATE ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpupool_id);
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO) {
         POST_XEN_SYSCTL_WRITE(cpupool_op, sched_id);
         POST_XEN_SYSCTL_WRITE(cpupool_op, n_dom);
      }
      if (sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_INFO ||
          sysctl->u.cpupool_op.op == VKI_XEN_SYSCTL_CPUPOOL_OP_FREEINFO)
         POST_XEN_SYSCTL_WRITE(cpupool_op, cpumap);
      break;

   case VKI_XEN_SYSCTL_physinfo:
      switch (sysctl->interface_version)
      {
      case 0x00000008:
      case 0x00000009: /* Unchanged from version 8 */
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_00000008, capabilities);
         break;
      case 0x0000000a:
         /* Version 0xa added outstanding_pages. */
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, threads_per_core);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cores_per_socket);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_cpus);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_cpu_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, nr_nodes);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, max_node_id);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, cpu_khz);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, total_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, free_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, scrub_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, outstanding_pages);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, hw_cap[8]);
         POST_XEN_SYSCTL_WRITE(physinfo_0000000a, capabilities);
         break;
      }
      break;

   case VKI_XEN_SYSCTL_topologyinfo:
      POST_XEN_SYSCTL_WRITE(topologyinfo, max_cpu_index);
      /* The three per-cpu maps are optional: Xen writes each one only
         when the caller supplied a buffer for it. */
      if (sysctl->u.topologyinfo.cpu_to_core.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_core.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_socket.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_socket.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      if (sysctl->u.topologyinfo.cpu_to_node.p)
         POST_MEM_WRITE((Addr)sysctl->u.topologyinfo.cpu_to_node.p,
                     sizeof(uint32_t) * sysctl->u.topologyinfo.max_cpu_index);
      break;

   case VKI_XEN_SYSCTL_numainfo:
      POST_XEN_SYSCTL_WRITE(numainfo, max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memsize.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_memfree.p,
                     sizeof(uint64_t) * sysctl->u.numainfo.max_node_index);
      POST_MEM_WRITE((Addr)sysctl->u.numainfo.node_to_node_distance.p,
                     sizeof(uint32_t) * sysctl->u.numainfo.max_node_index);
      break;

   /* No outputs */
   case VKI_XEN_SYSCTL_debug_keys:
      break;
   }
#undef POST_XEN_SYSCTL_WRITE
#undef __POST_XEN_SYSCTL_WRITE
}
1232
/* Post-handler for __HYPERVISOR_domctl: mark the output fields of the
   version-specific domctl union as defined after a successful call. */
POST(domctl){
   struct vki_xen_domctl *domctl = (struct vki_xen_domctl *)ARG1;

   /* Only interface versions these wrappers understand. */
   switch (domctl->interface_version) {
   case 0x00000007:
   case 0x00000008:
   case 0x00000009:
      break;
   default:
      return;
   }

/* Mark one field of domctl->u.<_union> as written by the hypervisor. */
#define __POST_XEN_DOMCTL_WRITE(_domctl, _union, _field)        \
   POST_MEM_WRITE((Addr)&domctl->u._union._field,               \
                  sizeof(domctl->u._union._field));
/* Common case: the union member is named after the domctl itself. */
#define POST_XEN_DOMCTL_WRITE(_domctl, _field)          \
   __POST_XEN_DOMCTL_WRITE(_domctl, _domctl, _field)

   switch (domctl->cmd) {
   case VKI_XEN_DOMCTL_createdomain:
   case VKI_XEN_DOMCTL_destroydomain:
   case VKI_XEN_DOMCTL_pausedomain:
   case VKI_XEN_DOMCTL_max_mem:
   case VKI_XEN_DOMCTL_set_address_size:
   case VKI_XEN_DOMCTL_settscinfo:
   case VKI_XEN_DOMCTL_ioport_permission:
   case VKI_XEN_DOMCTL_hypercall_init:
   case VKI_XEN_DOMCTL_setvcpuaffinity:
   case VKI_XEN_DOMCTL_setvcpucontext:
   case VKI_XEN_DOMCTL_setnodeaffinity:
   case VKI_XEN_DOMCTL_set_cpuid:
   case VKI_XEN_DOMCTL_unpausedomain:
   case VKI_XEN_DOMCTL_sethvmcontext:
   case VKI_XEN_DOMCTL_debug_op:
   case VKI_XEN_DOMCTL_set_max_evtchn:
   case VKI_XEN_DOMCTL_cacheflush:
   case VKI_XEN_DOMCTL_resumedomain:
   case VKI_XEN_DOMCTL_set_access_required:
      /* No output fields */
      break;

   case VKI_XEN_DOMCTL_max_vcpus:
      POST_XEN_DOMCTL_WRITE(max_vcpus, max);
      break;

   case VKI_XEN_DOMCTL_get_address_size:
      __POST_XEN_DOMCTL_WRITE(get_address_size, address_size, size);
      break;

   case VKI_XEN_DOMCTL_gettscinfo:
      /* get and set share the tsc_info union member. */
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.tsc_mode);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.gtsc_khz);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.incarnation);
      __POST_XEN_DOMCTL_WRITE(settscinfo, tsc_info, info.elapsed_nsec);
      break;

   case VKI_XEN_DOMCTL_getvcpuinfo:
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, online);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, blocked);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, running);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu_time);
      POST_XEN_DOMCTL_WRITE(getvcpuinfo, cpu);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext:
      /* Xen unconditionally writes size... */
      __POST_XEN_DOMCTL_WRITE(gethvmcontext, hvmcontext, size);
      /* ...but only writes to the buffer if it was non NULL */
      if ( domctl->u.hvmcontext.buffer.p )
         POST_MEM_WRITE((Addr)domctl->u.hvmcontext.buffer.p,
                        sizeof(*domctl->u.hvmcontext.buffer.p)
                        * domctl->u.hvmcontext.size);
      break;

   case VKI_XEN_DOMCTL_gethvmcontext_partial:
      /* Only the CPU save record type is handled; other types fall
         through silently. */
      switch (domctl->u.hvmcontext_partial.type) {
      case VKI_HVM_SAVE_CODE(CPU):
         if ( domctl->u.hvmcontext_partial.buffer.p )
            POST_MEM_WRITE((Addr)domctl->u.hvmcontext_partial.buffer.p,
                           VKI_HVM_SAVE_LENGTH(CPU));
         break;
      }
      break;

   case VKI_XEN_DOMCTL_scheduler_op:
      /* Outputs only exist for the getinfo sub-command, and which
         fields are written depends on the active scheduler. */
      if ( domctl->u.scheduler_op.cmd == VKI_XEN_DOMCTL_SCHEDOP_getinfo ) {
         switch(domctl->u.scheduler_op.sched_id) {
         case VKI_XEN_SCHEDULER_SEDF:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.period);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.slice);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.latency);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.extratime);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.sedf.weight);
            break;
         case VKI_XEN_SCHEDULER_CREDIT:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.weight);
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit.cap);
            break;
         case VKI_XEN_SCHEDULER_CREDIT2:
            POST_XEN_DOMCTL_WRITE(scheduler_op, u.credit2.weight);
            break;
         case VKI_XEN_SCHEDULER_ARINC653:
            break;
         }
      }
      break;

   case VKI_XEN_DOMCTL_getvcpuaffinity:
      /* Bitmap of nr_bits bits, i.e. nr_bits/8 bytes. */
      POST_MEM_WRITE((Addr)domctl->u.vcpuaffinity.cpumap.bitmap.p,
                     domctl->u.vcpuaffinity.cpumap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getnodeaffinity:
      POST_MEM_WRITE((Addr)domctl->u.nodeaffinity.nodemap.bitmap.p,
                     domctl->u.nodeaffinity.nodemap.nr_bits / 8);
      break;

   case VKI_XEN_DOMCTL_getdomaininfo:
      /* The getdomaininfo struct grew new fields over successive
         interface versions, hence the versioned union members. */
      switch (domctl->interface_version) {
      case 0x00000007:
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000007, cpupool);
         break;
      case 0x00000008:
         /* Version 8 added paged_pages. */
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000008, cpupool);
         break;
      case 0x00000009:
         /* Version 9 added outstanding_pages. */
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, domain);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, flags);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, tot_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, outstanding_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shr_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, paged_pages);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, shared_info_frame);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpu_time);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, nr_online_vcpus);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, max_vcpu_id);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, ssidref);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, handle);
         POST_XEN_DOMCTL_WRITE(getdomaininfo_00000009, cpupool);
         break;
      }
      break;
   case VKI_XEN_DOMCTL_getvcpucontext:
      __POST_XEN_DOMCTL_WRITE(getvcpucontext, vcpucontext, ctxt.p);
      break;

   case VKI_XEN_DOMCTL_getpageframeinfo3:
      POST_MEM_WRITE((Addr)domctl->u.getpageframeinfo3.array.p,
                     domctl->u.getpageframeinfo3.num * sizeof(vki_xen_pfn_t));
      break;


   case VKI_XEN_DOMCTL_getvcpuextstate:
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, xfeature_mask);
      __POST_XEN_DOMCTL_WRITE(getvcpuextstate, vcpuextstate, size);
      POST_MEM_WRITE((Addr)domctl->u.vcpuextstate.buffer.p,
                     domctl->u.vcpuextstate.size);
      break;

   case VKI_XEN_DOMCTL_shadow_op:
      switch(domctl->u.shadow_op.op)
      {
      case VKI_XEN_DOMCTL_SHADOW_OP_OFF:
         /* No outputs */
         break;

      case VKI_XEN_DOMCTL_SHADOW_OP_CLEAN:
      case VKI_XEN_DOMCTL_SHADOW_OP_PEEK:
         POST_XEN_DOMCTL_WRITE(shadow_op, pages);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.fault_count);
         POST_XEN_DOMCTL_WRITE(shadow_op, stats.dirty_count);
         /* The dirty bitmap is only filled in when supplied. */
         if(domctl->u.shadow_op.dirty_bitmap.p)
            POST_MEM_WRITE((Addr)domctl->u.shadow_op.dirty_bitmap.p,
                           domctl->u.shadow_op.pages * sizeof(vki_uint8_t));
         break;

      default:
         break;
      }
      break;
   case VKI_XEN_DOMCTL_mem_event_op:
      POST_XEN_DOMCTL_WRITE(mem_event_op, port);

      break;
   }
#undef POST_XEN_DOMCTL_WRITE
#undef __POST_XEN_DOMCTL_WRITE
}
1445
POST(hvm_op)1446 POST(hvm_op)
1447 {
1448 unsigned long op = ARG1;
1449 void *arg = (void *)(unsigned long)ARG2;
1450
1451 #define __POST_XEN_HVMOP_WRITE(_hvm_op, _type, _field) \
1452 POST_MEM_WRITE((Addr)&((_type*)arg)->_field, \
1453 sizeof(((_type*)arg)->_field))
1454 #define POST_XEN_HVMOP_WRITE(_hvm_op, _field) \
1455 __POST_XEN_HVMOP_WRITE(_hvm_op, vki_xen_hvm_ ## _hvm_op ## _t, _field)
1456
1457 switch (op) {
1458 case VKI_XEN_HVMOP_set_param:
1459 case VKI_XEN_HVMOP_set_isa_irq_level:
1460 case VKI_XEN_HVMOP_set_pci_link_route:
1461 case VKI_XEN_HVMOP_set_mem_type:
1462 case VKI_XEN_HVMOP_set_mem_access:
1463 case VKI_XEN_HVMOP_inject_trap:
1464 /* No output paramters */
1465 break;
1466
1467 case VKI_XEN_HVMOP_get_param:
1468 __POST_XEN_HVMOP_WRITE(get_param, struct vki_xen_hvm_param, value);
1469 break;
1470
1471 case VKI_XEN_HVMOP_get_mem_access:
1472 POST_XEN_HVMOP_WRITE(get_mem_access, hvmmem_access);
1473 break;
1474 }
1475 #undef __POST_XEN_HVMOP_WRITE
1476 #undef POST_XEN_HVMOP_WRITE
1477 }
1478
POST(tmem_op)1479 POST(tmem_op)
1480 {
1481 struct vki_xen_tmem_op *tmem = (struct vki_xen_tmem_op *)ARG1;
1482
1483 switch(tmem->cmd) {
1484
1485 case VKI_XEN_TMEM_control:
1486
1487 switch(tmem->u.ctrl.subop) {
1488 /* No outputs */
1489 case VKI_XEN_TMEMC_save_begin:
1490 break;
1491 }
1492
1493 break;
1494 }
1495 }
1496
/* One hypercall wrapper: the pre/post handler pair plus the number of
   guest-register arguments the hypercall consumes (reported back to
   the caller via ARG8 in the dispatch templates below). */
typedef
struct {
   SyscallTableEntry entry;
   int nr_args;
}
XenHypercallTableEntry;
1503
/* HYPX_: wrap a hypercall with a pre-handler only.
   HYPXY: wrap a hypercall with both pre- and post-handlers.
   The table is indexed by hypercall number via designated
   initializers; unlisted numbers are zero-filled (NULL handlers) and
   therefore fall back to bad_hyper at lookup time. */
#define HYPX_(const, name, nr_args) \
   [const] = { { vgSysWrap_xen_##name##_before, NULL }, nr_args }
#define HYPXY(const, name, nr_args)                     \
   [const] = { { vgSysWrap_xen_##name##_before,         \
                 vgSysWrap_xen_##name##_after },        \
               nr_args }

static XenHypercallTableEntry hypercall_table[] = {
   //    __VKI_XEN_set_trap_table                                  // 0
   //    __VKI_XEN_mmu_update                                      // 1
   //    __VKI_XEN_set_gdt                                         // 2
   //    __VKI_XEN_stack_switch                                    // 3
   //    __VKI_XEN_set_callbacks                                   // 4

   //    __VKI_XEN_fpu_taskswitch                                  // 5
   //    __VKI_XEN_sched_op_compat                                 // 6
   //    __VKI_XEN_platform_op                                     // 7
   //    __VKI_XEN_set_debugreg                                    // 8
   //    __VKI_XEN_get_debugreg                                    // 9

   //    __VKI_XEN_update_descriptor                               // 10
   //                                                              // 11
   HYPXY(__VKI_XEN_memory_op,               memory_op,         2), // 12
   //    __VKI_XEN_multicall                                       // 13
   //    __VKI_XEN_update_va_mapping                               // 14

   //    __VKI_XEN_set_timer_op                                    // 15
   HYPXY(__VKI_XEN_event_channel_op_compat, evtchn_op_compat,  1), // 16
   HYPXY(__VKI_XEN_xen_version,             xen_version,       2), // 17
   //    __VKI_XEN_console_io                                      // 18
   //    __VKI_XEN_physdev_op_compat                               // 19

   HYPXY(__VKI_XEN_grant_table_op,          grant_table_op,    3), // 20
   //    __VKI_XEN_vm_assist                                       // 21
   //    __VKI_XEN_update_va_mapping_otherdomain                   // 22
   //    __VKI_XEN_iret, iret                                      // 23
   //    __VKI_XEN_vcpu_op, vcpu_op                                // 24

   //    __VKI_XEN_set_segment_base                                // 25
   HYPXY(__VKI_XEN_mmuext_op,               mmuext_op,         2), // 26
   //    __VKI_XEN_xsm_op                                          // 27
   //    __VKI_XEN_nmi_op                                          // 28
   //    __VKI_XEN_sched_op                                        // 29

   //    __VKI_XEN_callback_op                                     // 30
   //    __VKI_XEN_xenoprof_op                                     // 31
   HYPXY(__VKI_XEN_event_channel_op,        evtchn_op,         2), // 32
   //    __VKI_XEN_physdev_op                                      // 33
   HYPXY(__VKI_XEN_hvm_op,                  hvm_op,            2), // 34

   HYPXY(__VKI_XEN_sysctl,                  sysctl,            1), // 35
   HYPXY(__VKI_XEN_domctl,                  domctl,            1), // 36
   //    __VKI_XEN_kexec_op                                        // 37
   HYPXY(__VKI_XEN_tmem_op,                 tmem_op,           1), // 38
};
1559
/* Pre-handler installed for any hypercall we have no wrapper for:
   warn the user (with a backtrace at higher verbosity), then fail the
   hypercall with ENOSYS rather than letting it through unchecked.
   Each VG_(dmsg) call emits one separately-prefixed log line. */
static void bad_before ( ThreadId              tid,
                         SyscallArgLayout*     layout,
                         /*MOD*/SyscallArgs*   args,
                         /*OUT*/SyscallStatus* status,
                         /*OUT*/UWord*         flags )
{
   VG_(dmsg)("WARNING: unhandled hypercall: %s\n",
      VG_SYSNUM_STRING(args->sysno));
   if (VG_(clo_verbosity) > 1) {
      VG_(get_and_pp_StackTrace)(tid, VG_(clo_backtrace_size));
   }
   VG_(dmsg)("You may be able to write your own handler.\n");
   VG_(dmsg)("Read the file README_MISSING_SYSCALL_OR_IOCTL.\n");
   VG_(dmsg)("Nevertheless we consider this a bug.  Please report\n");
   VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html &\n");
   VG_(dmsg)("http://wiki.xen.org/wiki/Reporting_Bugs_against_Xen.\n");

   SET_STATUS_Failure(VKI_ENOSYS);
}
1579
/* Fallback entry returned for any hypercall without a real wrapper:
   warn-and-fail pre-handler, no post-handler, zero arguments. */
static XenHypercallTableEntry bad_hyper =
{ { bad_before, NULL }, 0 };
1582
ML_(get_xen_hypercall_entry)1583 static XenHypercallTableEntry* ML_(get_xen_hypercall_entry) ( UInt sysno )
1584 {
1585 XenHypercallTableEntry *ret = &bad_hyper;
1586
1587 const UInt hypercall_table_size
1588 = sizeof(hypercall_table) / sizeof(hypercall_table[0]);
1589
1590 /* Is it in the contiguous initial section of the table? */
1591 if (sysno < hypercall_table_size) {
1592 XenHypercallTableEntry* ent = &hypercall_table[sysno];
1593 if (ent->entry.before != NULL)
1594 ret = ent;
1595 }
1596
1597 /* Can't find a wrapper */
1598 return ret;
1599 }
1600
DEFN_PRE_TEMPLATE(xen,hypercall)1601 DEFN_PRE_TEMPLATE(xen, hypercall)
1602 {
1603 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1604
1605 /* Return number of arguments consumed */
1606 ARG8 = ent->nr_args;
1607
1608 vg_assert(ent);
1609 vg_assert(ent->entry.before);
1610 (ent->entry.before)( tid, layout, arrghs, status, flags );
1611
1612 }
1613
DEFN_POST_TEMPLATE(xen,hypercall)1614 DEFN_POST_TEMPLATE(xen, hypercall)
1615 {
1616 XenHypercallTableEntry *ent = ML_(get_xen_hypercall_entry)(SYSNO);
1617
1618 /* Return number of arguments consumed */
1619 ARG8 = ent->nr_args;
1620
1621 vg_assert(ent);
1622 if (ent->entry.after)
1623 (ent->entry.after)( tid, arrghs, status );
1624 }
1625
1626 #endif // defined(ENABLE_XEN)
1627