2 /*--------------------------------------------------------------------*/
3 /*--- Solaris-specific syscalls, etc. syswrap-solaris.c ---*/
4 /*--------------------------------------------------------------------*/
5
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
9
10 Copyright (C) 2011-2015 Petr Pavlu
11 setup@dagobah.cz
12
13 This program is free software; you can redistribute it and/or
14 modify it under the terms of the GNU General Public License as
15 published by the Free Software Foundation; either version 2 of the
16 License, or (at your option) any later version.
17
18 This program is distributed in the hope that it will be useful, but
19 WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
21 General Public License for more details.
22
23 You should have received a copy of the GNU General Public License
24 along with this program; if not, write to the Free Software
25 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26 02111-1307, USA.
27
28 The GNU General Public License is contained in the file COPYING.
29 */
30
31 /* Copyright 2013-2015, Ivo Raisr <ivosh@ivosh.net>. */
32
33 /* Copyright 2015-2015, Tomas Jedlicka <jedlickat@gmail.com>. */
34
35 /* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
36
37 #if defined(VGO_solaris)
38
39 #include "libvex_guest_offsets.h"
40 #include "pub_core_basics.h"
41 #include "pub_core_vki.h"
42 #include "pub_core_vkiscnums.h"
43 #include "pub_core_threadstate.h"
44 #include "pub_core_aspacemgr.h"
45 #include "pub_core_debuginfo.h" // VG_(di_notify_*)
46 #include "pub_core_debuglog.h"
47 #include "pub_core_clientstate.h"
48 #include "pub_core_gdbserver.h"
49 #include "pub_core_inner.h"
50 #include "pub_core_libcassert.h"
51 #include "pub_core_libcbase.h"
52 #include "pub_core_libcfile.h"
53 #include "pub_core_libcprint.h"
54 #include "pub_core_libcproc.h"
55 #include "pub_core_libcsignal.h"
56 #include "pub_core_machine.h" // VG_(get_SP)
57 #include "pub_core_mallocfree.h"
58 #include "pub_core_options.h"
59 #include "pub_core_tooliface.h"
60 #include "pub_core_transtab.h" // VG_(discard_translations)
61 #include "pub_core_scheduler.h"
62 #include "pub_core_sigframe.h"
63 #include "pub_core_signals.h"
64 #include "pub_core_stacks.h"
65 #include "pub_core_syscall.h"
66 #include "pub_core_syswrap.h"
67 #include "pub_core_ume.h"
68 #if defined(ENABLE_INNER_CLIENT_REQUEST)
69 #include "pub_core_clreq.h"
70 #endif
71
72 #include "priv_types_n_macros.h"
73 #include "priv_syswrap-generic.h"
74 #include "priv_syswrap-solaris.h"
75
/* Return the number of live (non-dead) threads.
   count_daemon == True:  count only daemon threads
   count_daemon == False: count only non-daemon threads */
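/* For example, count_living_daemon_threads(False) yields the number of live
   non-daemon threads; run_a_thread_NORETURN() below uses it to detect the
   last-one-standing case. */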
static UInt count_living_daemon_threads(Bool count_daemon)
80 {
81 UInt count = 0;
82 for (ThreadId tid = 1; tid < VG_N_THREADS; tid++)
83 if (VG_(threads)[tid].status != VgTs_Empty &&
84 VG_(threads)[tid].status != VgTs_Zombie &&
85 VG_(threads)[tid].os_state.daemon_thread == count_daemon)
86 count++;
87
88 return count;
89 }
90
91 /* Note: The following functions (thread_wrapper, run_a_thread_NORETURN,
92 ML_(start_thread_NORETURN), ML_(allocstack) and
93 VG_(main_thread_wrapper_NORETURN)) are based on the code in
94 syswrap-linux.c. Keep them synchronized! */
95
96 /* Run a thread from beginning to end and return the thread's
97 scheduler-return-code. */
static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
99 {
100 VgSchedReturnCode ret;
101 ThreadId tid = (ThreadId)tidW;
102 ThreadState *tst = VG_(get_ThreadState)(tid);
103
104 VG_(debugLog)(1, "syswrap-solaris",
105 "thread_wrapper(tid=%u): entry\n",
106 tid);
107
108 vg_assert(tst->status == VgTs_Init);
109
110 /* Make sure we get the CPU lock before doing anything significant. */
111 VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");
112
113 if (0)
114 VG_(printf)("thread tid %u started: stack = %p\n", tid, (void *)&tid);
115
116 /* Make sure error reporting is enabled in the new thread. */
117 tst->err_disablement_level = 0;
118
119 if (tid == 1)
120 VG_TRACK(pre_thread_first_insn, tid);
121 else {
      /* For newly created threads, VG_TRACK(pre_thread_first_insn, tid) is
         invoked later from PRE(sys_getsetcontext)() when setucontext()
         called from _thrp_setup() concludes new thread setup. Invoking it
         here would be far too early; the new thread has no stack yet. */
126 }
127
128 tst->os_state.lwpid = VG_(gettid)();
129 tst->os_state.threadgroup = VG_(getpid)();
130
131 /* Thread created with all signals blocked; scheduler will set the
132 appropriate mask. */
133
134 ret = VG_(scheduler)(tid);
135
136 vg_assert(VG_(is_exiting)(tid));
137
138 vg_assert(tst->status == VgTs_Runnable);
139 vg_assert(VG_(is_running_thread)(tid));
140
141 VG_(debugLog)(1, "syswrap-solaris",
142 "thread_wrapper(tid=%u): exit, schedreturncode %s\n",
143 tid, VG_(name_of_VgSchedReturnCode)(ret));
144
145 /* Return to caller, still holding the lock. */
146 return ret;
147 }
148
149 /* Run a thread all the way to the end, then do appropriate exit actions
150 (this is the last-one-out-turn-off-the-lights bit). */
static void run_a_thread_NORETURN(Word tidW)
152 {
153 ThreadId tid = (ThreadId)tidW;
154 VgSchedReturnCode src;
155 Int c;
156 ThreadState *tst;
157 #ifdef ENABLE_INNER_CLIENT_REQUEST
158 Int registered_vgstack_id;
159 #endif
160
161 VG_(debugLog)(1, "syswrap-solaris",
162 "run_a_thread_NORETURN(tid=%u): pre-thread_wrapper\n",
163 tid);
164
165 tst = VG_(get_ThreadState)(tid);
166 vg_assert(tst);
167
   /* A thread has two stacks:
      * the simulated stack (used by the synthetic CPU; the guest process
        runs on this stack).
      * the valgrind stack (used by the real CPU; Valgrind code runs on
        this stack).
      When Valgrind runs as an inner, it must tell the outer that its (real)
      stack is the one to use, e.g. for stack traces.
   */
176 INNER_REQUEST
177 (registered_vgstack_id
178 = VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
179 tst->os_state.valgrind_stack_init_SP));
180
181 /* Run the thread all the way through. */
182 src = thread_wrapper(tid);
183
184 VG_(debugLog)(1, "syswrap-solaris",
185 "run_a_thread_NORETURN(tid=%u): post-thread_wrapper\n",
186 tid);
187
188 c = count_living_daemon_threads(False);
189 vg_assert(c >= 1); /* Stay sane. */
190
191 /* Tell the tool that schedctl data belonging to this thread are gone. */
192 Addr a = tst->os_state.schedctl_data;
193 if (a != 0)
194 VG_TRACK(die_mem_munmap, a, sizeof(struct vki_sc_shared));
195
196 /* Deregister thread's stack. */
197 if (tst->os_state.stk_id != (UWord)-1)
198 VG_(deregister_stack)(tst->os_state.stk_id);
199
200 /* Tell the tool this thread is exiting. */
201 VG_TRACK(pre_thread_ll_exit, tid);
202
203 /* If the thread is exiting with errors disabled, complain loudly;
204 doing so is bad (does the user know this has happened?) Also, in all
205 cases, be paranoid and clear the flag anyway so that the thread slot is
206 safe in this respect if later reallocated. This should be unnecessary
207 since the flag should be cleared when the slot is reallocated, in
208 thread_wrapper(). */
209 if (tst->err_disablement_level > 0) {
210 VG_(umsg)(
211 "WARNING: exiting thread has error reporting disabled.\n"
212 "WARNING: possibly as a result of some mistake in the use\n"
213 "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
214 );
215 VG_(debugLog)(
216 1, "syswrap-solaris",
217 "run_a_thread_NORETURN(tid=%u): "
218 "WARNING: exiting thread has err_disablement_level = %u\n",
219 tid, tst->err_disablement_level
220 );
221 }
222 tst->err_disablement_level = 0;
223
224 if (c == 1) {
225 UInt daemon_threads = count_living_daemon_threads(True);
226 if (daemon_threads == 0)
227 VG_(debugLog)(1, "syswrap-solaris",
228 "run_a_thread_NORETURN(tid=%u): "
229 "last one standing\n",
230 tid);
231 else
232 VG_(debugLog)(1, "syswrap-solaris",
233 "run_a_thread_NORETURN(tid=%u): "
234 "last non-daemon thread standing "
235 "[daemon threads=%u]\n",
236 tid, daemon_threads);
237
238 /* We are the last non-daemon thread standing. Keep hold of the lock and
239 carry on to show final tool results, then exit the entire system.
240 Use the continuation pointer set at startup in m_main. */
241 if ((src == VgSrc_ExitThread) && (daemon_threads > 0))
242 src = VgSrc_ExitProcess;
243 (*VG_(address_of_m_main_shutdown_actions_NORETURN))(tid, src);
244 }
245 else {
246 VG_(debugLog)(1, "syswrap-solaris",
247 "run_a_thread_NORETURN(tid=%u): "
248 "not last one standing\n",
249 tid);
250
251 /* OK, thread is dead, but others still exist. Just exit. */
252
253 /* This releases the run lock. */
254 VG_(exit_thread)(tid);
255 vg_assert(tst->status == VgTs_Zombie);
256 vg_assert(sizeof(tst->status) == 4);
257
258 INNER_REQUEST(VALGRIND_STACK_DEREGISTER(registered_vgstack_id));
259
260 /* We have to use this sequence to terminate the thread to
261 prevent a subtle race. If VG_(exit_thread)() had left the
262 ThreadState as Empty, then it could have been reallocated, reusing
263 the stack while we're doing these last cleanups. Instead,
264 VG_(exit_thread) leaves it as Zombie to prevent reallocation. We
265 need to make sure we don't touch the stack between marking it Empty
266 and exiting. Hence the assembler. */
267 #if defined(VGP_x86_solaris)
268 /* Luckily lwp_exit doesn't take any arguments so we don't have to mess
269 with the stack. */
270 __asm__ __volatile__ (
271 "movl %[EMPTY], %[status]\n" /* set tst->status = VgTs_Empty */
272 "movl $"VG_STRINGIFY(__NR_lwp_exit)", %%eax\n"
273 "int $0x91\n" /* lwp_exit() */
274 : [status] "=m" (tst->status)
275 : [EMPTY] "n" (VgTs_Empty)
276 : "eax", "edx", "cc", "memory");
277 #elif defined(VGP_amd64_solaris)
278 __asm__ __volatile__ (
279 "movl %[EMPTY], %[status]\n" /* set tst->status = VgTs_Empty */
280 "movq $"VG_STRINGIFY(__NR_lwp_exit)", %%rax\n"
281 "syscall\n" /* lwp_exit() */
282 : [status] "=m" (tst->status)
283 : [EMPTY] "n" (VgTs_Empty)
284 : "rax", "rdx", "cc", "memory");
285 #else
286 # error "Unknown platform"
287 #endif
288
289 VG_(core_panic)("Thread exit failed?\n");
290 }
291
292 /*NOTREACHED*/
293 vg_assert(0);
294 }
295
Word ML_(start_thread_NORETURN)(void *arg)
297 {
298 ThreadState *tst = (ThreadState*)arg;
299 ThreadId tid = tst->tid;
300
301 run_a_thread_NORETURN((Word)tid);
302 /*NOTREACHED*/
303 vg_assert(0);
304 }
305
306 /* Allocate a stack for this thread, if it doesn't already have one.
307 They're allocated lazily, and never freed. Returns the initial stack
308 pointer value to use, or 0 if allocation failed. */
Addr ML_(allocstack)(ThreadId tid)
310 {
311 ThreadState *tst = VG_(get_ThreadState)(tid);
312 VgStack *stack;
313 Addr initial_SP;
314
315 /* Either the stack_base and stack_init_SP are both zero (in which
316 case a stack hasn't been allocated) or they are both non-zero,
317 in which case it has. */
318
319 if (tst->os_state.valgrind_stack_base == 0)
320 vg_assert(tst->os_state.valgrind_stack_init_SP == 0);
321
322 if (tst->os_state.valgrind_stack_base != 0)
323 vg_assert(tst->os_state.valgrind_stack_init_SP != 0);
324
325 /* If no stack is present, allocate one. */
326
327 if (tst->os_state.valgrind_stack_base == 0) {
328 stack = VG_(am_alloc_VgStack)( &initial_SP );
329 if (stack) {
330 tst->os_state.valgrind_stack_base = (Addr)stack;
331 tst->os_state.valgrind_stack_init_SP = initial_SP;
332 }
333 }
334
335 if (0)
336 VG_(printf)("stack for tid %u at %p; init_SP=%p\n",
337 tid,
338 (void*)tst->os_state.valgrind_stack_base,
339 (void*)tst->os_state.valgrind_stack_init_SP);
340
341 return tst->os_state.valgrind_stack_init_SP;
342 }
343
344 /* Allocate a stack for the main thread, and run it all the way to the
345 end. Although we already have a working VgStack (VG_(interim_stack)) it's
346 better to allocate a new one, so that overflow detection works uniformly
347 for all threads. Also initialize the GDT (for normal threads, this is done
348 in the PRE wrapper of lwp_create). */
void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
350 {
351 Addr sp;
352
353 VG_(debugLog)(1, "syswrap-solaris",
354 "entering VG_(main_thread_wrapper_NORETURN)\n");
355
356 sp = ML_(allocstack)(tid);
357 #if defined(ENABLE_INNER_CLIENT_REQUEST)
358 {
      // We must register the main thread's stack before the call to
      // ML_(call_on_new_stack_0_1), otherwise the outer Valgrind reports
      // a 'write error' on the not-yet-registered stack.
362 ThreadState *tst = VG_(get_ThreadState)(tid);
363 INNER_REQUEST
364 ((void)
365 VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
366 tst->os_state.valgrind_stack_init_SP));
367 }
368 #endif
369
370 #if defined(VGP_x86_solaris)
371 {
372 ThreadState *tst = VG_(get_ThreadState)(tid);
373 ML_(setup_gdt)(&tst->arch.vex);
374 ML_(update_gdt_lwpgs)(tid);
375 }
376 #elif defined(VGP_amd64_solaris)
377 /* Nothing to do. */
378 #else
379 # error "Unknown platform"
380 #endif
381
382 /* If we can't even allocate the first thread's stack, we're hosed.
383 Give up. */
384 vg_assert2(sp != 0, "Cannot allocate main thread's stack.");
385
386 /* Shouldn't be any other threads around yet. */
387 vg_assert(VG_(count_living_threads)() == 1);
388
389 ML_(call_on_new_stack_0_1)(
390 (Addr)sp, /* stack */
391 0, /* bogus return address */
392 run_a_thread_NORETURN, /* fn to call */
393 (Word)tid /* arg to give it */
394 );
395
396 /*NOTREACHED*/
397 vg_assert(0);
398 }
399
400 /* Deallocate the GDT for a thread. */
void VG_(cleanup_thread)(ThreadArchState *arch)
402 {
403 #if defined(VGP_x86_solaris)
404 ML_(cleanup_gdt)(&arch->vex);
405 #elif defined(VGP_amd64_solaris)
406 /* Nothing to do. */
407 #else
408 # error "Unknown platform"
409 #endif
410 }
411
/*
 * Child post-fork handler: notify the core that the schedctl data pages of
 * all threads are gone. Libc will issue new schedctl syscalls for threads
 * in the child when the need arises.
 *
 * See also POST(schedctl) and run_a_thread_NORETURN() when a thread exits.
 */
static void clean_schedctl_data(ThreadId tid)
420 {
421 UInt i;
422 for (i = 0; i < VG_N_THREADS; i++) {
423 ThreadState *tst = &VG_(threads)[i];
424 if (tst->status != VgTs_Empty) {
425 Addr a = tst->os_state.schedctl_data;
426 if (a != 0) {
427 tst->os_state.schedctl_data = 0;
428 a = VG_PGROUNDDN(a);
429 if (VG_(am_find_anon_segment(a)))
430 VG_(am_notify_munmap)(a, VKI_PAGE_SIZE);
431 }
432 }
433 }
434 }
435
void VG_(syswrap_init)(void)
437 {
438 VG_(atfork)(NULL, NULL, clean_schedctl_data);
439 }
440
/* Changes ownership of a memory mapping shared between the kernel and the
   client process. Such a mapping should have been pre-arranged by the kernel
   during process address space initialization. On startup Valgrind created
   a segment for this mapping, categorized as Valgrind-owned anonymous.
   The size of the mapping typically varies between Solaris versions but
   should always be page aligned.
   If 'once_only' is True, this function is expected to be called exactly
   once and the mapping ownership must not have been changed yet [useful
   during initialization]. If False, this function can be called many times
   but changes ownership only upon the first invocation [useful in syscall
   wrappers]. */
void VG_(change_mapping_ownership)(Addr addr, Bool once_only)
453 {
454 const NSegment *seg = VG_(am_find_anon_segment)(addr);
455 vg_assert(seg != NULL);
456 vg_assert(seg->start == addr);
457 vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
458 vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
459 SizeT size = seg->end - seg->start + 1;
460 vg_assert(size > 0);
461
462 Bool do_change = False;
463 if (once_only) {
464 vg_assert(VG_(am_is_valid_for_valgrind)(addr, size, VKI_PROT_READ));
465 do_change = True;
466 } else {
467 if (!VG_(am_is_valid_for_client)(addr, size, VKI_PROT_READ))
468 do_change = True;
469 }
470
471 if (do_change) {
472 Bool change_ownership_OK = VG_(am_change_ownership_v_to_c)(addr, size);
473 vg_assert(change_ownership_OK);
474
475 /* Tell the tool about just discovered mapping. */
476 VG_TRACK(new_mem_startup,
477 addr, size,
478 True /* readable? */,
479 False /* writable? */,
480 False /* executable? */,
481 0 /* di_handle */);
482 }
483 }
484
485 /* Calculate the Fletcher-32 checksum of a given buffer. */
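/* A quick worked example based on the loop below: for the two 16-bit blocks
   {0x0001, 0x0002} the running sums end up as sum1 = 3 and sum2 = 4, so the
   returned checksum is 0x00040003. */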
UInt ML_(fletcher32)(UShort *buf, SizeT blocks)
487 {
488 UInt sum1 = 0;
489 UInt sum2 = 0;
490 SizeT i;
491
492 for (i = 0; i < blocks; i++) {
493 sum1 = (sum1 + buf[i]) % 0xffff;
494 sum2 = (sum2 + sum1) % 0xffff;
495 }
496
497 return (sum2 << 16) | sum1;
498 }
499
500 /* Calculate the Fletcher-64 checksum of a given buffer. */
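/* Same scheme as ML_(fletcher32) above, but over 32-bit blocks, with both
   running sums taken modulo 0xffffffff and packed into a 64-bit result. */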
ULong ML_(fletcher64)(UInt *buf, SizeT blocks)
502 {
503 ULong sum1 = 0;
504 ULong sum2 = 0;
505 SizeT i;
506
507 for (i = 0; i < blocks; i++) {
508 sum1 = (sum1 + buf[i]) % 0xffffffff;
509 sum2 = (sum2 + sum1) % 0xffffffff;
510 }
511 return (sum2 << 32) | sum1;
512 }
513
/* Save a complete context (VCPU state, sigmask) of a given client thread
   into the vki_ucontext_t structure. This structure is supposed to be
   allocated in client memory; the caller must make sure that the memory can
   be dereferenced. The active tool is informed about the save. */
void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part)
519 {
520 ThreadState *tst = VG_(get_ThreadState)(tid);
521
522 VG_TRACK(pre_mem_write, part, tid, "save_context(uc)", (Addr)uc,
523 sizeof(*uc));
524
525 uc->uc_flags = VKI_UC_ALL;
526 VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_flags,
527 sizeof(uc->uc_flags));
528
529 /* Old context */
530 uc->uc_link = tst->os_state.oldcontext;
531 VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_link,
532 sizeof(uc->uc_link));
533
534 /* Clear uc->vki_uc_signo. This slot is used by the signal machinery to
535 store a signal number. */
536 VKI_UC_SIGNO(uc) = 0;
537
538 /* Sigmask */
539 uc->uc_sigmask = tst->sig_mask;
540 VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_sigmask,
541 sizeof(uc->uc_sigmask));
542
543 /* Stack */
544 {
545 if (tst->os_state.ustack
546 && ML_(safe_to_deref)(tst->os_state.ustack, sizeof(vki_stack_t))
547 && tst->os_state.ustack->ss_size) {
548 /* If ustack points to a valid stack copy it to ucontext. */
549 uc->uc_stack = *tst->os_state.ustack;
550 }
551 else {
552 /* Ustack is not valid. A correct stack has to be figured out
553 manually. */
554 SysRes res;
555 vki_stack_t altstack;
556
557 /* Get information about alternate stack. */
558 res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
559 vg_assert(!sr_isError(res));
560
561 if (altstack.ss_flags == VKI_SS_ONSTACK) {
562 /* If the alternate stack is active copy it to ucontext. */
563 uc->uc_stack = altstack;
564 }
565 else {
566 /* No information about stack is present, save information about
567 current main stack to ucontext. This branch should be reached
568 only by the main thread. */
569 ThreadState *tst2 = VG_(get_ThreadState)(1);
570 uc->uc_stack.ss_sp = (void*)(tst2->client_stack_highest_byte + 1
571 - tst2->client_stack_szB);
572 uc->uc_stack.ss_size = tst2->client_stack_szB;
573 uc->uc_stack.ss_flags = 0;
574 }
575 }
576
577 VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_stack,
578 sizeof(uc->uc_stack));
579 }
580
581 /* Save the architecture-specific part of the context. */
582 ML_(save_machine_context)(tid, uc, part);
583 }
584
/* Set a complete context (VCPU state, sigmask) of a given client thread
   according to values passed in the vki_ucontext_t structure. This structure
   is supposed to be allocated in client memory; the caller must make sure
   that the memory can be dereferenced. The active tool is informed about
   which parts of the structure are read.

   This function is the counterpart to VG_(save_context)(). */
void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part,
                          Bool esp_is_thrptr)
594 {
595 ThreadState *tst = VG_(get_ThreadState)(tid);
596 Addr old_esp = VG_(get_SP)(tid);
597
598 VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_flags)",
599 (Addr)&uc->uc_flags, sizeof(uc->uc_flags));
600
601 /* Old context */
602 VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_link)",
603 (Addr)&uc->uc_link, sizeof(uc->uc_link));
604 tst->os_state.oldcontext = uc->uc_link;
605
606 /* Sigmask */
607 if (uc->uc_flags & VKI_UC_SIGMASK) {
608 SysRes res;
609
610 VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_sigmask)",
611 (Addr)&uc->uc_sigmask, sizeof(uc->uc_sigmask));
612 res = VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, &uc->uc_sigmask,
613 NULL);
614 /* Setting signal mask should never fail. */
615 vg_assert(!sr_isError(res));
616 }
617
618 /* Stack */
619 if (uc->uc_flags & VKI_UC_STACK) {
620 VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_stack)",
621 (Addr)&uc->uc_stack, sizeof(uc->uc_stack));
622
623 if (uc->uc_stack.ss_flags == VKI_SS_ONSTACK) {
624 /* This seems to be a little bit dangerous but it is what the kernel
625 does. */
626 if (VG_(clo_trace_signals))
627 VG_(dmsg)("restore_context, sigaltstack: tid %u, "
628 "ss %p{%p,sz=%lu,flags=%#x}\n",
629 tid, &uc->uc_stack, uc->uc_stack.ss_sp,
630 (SizeT)uc->uc_stack.ss_size, uc->uc_stack.ss_flags);
631
632 tst->altstack.ss_sp = uc->uc_stack.ss_sp;
633 tst->altstack.ss_size = uc->uc_stack.ss_size;
634 /* Do not copy ss_flags, they are calculated dynamically by
635 Valgrind. */
636 }
637
638 /* Copyout the new stack. */
639 if (tst->os_state.ustack
640 && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
641 sizeof(*tst->os_state.ustack),
642 VKI_PROT_WRITE))
643 *tst->os_state.ustack = uc->uc_stack;
644 VG_TRACK(post_mem_write, part, tid, (Addr)&tst->os_state.ustack,
645 sizeof(tst->os_state.ustack));
646 }
647
648 /* Restore the architecture-specific part of the context. */
649 ML_(restore_machine_context)(tid, uc, part, esp_is_thrptr);
650
651 /* If the thread stack is already known, kill the deallocated stack area.
652 This is important when returning from a signal handler. */
653 if (tst->client_stack_highest_byte && tst->client_stack_szB) {
654 Addr end = tst->client_stack_highest_byte;
655 Addr start = end + 1 - tst->client_stack_szB;
656 Addr new_esp = VG_(get_SP)(tid);
657
658 /* Make sure that the old and new stack pointer are on the same (active)
659 stack. Alternate stack is currently never affected by this code. */
660 if (start <= old_esp && old_esp <= end
661 && start <= new_esp && new_esp <= end
662 && new_esp > old_esp)
663 VG_TRACK(die_mem_stack, old_esp - VG_STACK_REDZONE_SZB,
664 (new_esp - old_esp) + VG_STACK_REDZONE_SZB);
665 }
666 }
667
668 /* Set a client stack associated with a given thread id according to values
669 passed in the vki_stack_t structure. */
static void set_stack(ThreadId tid, vki_stack_t *st)
671 {
672 ThreadState *tst = VG_(get_ThreadState)(tid);
673 Addr new_start, new_end;
674 SizeT new_size;
675 Addr cur_start;
676 SizeT cur_size;
677
678 VG_(debugLog)(2, "syswrap-solaris",
679 "set stack: sp=%#lx, size=%#lx.\n",
680 (Addr)st->ss_sp, (SizeT)st->ss_size);
681
682 /* Stay sane. */
683 vg_assert(st->ss_flags == 0);
684
685 new_start = (Addr)st->ss_sp;
686 new_end = new_start + st->ss_size - 1;
687 new_size = st->ss_size;
688 cur_start = tst->client_stack_highest_byte + 1
689 - tst->client_stack_szB;
690 cur_size = tst->client_stack_szB;
691
692 if (new_start == cur_start && new_size == cur_size) {
693 /* No change is requested, bail out. */
694 return;
695 }
696
697 if (tid == 1 && (new_size == 0 || new_size > VG_(clstk_max_size))) {
      /* The main thread requests a stack without any size checking, or a
         stack that is too big. Fall back to the maximum allocated client
         stack. */
700
701 /* TODO I think it is possible to give up on setting main stack anyway.
702 Valgrind knows where it is located and it is already registered as
703 VG_(clstk_id). */
704
705 new_size = VG_(clstk_max_size);
706 new_end = tst->client_stack_highest_byte;
707 new_start = new_end + 1 - new_size;
708 }
709
710 if (tst->os_state.stk_id == (UWord)-1) {
711 /* This thread doesn't have a stack set yet. */
712 VG_(debugLog)(2, "syswrap-solaris",
713 "Stack set to %#lx-%#lx (new) for thread %u.\n",
714 new_start, new_end, tid);
715 tst->os_state.stk_id = VG_(register_stack)(new_start, new_end);
716 }
717 else {
718 /* Change a thread stack. */
719 VG_(debugLog)(2, "syswrap-solaris",
720 "Stack set to %#lx-%#lx (change) for thread %u.\n",
721 new_start, new_end, tid);
722 VG_(change_stack)(tst->os_state.stk_id, new_start, new_end);
723 }
724 tst->client_stack_highest_byte = new_end;
725 tst->client_stack_szB = new_size;
726 }
727
728 /* ---------------------------------------------------------------------
729 Door tracking. Used mainly for server side where door_return()
730 parameters alone do not contain sufficient information.
731 Also used on client side when new door descriptors are passed via
732 door_call() in desc_ptr. Not used for tracking door descriptors
733 explicitly open()'ed [generic fd tracking is used in that case].
734 ------------------------------------------------------------------ */
735
736 /* One of these is allocated for each created door. */
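/* Door records form a doubly-linked list headed by doors_recorded (below).
   Server-side entries are looked up by their server procedure, client-side
   entries by their file descriptor. */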
737 typedef struct OpenDoor
738 {
739 Bool server; /* TRUE = server door, FALSE = client door */
740 Int fd; /* The file descriptor. */
741 union {
742 /* Server side. */
743 struct {
744 Addr server_procedure; /* The door server procedure. */
745 HChar *pathname; /* NULL if unknown. */
746 };
747 /* Client side. */
748 struct {
749 /* Hook called during PRE door_call()
750 to check contents of params->data_ptr. */
751 void (*pre_mem_hook)(ThreadId tid, Int fd,
752 void *data_ptr, SizeT data_size);
753 /* Hook called during POST door_call()
754 to define contents of params->rbuf. */
755 void (*post_mem_hook)(ThreadId tid, Int fd,
756 void *rbuf, SizeT rsize);
757 };
758 };
759 struct OpenDoor *next, *prev;
760 } OpenDoor;
761
762 /* List of allocated door fds. */
763 static OpenDoor *doors_recorded = NULL;
764 static UInt nr_doors_recorded = 0;
765
static OpenDoor *door_record_create(void)
767 {
768 OpenDoor *d = VG_(malloc)("syswrap.door_record_create.1", sizeof(OpenDoor));
769 d->prev = NULL;
770 d->next = doors_recorded;
771 if (doors_recorded != NULL)
772 doors_recorded->prev = d;
773 doors_recorded = d;
774 nr_doors_recorded += 1;
775
776 return d;
777 }
778
779 /* Records a server door. */
static void door_record_server(ThreadId tid, Addr server_procedure, Int fd)
781 {
782 OpenDoor *d = doors_recorded;
783
784 while (d != NULL) {
785 if ((d->server == TRUE) && (d->server_procedure == server_procedure)) {
786 if (d->pathname) {
787 VG_(free)(d->pathname);
788 }
789 break;
790 }
791 d = d->next;
792 }
793
794 if (d == NULL)
795 d = door_record_create();
796 vg_assert(d != NULL);
797
798 d->server = TRUE;
799 d->fd = fd;
800 d->server_procedure = server_procedure;
801 d->pathname = NULL;
802 }
803
804 /* Records a client door. */
static void door_record_client(ThreadId tid, Int fd,
   void (*pre_mem_hook)(ThreadId tid, Int fd, void *data_ptr, SizeT data_size),
   void (*post_mem_hook)(ThreadId tid, Int fd, void *rbuf, SizeT rsize))
808 {
809 OpenDoor *d = doors_recorded;
810
811 while (d != NULL) {
812 if ((d->server == FALSE) && (d->fd == fd))
813 break;
814 d = d->next;
815 }
816
817 if (d == NULL)
818 d = door_record_create();
819 vg_assert(d != NULL);
820
821 d->server = FALSE;
822 d->fd = fd;
823 d->pre_mem_hook = pre_mem_hook;
824 d->post_mem_hook = post_mem_hook;
825 }
826
827 /* Revokes an open door, be it server side or client side. */
static void door_revoke(ThreadId tid, Int fd)
829 {
830 OpenDoor *d = doors_recorded;
831
832 while (d != NULL) {
833 if (d->fd == fd) {
834 if (d->prev != NULL)
835 d->prev->next = d->next;
836 else
837 doors_recorded = d->next;
838 if (d->next != NULL)
839 d->next->prev = d->prev;
840
841 if ((d->server == TRUE) && (d->pathname != NULL))
842 VG_(free)(d->pathname);
843 VG_(free)(d);
844 nr_doors_recorded -= 1;
845 return;
846 }
847 d = d->next;
848 }
849 }
850
851 /* Attaches a server door to a filename. */
static void door_server_fattach(Int fd, HChar *pathname)
853 {
854 OpenDoor *d = doors_recorded;
855
856 while (d != NULL) {
857 if (d->fd == fd) {
858 vg_assert(d->server == TRUE);
859
860 if (d->pathname != NULL)
861 VG_(free)(d->pathname);
862 d->pathname = VG_(strdup)("syswrap.door_server_fattach.1", pathname);
863 return;
864 }
865 d = d->next;
866 }
867 }
868
869 /* Finds a server door based on server procedure. */
static const OpenDoor *door_find_by_proc(Addr server_procedure)
871 {
872 OpenDoor *d = doors_recorded;
873
874 while (d != NULL) {
875 if ((d->server) && (d->server_procedure == server_procedure))
876 return d;
877 d = d->next;
878 }
879
880 return NULL;
881 }
882
883 /* Finds a client door based on fd. */
static const OpenDoor *door_find_by_fd(Int fd)
885 {
886 OpenDoor *d = doors_recorded;
887
888 while (d != NULL) {
889 if ((d->server == FALSE) && (d->fd == fd))
890 return d;
891 d = d->next;
892 }
893
894 return NULL;
895 }
896
897 /* ---------------------------------------------------------------------
898 PRE/POST wrappers for Solaris-specific syscalls
899 ------------------------------------------------------------------ */
900
901 #define PRE(name) DEFN_PRE_TEMPLATE(solaris, name)
902 #define POST(name) DEFN_POST_TEMPLATE(solaris, name)
903
904 /* prototypes */
905 DECL_TEMPLATE(solaris, sys_exit);
906 #if defined(SOLARIS_SPAWN_SYSCALL)
907 DECL_TEMPLATE(solaris, sys_spawn);
908 #endif /* SOLARIS_SPAWN_SYSCALL */
909 #if defined(SOLARIS_OLD_SYSCALLS)
910 DECL_TEMPLATE(solaris, sys_open);
911 #endif /* SOLARIS_OLD_SYSCALLS */
912 DECL_TEMPLATE(solaris, sys_close);
913 DECL_TEMPLATE(solaris, sys_linkat);
914 DECL_TEMPLATE(solaris, sys_symlinkat);
915 DECL_TEMPLATE(solaris, sys_time);
916 DECL_TEMPLATE(solaris, sys_brk);
917 DECL_TEMPLATE(solaris, sys_stat);
918 DECL_TEMPLATE(solaris, sys_lseek);
919 DECL_TEMPLATE(solaris, sys_mount);
920 DECL_TEMPLATE(solaris, sys_readlinkat);
921 DECL_TEMPLATE(solaris, sys_stime);
922 DECL_TEMPLATE(solaris, sys_fstat);
923 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
924 DECL_TEMPLATE(solaris, sys_frealpathat);
925 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
926 DECL_TEMPLATE(solaris, sys_stty);
927 DECL_TEMPLATE(solaris, sys_gtty);
928 DECL_TEMPLATE(solaris, sys_pgrpsys);
929 DECL_TEMPLATE(solaris, sys_pipe);
930 DECL_TEMPLATE(solaris, sys_faccessat);
931 DECL_TEMPLATE(solaris, sys_mknodat);
932 DECL_TEMPLATE(solaris, sys_sysi86);
933 DECL_TEMPLATE(solaris, sys_shmsys);
934 DECL_TEMPLATE(solaris, sys_semsys);
935 DECL_TEMPLATE(solaris, sys_ioctl);
936 DECL_TEMPLATE(solaris, sys_fchownat);
937 DECL_TEMPLATE(solaris, sys_fdsync);
938 DECL_TEMPLATE(solaris, sys_execve);
939 DECL_TEMPLATE(solaris, sys_fcntl);
940 DECL_TEMPLATE(solaris, sys_renameat);
941 DECL_TEMPLATE(solaris, sys_unlinkat);
942 DECL_TEMPLATE(solaris, sys_fstatat);
943 DECL_TEMPLATE(solaris, sys_openat);
944 DECL_TEMPLATE(solaris, sys_tasksys);
945 DECL_TEMPLATE(solaris, sys_getpagesizes);
946 DECL_TEMPLATE(solaris, sys_lwp_park);
947 DECL_TEMPLATE(solaris, sys_sendfilev);
948 #if defined(SOLARIS_LWP_NAME_SYSCALL)
949 DECL_TEMPLATE(solaris, sys_lwp_name);
950 #endif /* SOLARIS_LWP_NAME_SYSCALL */
951 DECL_TEMPLATE(solaris, sys_privsys);
952 DECL_TEMPLATE(solaris, sys_ucredsys);
953 DECL_TEMPLATE(solaris, sys_getmsg);
954 DECL_TEMPLATE(solaris, sys_putmsg);
955 DECL_TEMPLATE(solaris, sys_lstat);
956 DECL_TEMPLATE(solaris, sys_sigprocmask);
957 DECL_TEMPLATE(solaris, sys_sigaction);
958 DECL_TEMPLATE(solaris, sys_sigpending);
959 DECL_TEMPLATE(solaris, sys_getsetcontext);
960 DECL_TEMPLATE(solaris, sys_fchmodat);
961 DECL_TEMPLATE(solaris, sys_mkdirat);
962 DECL_TEMPLATE(solaris, sys_statvfs);
963 DECL_TEMPLATE(solaris, sys_fstatvfs);
964 DECL_TEMPLATE(solaris, sys_nfssys);
965 DECL_TEMPLATE(solaris, sys_waitid);
966 #if defined(SOLARIS_UTIMESYS_SYSCALL)
967 DECL_TEMPLATE(solaris, sys_utimesys);
968 #endif /* SOLARIS_UTIMESYS_SYSCALL */
969 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
970 DECL_TEMPLATE(solaris, sys_utimensat);
971 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
972 DECL_TEMPLATE(solaris, sys_sigresend);
973 DECL_TEMPLATE(solaris, sys_priocntlsys);
974 DECL_TEMPLATE(solaris, sys_pathconf);
975 DECL_TEMPLATE(solaris, sys_mmap);
976 #if defined(SOLARIS_UUIDSYS_SYSCALL)
977 DECL_TEMPLATE(solaris, sys_uuidsys);
978 #endif /* SOLARIS_UUIDSYS_SYSCALL */
979 DECL_TEMPLATE(solaris, sys_mmapobj);
980 DECL_TEMPLATE(solaris, sys_memcntl);
981 DECL_TEMPLATE(solaris, sys_getpmsg);
982 DECL_TEMPLATE(solaris, sys_putpmsg);
983 #if defined(SOLARIS_OLD_SYSCALLS)
984 DECL_TEMPLATE(solaris, sys_rename);
985 #endif /* SOLARIS_OLD_SYSCALLS */
986 DECL_TEMPLATE(solaris, sys_uname);
987 DECL_TEMPLATE(solaris, sys_setegid);
988 DECL_TEMPLATE(solaris, sys_sysconfig);
989 DECL_TEMPLATE(solaris, sys_systeminfo);
990 DECL_TEMPLATE(solaris, sys_seteuid);
991 DECL_TEMPLATE(solaris, sys_forksys);
992 DECL_TEMPLATE(solaris, sys_sigtimedwait);
993 DECL_TEMPLATE(solaris, sys_yield);
994 DECL_TEMPLATE(solaris, sys_lwp_sema_post);
995 DECL_TEMPLATE(solaris, sys_lwp_sema_trywait);
996 DECL_TEMPLATE(solaris, sys_lwp_detach);
997 DECL_TEMPLATE(solaris, sys_fchroot);
998 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
999 DECL_TEMPLATE(solaris, sys_system_stats);
1000 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
1001 DECL_TEMPLATE(solaris, sys_gettimeofday);
1002 DECL_TEMPLATE(solaris, sys_lwp_create);
1003 DECL_TEMPLATE(solaris, sys_lwp_exit);
1004 DECL_TEMPLATE(solaris, sys_lwp_suspend);
1005 DECL_TEMPLATE(solaris, sys_lwp_continue);
1006 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
1007 DECL_TEMPLATE(solaris, sys_lwp_sigqueue);
1008 #else
1009 DECL_TEMPLATE(solaris, sys_lwp_kill);
1010 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
1011 DECL_TEMPLATE(solaris, sys_lwp_self);
1012 DECL_TEMPLATE(solaris, sys_lwp_sigmask);
1013 DECL_TEMPLATE(solaris, sys_lwp_private);
1014 DECL_TEMPLATE(solaris, sys_lwp_wait);
1015 DECL_TEMPLATE(solaris, sys_lwp_mutex_wakeup);
1016 DECL_TEMPLATE(solaris, sys_lwp_cond_wait);
1017 DECL_TEMPLATE(solaris, sys_lwp_cond_broadcast);
1018 DECL_TEMPLATE(solaris, sys_pread);
1019 DECL_TEMPLATE(solaris, sys_pwrite);
1020 DECL_TEMPLATE(solaris, sys_rusagesys);
1021 DECL_TEMPLATE(solaris, sys_port);
1022 DECL_TEMPLATE(solaris, sys_pollsys);
1023 DECL_TEMPLATE(solaris, sys_labelsys);
1024 DECL_TEMPLATE(solaris, sys_acl);
1025 DECL_TEMPLATE(solaris, sys_auditsys);
1026 DECL_TEMPLATE(solaris, sys_p_online);
1027 DECL_TEMPLATE(solaris, sys_sigqueue);
1028 DECL_TEMPLATE(solaris, sys_clock_gettime);
1029 DECL_TEMPLATE(solaris, sys_clock_settime);
1030 DECL_TEMPLATE(solaris, sys_clock_getres);
1031 DECL_TEMPLATE(solaris, sys_timer_create);
1032 DECL_TEMPLATE(solaris, sys_timer_delete);
1033 DECL_TEMPLATE(solaris, sys_timer_settime);
1034 DECL_TEMPLATE(solaris, sys_timer_gettime);
1035 DECL_TEMPLATE(solaris, sys_timer_getoverrun);
1036 DECL_TEMPLATE(solaris, sys_facl);
1037 DECL_TEMPLATE(solaris, sys_door);
1038 DECL_TEMPLATE(solaris, sys_schedctl);
1039 DECL_TEMPLATE(solaris, sys_pset);
1040 DECL_TEMPLATE(solaris, sys_resolvepath);
1041 DECL_TEMPLATE(solaris, sys_lwp_mutex_timedlock);
1042 DECL_TEMPLATE(solaris, sys_lwp_rwlock_sys);
1043 DECL_TEMPLATE(solaris, sys_lwp_sema_timedwait);
1044 DECL_TEMPLATE(solaris, sys_zone);
1045 DECL_TEMPLATE(solaris, sys_getcwd);
1046 DECL_TEMPLATE(solaris, sys_so_socket);
1047 DECL_TEMPLATE(solaris, sys_so_socketpair);
1048 DECL_TEMPLATE(solaris, sys_bind);
1049 DECL_TEMPLATE(solaris, sys_listen);
1050 DECL_TEMPLATE(solaris, sys_accept);
1051 DECL_TEMPLATE(solaris, sys_connect);
1052 DECL_TEMPLATE(solaris, sys_shutdown);
1053 DECL_TEMPLATE(solaris, sys_recv);
1054 DECL_TEMPLATE(solaris, sys_recvfrom);
1055 DECL_TEMPLATE(solaris, sys_recvmsg);
1056 DECL_TEMPLATE(solaris, sys_send);
1057 DECL_TEMPLATE(solaris, sys_sendmsg);
1058 DECL_TEMPLATE(solaris, sys_sendto);
1059 DECL_TEMPLATE(solaris, sys_getpeername);
1060 DECL_TEMPLATE(solaris, sys_getsockname);
1061 DECL_TEMPLATE(solaris, sys_getsockopt);
1062 DECL_TEMPLATE(solaris, sys_setsockopt);
1063 DECL_TEMPLATE(solaris, sys_lwp_mutex_register);
1064 DECL_TEMPLATE(solaris, sys_uucopy);
1065 DECL_TEMPLATE(solaris, sys_umount2);
1066
1067 DECL_TEMPLATE(solaris, fast_gethrtime);
1068 DECL_TEMPLATE(solaris, fast_gethrvtime);
1069 DECL_TEMPLATE(solaris, fast_gethrestime);
1070 #if defined(SOLARIS_GETHRT_FASTTRAP)
1071 DECL_TEMPLATE(solaris, fast_gethrt);
1072 #endif /* SOLARIS_GETHRT_FASTTRAP */
1073 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
1074 DECL_TEMPLATE(solaris, fast_getzoneoffset);
1075 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
1076
1077 /* implementation */
PRE(sys_exit)
1079 {
1080 /* void exit(int status); */
1081 ThreadId t;
1082
1083 PRINT("sys_exit( %ld )", SARG1);
1084 PRE_REG_READ1(void, "exit", int, status);
1085
1086 for (t = 1; t < VG_N_THREADS; t++) {
1087 if (VG_(threads)[t].status == VgTs_Empty)
1088 continue;
1089
1090 VG_(threads)[t].exitreason = VgSrc_ExitProcess;
1091 VG_(threads)[t].os_state.exitcode = ARG1;
1092
1093 /* Unblock it, if blocked. */
1094 if (t != tid)
1095 VG_(get_thread_out_of_syscall)(t);
1096 }
1097
1098 /* We have to claim the syscall already succeeded. */
1099 SET_STATUS_Success(0);
1100 }
1101
1102 #if defined(SOLARIS_SPAWN_SYSCALL)
static Bool spawn_pre_check_kfa(ThreadId tid, SyscallStatus *status,
                                vki_kfile_attr_t *kfa)
1105 {
1106 PRE_FIELD_READ("spawn(attrs->kfa_size)", kfa->kfa_size);
1107 PRE_FIELD_READ("spawn(attrs->kfa_type)", kfa->kfa_type);
1108
1109 if (ML_(safe_to_deref)(kfa, kfa->kfa_size)) {
1110 switch (kfa->kfa_type) {
1111 case VKI_FA_DUP2:
1112 PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
1113 PRE_FIELD_READ("spawn(attrs->kfa_newfiledes)", kfa->kfa_newfiledes);
1114 if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(dup2)", tid, False) ||
1115 !ML_(fd_allowed)(kfa->kfa_newfiledes, "spawn(dup2)", tid, False)) {
1116 SET_STATUS_Failure(VKI_EBADF);
1117 return False;
1118 }
1119 break;
1120 case VKI_FA_CLOSE:
1121 PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
1122 /* If doing -d style logging (which is to fd = 2 = stderr),
1123 don't allow that filedes to be closed. See ML_(fd_allowed)(). */
1124 if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(close)", tid, False) ||
1125 (kfa->kfa_filedes == 2 && VG_(debugLog_getLevel)() > 0)) {
1126 SET_STATUS_Failure(VKI_EBADF);
1127 return False;
1128 }
1129 break;
1130 case VKI_FA_CLOSEFROM:
1131 /* :TODO: All file descriptors greater than or equal to
1132 kfa->kfa_filedes would have to be checked. */
1133 VG_(unimplemented)("Support for spawn() with file attribute type "
1134 "FA_CLOSEFROM.");
1135 break;
1136 case VKI_FA_OPEN:
1137 PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
1138 PRE_FIELD_READ("spawn(attrs->kfa_oflag)", kfa->kfa_oflag);
1139 PRE_FIELD_READ("spawn(attrs->kfa_mode)", kfa->kfa_mode);
1140 if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(open)", tid, False)) {
1141 SET_STATUS_Failure(VKI_EBADF);
1142 return False;
1143 }
1144 /* fallthrough */
1145 case VKI_FA_CHDIR:
1146 PRE_FIELD_READ("spawn(attrs->kfa_pathsize)", kfa->kfa_pathsize);
1147 if (kfa->kfa_pathsize != 0) {
1148 PRE_MEM_RASCIIZ("spawn(attrs->kfa_data)", (Addr) kfa->kfa_data);
1149 }
1150 break;
1151 default:
1152 VG_(unimplemented)("Support for spawn() with file attribute type %u.",
1153 kfa->kfa_type);
1154 }
1155 }
1156
1157 return True;
1158 }
1159
PRE(sys_spawn)
1161 {
1162 /* int spawn(char *path, void *attrs, size_t attrsize,
1163 char *argenv, size_t aesize); */
1164 PRINT("sys_spawn ( %#lx(%s), %#lx, %lu, %#lx, %lu )",
1165 ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ARG5);
1166 PRE_REG_READ5(long, "spawn", const char *, path, void *, attrs,
1167 size_t, attrsize, char *, argenv, size_t, aesize);
1168
1169 /* First check input arguments. */
1170 PRE_MEM_RASCIIZ("spawn(path)", ARG1);
1171 if (ARG3 > 0) {
1172 /* --- vki_kspawn_attr_t --
1173 | ksa_version |
1174 | ksa_size |
1175 | ksa_attr_off | -----| (only if != 0)
1176 | ksa_attr_size | |
1177 | ksa_path_off | =====|====| (only if != 0)
1178 | ksa_path_size | | |
1179 | ksa_shell_off | -----|----|----| (only if != 0)
1180 | ksa_shell_size | | | |
1181 | ksa_data[0] | | | |
1182 ------------------------ | | |
1183 | vki_spawn_attr_t | <----| | |
1184 ------------------------ | |
1185 | path | <---------| |
1186 ------------------------ |
1187 | shell | <---------------
1188 ------------------------
1189 | file actions | (not included in ksa_size, only in ARG3)
1190 ------------------------
1191
1192 ksa_size = sizeof(vki_kspawn_attr_t) + ksa_attr_size + ksa_path_size +
1193 ksa_shell_size
1194 attrs_size (ARG3) = ksa_size + file actions size */
1195
1196 vki_kspawn_attr_t *attrs = (vki_kspawn_attr_t *) ARG2;
1197 PRE_FIELD_READ("spawn(attrs->ksa_version)", attrs->ksa_version);
1198 PRE_FIELD_READ("spawn(attrs->ksa_size)", attrs->ksa_size);
1199 PRE_FIELD_READ("spawn(attrs->ksa_attr_off)", attrs->ksa_attr_off);
1200 PRE_FIELD_READ("spawn(attrs->ksa_path_off)", attrs->ksa_path_off);
1201 PRE_FIELD_READ("spawn(attrs->ksa_shell_off)", attrs->ksa_shell_off);
1202
1203 if (ML_(safe_to_deref)(attrs, sizeof(vki_kspawn_attr_t))) {
1204 if (attrs->ksa_version != VKI_SPAWN_VERSION) {
1205 VG_(unimplemented)("Support for spawn() with attributes "
1206 "version %u.", attrs->ksa_version);
1207 }
1208
1209 if (attrs->ksa_attr_off != 0) {
1210 PRE_FIELD_READ("spawn(attrs->ksa_attr_size)", attrs->ksa_attr_size);
1211 vki_spawn_attr_t *sap =
1212 (vki_spawn_attr_t *) ((Addr) attrs + attrs->ksa_attr_off);
1213 PRE_MEM_READ("spawn(attrs->ksa_attr)",
1214 (Addr) sap, attrs->ksa_attr_size);
1215 if (ML_(safe_to_deref)(sap, sizeof(vki_spawn_attr_t))) {
1216 if (sap->sa_psflags & VKI_POSIX_SPAWN_SETVAMASK_NP) {
1217 VG_(unimplemented)("Support for spawn() with attributes flag "
1218 "including POSIX_SPAWN_SETVAMASK_NP.");
1219 }
1220 /* paranoia */
1221 Int rem = sap->sa_psflags & ~(
1222 VKI_POSIX_SPAWN_RESETIDS | VKI_POSIX_SPAWN_SETPGROUP |
1223 VKI_POSIX_SPAWN_SETSIGDEF | VKI_POSIX_SPAWN_SETSIGMASK |
1224 VKI_POSIX_SPAWN_SETSCHEDPARAM | VKI_POSIX_SPAWN_SETSCHEDULER |
1225 VKI_POSIX_SPAWN_SETSID_NP | VKI_POSIX_SPAWN_SETVAMASK_NP |
1226 VKI_POSIX_SPAWN_SETSIGIGN_NP | VKI_POSIX_SPAWN_NOSIGCHLD_NP |
1227 VKI_POSIX_SPAWN_WAITPID_NP | VKI_POSIX_SPAWN_NOEXECERR_NP);
1228 if (rem != 0) {
1229 VG_(unimplemented)("Support for spawn() with attributes flag "
1230 "%#x.", sap->sa_psflags);
1231 }
1232 }
1233 }
1234
1235 if (attrs->ksa_path_off != 0) {
1236 PRE_FIELD_READ("spawn(attrs->ksa_path_size)", attrs->ksa_path_size);
1237 PRE_MEM_RASCIIZ("spawn(attrs->ksa_path)",
1238 (Addr) attrs + attrs->ksa_path_off);
1239 }
1240
1241 if (attrs->ksa_shell_off != 0) {
1242 PRE_FIELD_READ("spawn(attrs->ksa_shell_size)",
1243 attrs->ksa_shell_size);
1244 PRE_MEM_RASCIIZ("spawn(attrs->ksa_shell)",
1245 (Addr) attrs + attrs->ksa_shell_off);
1246 }
1247
1248 vki_kfile_attr_t *kfa = (vki_kfile_attr_t *) (ARG2 + attrs->ksa_size);
1249 while ((Addr) kfa < ARG2 + ARG3) {
1250 if (spawn_pre_check_kfa(tid, status, kfa) == False) {
1251 return;
1252 }
1253 kfa = (vki_kfile_attr_t *) ((Addr) kfa + kfa->kfa_size);
1254 }
1255 }
1256 }
1257 PRE_MEM_READ("spawn(argenv)", ARG4, ARG5);
1258
1259 /* Check that the name at least begins in client-accessible storage. */
1260 if ((ARG1 == 0) || !ML_(safe_to_deref)((HChar *) ARG1, 1)) {
1261 SET_STATUS_Failure(VKI_EFAULT);
1262 return;
1263 }
1264
1265 /* Check that attrs reside in client-accessible storage. */
1266 if (ARG2 != 0) {
1267 if (!VG_(am_is_valid_for_client)(ARG2, ARG3, VKI_PROT_READ)) {
1268 SET_STATUS_Failure(VKI_EFAULT);
1269 return;
1270 }
1271 }
1272
   /* Check that argenv resides in client-accessible storage.
      Solaris does not allow spawn() to be performed without any arguments
      and environment variables specified. */
1276 if ((ARG4 == 0) /* obviously bogus */ ||
1277 !VG_(am_is_valid_for_client)(ARG4, ARG5, VKI_PROT_READ)) {
1278 SET_STATUS_Failure(VKI_EFAULT);
1279 return;
1280 }
1281
1282 /* Copy existing attrs or create empty minimal ones. */
1283 vki_kspawn_attr_t *attrs;
1284 SizeT attrs_size;
1285 if (ARG2 == 0) {
1286 /* minimalistic kspawn_attr_t + spawn_attr_t */
1287 attrs_size = sizeof(vki_kspawn_attr_t) + sizeof(vki_spawn_attr_t);
1288 attrs = VG_(calloc)("syswrap.spawn.1", 1, attrs_size);
1289 attrs->ksa_version = VKI_SPAWN_VERSION;
1290 attrs->ksa_size = attrs_size;
1291 attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
1292 attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
1293 } else if (((vki_kspawn_attr_t *) ARG2)->ksa_attr_off == 0) {
1294 /* existing kspawn_attr_t but missing spawn_attr_t */
1295 attrs_size = ARG3 + sizeof(vki_spawn_attr_t);
1296 attrs = VG_(calloc)("syswrap.spawn.2", 1, attrs_size);
1297 VG_(memcpy)(attrs, (void *) ARG2, sizeof(vki_kspawn_attr_t));
1298 SizeT file_actions_size = ARG3 - attrs->ksa_size;
1299 attrs->ksa_size += sizeof(vki_spawn_attr_t);
1300 attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
1301 attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
1302 if (attrs->ksa_path_off != 0) {
1303 VG_(memcpy)((HChar *) attrs + attrs->ksa_path_off +
1304 sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
1305 attrs->ksa_path_off, attrs->ksa_path_size);
1306 attrs->ksa_path_off += sizeof(vki_spawn_attr_t);
1307 }
1308 if (attrs->ksa_shell_off != 0) {
1309 VG_(memcpy)((HChar *) attrs + attrs->ksa_shell_off +
1310 sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
1311 attrs->ksa_shell_off, attrs->ksa_shell_size);
1312 attrs->ksa_shell_off += sizeof(vki_spawn_attr_t);
1313 }
1314 if (file_actions_size > 0) {
1315 VG_(memcpy)((HChar *) attrs + attrs_size - file_actions_size,
1316 (HChar *) ARG2 + ARG3 - file_actions_size,
1317 file_actions_size);
1318 }
1319 } else {
1320 /* existing kspawn_attr_t + spawn_attr_t */
1321 attrs_size = ARG3;
1322 attrs = VG_(malloc)("syswrap.spawn.3", attrs_size);
1323 VG_(memcpy)(attrs, (void *) ARG2, attrs_size);
1324 }
1325 vki_spawn_attr_t *spa = (vki_spawn_attr_t *) ((HChar *) attrs +
1326 attrs->ksa_attr_off);
1327
   /* Convert the argv and envp parts of argenv into separate XArrays.
      Duplicate the strings because argv and envp will then be modified. */
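   /* The argenv buffer (as consumed by the loop below) is a sequence of
      entries, each being a '\1' byte followed by a NUL-terminated string.
      A single '\0' byte closes the argv part and a further '\0' closes the
      envp part. Illustrative layout (spaces added for readability only):
         \1 "/bin/ls" \0  \1 "-l" \0  \0  \1 "PATH=/usr/bin" \0  \0 */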
1330 XArray *argv = VG_(newXA)(VG_(malloc), "syswrap.spawn.4",
1331 VG_(free), sizeof(HChar *));
1332 XArray *envp = VG_(newXA)(VG_(malloc), "syswrap.spawn.5",
1333 VG_(free), sizeof(HChar *));
1334
1335 HChar *argenv = (HChar *) ARG4;
1336 XArray *current_xa = argv;
1337 while ((Addr) argenv < ARG4 + ARG5) {
1338 if (*argenv == '\0') {
1339 argenv += 1;
1340 if (current_xa == argv) {
1341 current_xa = envp;
1342 if ((*argenv == '\0') && ((Addr) argenv == ARG4 + ARG5 - 1)) {
1343 /* envp part is empty, it contained only {NULL}. */
1344 break;
1345 }
1346 } else {
1347 if ((Addr) argenv != ARG4 + ARG5) {
1348 if (VG_(clo_trace_syscalls))
1349 VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
1350 SET_STATUS_Failure(VKI_EINVAL);
1351 goto exit;
1352 }
1353 break;
1354 }
1355 }
1356
1357 if (*argenv != '\1') {
1358 if (VG_(clo_trace_syscalls))
1359 VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
1360 SET_STATUS_Failure(VKI_EINVAL);
1361 goto exit;
1362 }
1363 argenv += 1;
1364
1365 HChar *duplicate = VG_(strdup)("syswrap.spawn.6", argenv);
1366 VG_(addToXA)(current_xa, &duplicate);
1367 argenv += VG_(strlen)(argenv) + 1;
1368 }
1369
1370 /* Debug-only printing. */
1371 if (0) {
1372 VG_(printf)("\nARG1 = %#lx(%s)\n", ARG1, (HChar *) ARG1);
1373 VG_(printf)("ARG4 (argv) = ");
1374 for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
1375 VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
1376 }
1377
1378 VG_(printf)("\nARG4 (envp) = ");
1379 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1380 VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
1381 }
1382 VG_(printf)("\n");
1383 }
1384
1385 /* Decide whether or not we want to trace the spawned child.
1386 Omit the executable name itself from child_argv. */
1387 const HChar **child_argv = VG_(malloc)("syswrap.spawn.7",
1388 (VG_(sizeXA)(argv) - 1) * sizeof(HChar *));
1389 for (Word i = 1; i < VG_(sizeXA)(argv); i++) {
1390 child_argv[i - 1] = *(HChar **) VG_(indexXA)(argv, i);
1391 }
1392 Bool trace_this_child = VG_(should_we_trace_this_child)((HChar *) ARG1,
1393 child_argv);
1394 VG_(free)(child_argv);
1395
1396 /* If we're tracing the child, and the launcher name looks bogus (possibly
1397 because launcher.c couldn't figure it out, see comments therein) then we
1398 have no option but to fail. */
1399 if (trace_this_child &&
1400 (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
1401 SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
1402 goto exit;
1403 }
1404
1405 /* Set up the child's exe path. */
1406 const HChar *path = (const HChar *) ARG1;
1407 const HChar *launcher_basename = NULL;
1408 if (trace_this_child) {
1409 /* We want to exec the launcher. */
1410 path = VG_(name_of_launcher);
1411 vg_assert(path != NULL);
1412
1413 launcher_basename = VG_(strrchr)(path, '/');
1414 if ((launcher_basename == NULL) || (launcher_basename[1] == '\0')) {
1415 launcher_basename = path; /* hmm, tres dubious */
1416 } else {
1417 launcher_basename++;
1418 }
1419 }
1420
1421 /* Set up the child's environment.
1422
1423 Remove the valgrind-specific stuff from the environment so the child
1424 doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
1425 unconditionally, since if we are tracing the child, the child valgrind
1426 will set up the appropriate client environment.
1427
1428 Then, if tracing the child, set VALGRIND_LIB for it. */
1429 HChar **child_envp = VG_(calloc)("syswrap.spawn.8",
1430 VG_(sizeXA)(envp) + 1, sizeof(HChar *));
1431 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1432 child_envp[i] = *(HChar **) VG_(indexXA)(envp, i);
1433 }
1434 VG_(env_remove_valgrind_env_stuff)(child_envp, /* ro_strings */ False,
1435 VG_(free));
1436
1437 /* Stuff was removed from child_envp, reflect that in envp XArray. */
1438 VG_(dropTailXA)(envp, VG_(sizeXA)(envp));
1439 for (UInt i = 0; child_envp[i] != NULL; i++) {
1440 VG_(addToXA)(envp, &child_envp[i]);
1441 }
1442 VG_(free)(child_envp);
1443
1444 if (trace_this_child) {
1445 /* Set VALGRIND_LIB in envp. */
1446 SizeT len = VG_(strlen)(VALGRIND_LIB) + VG_(strlen)(VG_(libdir)) + 2;
1447 HChar *valstr = VG_(malloc)("syswrap.spawn.9", len);
1448 VG_(sprintf)(valstr, "%s=%s", VALGRIND_LIB, VG_(libdir));
1449 VG_(addToXA)(envp, &valstr);
1450 }
1451
1452 /* Set up the child's args. If not tracing it, they are left untouched.
1453 Otherwise, they are:
1454
1455 [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG4[1..],
1456
1457 except that the first VG_(args_for_valgrind_noexecpass) args are
1458 omitted. */
1459 if (trace_this_child) {
1460 vg_assert(VG_(args_for_valgrind) != NULL);
1461 vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
1462 vg_assert(VG_(args_for_valgrind_noexecpass)
1463 <= VG_(sizeXA)(VG_(args_for_valgrind)));
1464
1465 /* So what args will there be? Bear with me... */
1466 /* ... launcher basename, ... */
1467 HChar *duplicate = VG_(strdup)("syswrap.spawn.10", launcher_basename);
1468 VG_(insertIndexXA)(argv, 0, &duplicate);
1469
1470 /* ... Valgrind's args, ... */
1471 UInt v_args = VG_(sizeXA)(VG_(args_for_valgrind));
1472 v_args -= VG_(args_for_valgrind_noexecpass);
1473 for (Word i = VG_(args_for_valgrind_noexecpass);
1474 i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
1475 duplicate = VG_(strdup)("syswrap.spawn.11",
1476 *(HChar **) VG_(indexXA)(VG_(args_for_valgrind), i));
1477 VG_(insertIndexXA)(argv, 1 + i, &duplicate);
1478 }
1479
1480 /* ... name of client executable, ... */
1481 duplicate = VG_(strdup)("syswrap.spawn.12", (HChar *) ARG1);
1482 VG_(insertIndexXA)(argv, 1 + v_args, &duplicate);
1483
1484 /* ... and args for client executable (without [0]). */
1485 duplicate = *(HChar **) VG_(indexXA)(argv, 1 + v_args + 1);
1486 VG_(free)(duplicate);
1487 VG_(removeIndexXA)(argv, 1 + v_args + 1);
1488 }
1489
1490 /* Debug-only printing. */
1491 if (0) {
1492 VG_(printf)("\npath = %s\n", path);
1493 VG_(printf)("argv = ");
1494 for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
1495 VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
1496 }
1497
1498 VG_(printf)("\nenvp = ");
1499 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1500 VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
1501 }
1502 VG_(printf)("\n");
1503 }
1504
1505 /* Set the signal state up for spawned child.
1506
1507 Signals set to be caught are equivalent to signals set to the default
1508 action, from the child's perspective.
1509
      Therefore query SCSS and prepare the default (DFL) and ignore (IGN)
      signal sets. Then combine these sets with those passed from the client,
      if the POSIX_SPAWN_SETSIGDEF or POSIX_SPAWN_SETSIGIGN_NP flags have
      been specified.
1513 */
1514 vki_sigset_t sig_default;
1515 vki_sigset_t sig_ignore;
1516 VG_(sigemptyset)(&sig_default);
1517 VG_(sigemptyset)(&sig_ignore);
1518 for (Int i = 1; i < VG_(max_signal); i++) {
1519 vki_sigaction_fromK_t sa;
1520 VG_(do_sys_sigaction)(i, NULL, &sa); /* query SCSS */
1521 if (sa.sa_handler == VKI_SIG_IGN) {
1522 VG_(sigaddset)(&sig_ignore, i);
1523 } else {
1524 VG_(sigaddset)(&sig_default, i);
1525 }
1526 }
1527
1528 if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGDEF) {
1529 VG_(sigaddset_from_set)(&spa->sa_sigdefault, &sig_default);
1530 } else {
1531 spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGDEF;
1532 spa->sa_sigdefault = sig_default;
1533 }
1534
1535 if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGIGN_NP) {
1536 VG_(sigaddset_from_set)(&spa->sa_sigignore, &sig_ignore);
1537 } else {
1538 spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGIGN_NP;
1539 spa->sa_sigignore = sig_ignore;
1540 }
1541
1542 /* Set the signal mask for spawned child.
1543
      Analogous to signal handlers: query SCSS for the blocked signals mask
      and combine it with the mask passed from the client, if the
      POSIX_SPAWN_SETSIGMASK flag has been specified. */
1547 vki_sigset_t *sigmask = &VG_(get_ThreadState)(tid)->sig_mask;
1548 if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGMASK) {
1549 VG_(sigaddset_from_set)(&spa->sa_sigmask, sigmask);
1550 } else {
1551 spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGMASK;
1552 spa->sa_sigmask = *sigmask;
1553 }
1554
1555 /* Lastly, reconstruct argenv from argv + envp. */
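   /* The rebuilt buffer uses the same layout that the parsing loop above
      consumed: each string is prefixed with '\1' and NUL-terminated, a '\0'
      byte closes the argv part and another '\0' closes the envp part. */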
1556 SizeT argenv_size = 1 + 1;
1557 for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
1558 argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(argv, i)) + 2;
1559 }
1560 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1561 argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(envp, i)) + 2;
1562 }
1563
1564 argenv = VG_(malloc)("syswrap.spawn.13", argenv_size);
1565 HChar *current = argenv;
1566 #define COPY_CHAR_TO_ARGENV(dst, character) \
1567 do { \
1568 *(dst) = character; \
1569 (dst) += 1; \
1570 } while (0)
1571 #define COPY_STRING_TO_ARGENV(dst, src) \
1572 do { \
1573 COPY_CHAR_TO_ARGENV(dst, '\1'); \
1574 SizeT src_len = VG_(strlen)((src)) + 1; \
1575 VG_(memcpy)((dst), (src), src_len); \
1576 (dst) += src_len; \
1577 } while (0)
1578
1579 for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
1580 COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(argv, i));
1581 }
1582 COPY_CHAR_TO_ARGENV(current, '\0');
1583 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1584 COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(envp, i));
1585 }
1586 COPY_CHAR_TO_ARGENV(current, '\0');
1587 vg_assert(current == argenv + argenv_size);
1588 #undef COPY_CHAR_TO_ARGENV
#undef COPY_STRING_TO_ARGENV
1590
1591 /* HACK: Temporarily restore the DATA rlimit for spawned child.
1592 This is a terrible hack to provide sensible brk limit for child. */
1593 VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
1594
1595 /* Actual spawn() syscall. */
1596 SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) path, (UWord) attrs,
1597 attrs_size, (UWord) argenv, argenv_size);
1598 SET_STATUS_from_SysRes(res);
1599 VG_(free)(argenv);
1600
1601 /* Restore DATA rlimit back to its previous value set in m_main.c. */
1602 struct vki_rlimit zero = { 0, 0 };
1603 zero.rlim_max = VG_(client_rlimit_data).rlim_max;
1604 VG_(setrlimit)(VKI_RLIMIT_DATA, &zero);
1605
1606 if (SUCCESS) {
1607 PRINT(" spawn: process %d spawned child %ld\n", VG_(getpid)(), RES);
1608 }
1609
1610 exit:
1611 VG_(free)(attrs);
1612 for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
1613 VG_(free)(*(HChar **) VG_(indexXA)(argv, i));
1614 }
1615 for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
1616 VG_(free)(*(HChar **) VG_(indexXA)(envp, i));
1617 }
1618 VG_(deleteXA)(argv);
1619 VG_(deleteXA)(envp);
1620 }
1621 #endif /* SOLARIS_SPAWN_SYSCALL */
1622
1623 /* Handles the case where the open is of /proc/self/psinfo or
1624 /proc/<pid>/psinfo. Fetch fresh contents into psinfo_t,
1625 fake fname, psargs, argc and argv. Write the structure to the fake
1626 file we cooked up at startup (in m_main) and give out a copy of this
1627 fd. Also seek the cloned fd back to the start. */
1628 static Bool handle_psinfo_open(SyscallStatus *status,
1629 Bool use_openat,
1630 const HChar *filename,
1631 Int arg1, UWord arg3, UWord arg4)
1632 {
1633 if (!ML_(safe_to_deref)((const void *) filename, 1))
1634 return False;
1635
1636 HChar name[VKI_PATH_MAX]; // large enough
1637 VG_(sprintf)(name, "/proc/%d/psinfo", VG_(getpid)());
1638
1639 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/psinfo"))
1640 return False;
1641
1642 /* Use original arguments to open() or openat(). */
1643 SysRes sres;
1644 #if defined(SOLARIS_OLD_SYSCALLS)
1645 if (use_openat)
1646 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1647 arg3, arg4);
1648 else
1649 sres = VG_(do_syscall3)(SYS_open, (UWord) filename, arg3, arg4);
1650 #else
1651 vg_assert(use_openat == True);
1652 sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
1653 arg3, arg4);
1654 #endif /* SOLARIS_OLD_SYSCALLS */
1655
1656 if (sr_isError(sres)) {
1657 SET_STATUS_from_SysRes(sres);
1658 return True;
1659 }
1660 Int fd = sr_Res(sres);
1661
1662 vki_psinfo_t psinfo;
1663 sres = VG_(do_syscall3)(SYS_read, fd, (UWord) &psinfo, sizeof(psinfo));
1664 if (sr_isError(sres)) {
1665 SET_STATUS_from_SysRes(sres);
1666 VG_(close)(fd);
1667 return True;
1668 }
1669 if (sr_Res(sres) != sizeof(psinfo)) {
1670 SET_STATUS_Failure(VKI_ENODATA);
1671 VG_(close)(fd);
1672 return True;
1673 }
1674
1675 VG_(close)(fd);
1676
1677 VG_(client_fname)(psinfo.pr_fname, sizeof(psinfo.pr_fname), True);
1678 VG_(client_cmd_and_args)(psinfo.pr_psargs, sizeof(psinfo.pr_psargs));
1679
1680 Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
1681 psinfo.pr_argc = *ptr++;
1682 psinfo.pr_argv = (Addr) ptr;
1683
1684 sres = VG_(do_syscall4)(SYS_pwrite, VG_(cl_psinfo_fd),
1685 (UWord) &psinfo, sizeof(psinfo), 0);
1686 if (sr_isError(sres)) {
1687 SET_STATUS_from_SysRes(sres);
1688 return True;
1689 }
1690
1691 sres = VG_(dup)(VG_(cl_psinfo_fd));
1692 SET_STATUS_from_SysRes(sres);
1693 if (!sr_isError(sres)) {
1694 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1695 if (off < 0)
1696 SET_STATUS_Failure(VKI_EMFILE);
1697 }
1698
1699 return True;
1700 }
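/* For reference, a typical client sequence that ends up in
   handle_psinfo_open() -- a hypothetical sketch; psinfo_t is declared in the
   Solaris <procfs.h> header:

      psinfo_t ps;
      int fd = open("/proc/self/psinfo", O_RDONLY);
      if (fd >= 0 && read(fd, &ps, sizeof ps) == (ssize_t) sizeof ps) {
         // Under Valgrind the wrapper has rewritten pr_fname, pr_psargs,
         // pr_argc and pr_argv so they describe the client program rather
         // than the Valgrind tool image.
      }

   The descriptor handed back is a dup of VG_(cl_psinfo_fd), rewound to
   offset 0, so the client can read the structure as from a real procfs
   file. */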
1701
1702 #if defined(SOLARIS_PROC_CMDLINE)
1703 /* Handles the case where the open is of /proc/self/cmdline or
1704 /proc/<pid>/cmdline. Just give it a copy of VG_(cl_cmdline_fd) for the
1705 fake file we cooked up at startup (in m_main). Also, seek the
1706 cloned fd back to the start. */
1707 static Bool handle_cmdline_open(SyscallStatus *status, const HChar *filename)
1708 {
1709 if (!ML_(safe_to_deref)((const void *) filename, 1))
1710 return False;
1711
1712 HChar name[VKI_PATH_MAX]; // large enough
1713 VG_(sprintf)(name, "/proc/%d/cmdline", VG_(getpid)());
1714
1715 if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/cmdline"))
1716 return False;
1717
1718 SysRes sres = VG_(dup)(VG_(cl_cmdline_fd));
1719 SET_STATUS_from_SysRes(sres);
1720 if (!sr_isError(sres)) {
1721 OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
1722 if (off < 0)
1723 SET_STATUS_Failure(VKI_EMFILE);
1724 }
1725
1726 return True;
1727 }
1728 #endif /* SOLARIS_PROC_CMDLINE */
1729
1730
1731 #if defined(SOLARIS_OLD_SYSCALLS)
1732 PRE(sys_open)
1733 {
1734 /* int open(const char *filename, int flags);
1735 int open(const char *filename, int flags, mode_t mode); */
1736
1737 if (ARG2 & VKI_O_CREAT) {
1738 /* 3-arg version */
1739 PRINT("sys_open ( %#lx(%s), %ld, %ld )", ARG1, (HChar *) ARG1,
1740 SARG2, ARG3);
1741 PRE_REG_READ3(long, "open", const char *, filename,
1742 int, flags, vki_mode_t, mode);
1743 } else {
1744 /* 2-arg version */
1745 PRINT("sys_open ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
1746 PRE_REG_READ2(long, "open", const char *, filename, int, flags);
1747 }
1748
1749 PRE_MEM_RASCIIZ("open(filename)", ARG1);
1750
1751 if (ML_(handle_auxv_open)(status, (const HChar*)ARG1, ARG2))
1752 return;
1753
1754 if (handle_psinfo_open(status, False /*use_openat*/, (const HChar*)ARG1, 0,
1755 ARG2, ARG3))
1756 return;
1757
1758 *flags |= SfMayBlock;
1759 }
1760
1761 POST(sys_open)
1762 {
1763 if (!ML_(fd_allowed)(RES, "open", tid, True)) {
1764 VG_(close)(RES);
1765 SET_STATUS_Failure(VKI_EMFILE);
1766 } else if (VG_(clo_track_fds))
1767 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
1768 }
1769 #endif /* SOLARIS_OLD_SYSCALLS */
1770
1771 PRE(sys_close)
1772 {
1773 WRAPPER_PRE_NAME(generic, sys_close)(tid, layout, arrghs, status,
1774 flags);
1775 }
1776
1777 POST(sys_close)
1778 {
1779 WRAPPER_POST_NAME(generic, sys_close)(tid, arrghs, status);
1780 door_revoke(tid, ARG1);
1781 /* Possibly an explicitly open'ed client door fd was just closed.
1782 Generic sys_close wrapper calls this only if VG_(clo_track_fds) = True. */
1783 if (!VG_(clo_track_fds))
1784 ML_(record_fd_close)(ARG1);
1785 }
1786
1787 PRE(sys_linkat)
1788 {
1789 /* int linkat(int fd1, const char *path1, int fd2,
1790 const char *path2, int flag);
1791 */
1792
1793 /* Interpret the first and third arguments as 32-bit values even on 64-bit
1794 architecture. This is different from Linux, for example, where glibc
1795 sign-extends them. */
1796 Int fd1 = (Int) ARG1;
1797 Int fd2 = (Int) ARG3;
1798
1799 PRINT("sys_linkat ( %d, %#lx(%s), %d, %#lx(%s), %ld )",
1800 fd1, ARG2, (HChar *) ARG2, fd2, ARG4, (HChar *) ARG4, SARG5);
1801 PRE_REG_READ5(long, "linkat", int, fd1, const char *, path1,
1802 int, fd2, const char *, path2, int, flags);
1803 PRE_MEM_RASCIIZ("linkat(path1)", ARG2);
1804 PRE_MEM_RASCIIZ("linkat(path2)", ARG4);
1805
1806 /* Be strict but ignore fd1/fd2 for absolute path1/path2. */
1807 if (fd1 != VKI_AT_FDCWD
1808 && ML_(safe_to_deref)((void *) ARG2, 1)
1809 && ((HChar *) ARG2)[0] != '/'
1810 && !ML_(fd_allowed)(fd1, "linkat", tid, False)) {
1811 SET_STATUS_Failure(VKI_EBADF);
1812 }
1813 if (fd2 != VKI_AT_FDCWD
1814 && ML_(safe_to_deref)((void *) ARG4, 1)
1815 && ((HChar *) ARG4)[0] != '/'
1816 && !ML_(fd_allowed)(fd2, "linkat", tid, False)) {
1817 SET_STATUS_Failure(VKI_EBADF);
1818 }
1819
1820 *flags |= SfMayBlock;
1821 }
1822
1823 PRE(sys_symlinkat)
1824 {
1825 /* int symlinkat(const char *path1, int fd, const char *path2); */
1826
1827 /* Interpret the second argument as 32-bit value even on 64-bit architecture.
1828 This is different from Linux, for example, where glibc sign-extends it. */
1829 Int fd = (Int) ARG2;
1830
1831 PRINT("sys_symlinkat ( %#lx(%s), %d, %#lx(%s) )",
1832 ARG1, (HChar *) ARG1, fd, ARG3, (HChar *) ARG3);
1833 PRE_REG_READ3(long, "symlinkat", const char *, path1, int, fd,
1834 const char *, path2);
1835 PRE_MEM_RASCIIZ("symlinkat(path1)", ARG1);
1836 PRE_MEM_RASCIIZ("symlinkat(path2)", ARG3);
1837
1838 /* Be strict but ignore fd for absolute path2. */
1839 if (fd != VKI_AT_FDCWD
1840 && ML_(safe_to_deref)((void *) ARG3, 1)
1841 && ((HChar *) ARG3)[0] != '/'
1842 && !ML_(fd_allowed)(fd, "symlinkat", tid, False))
1843 SET_STATUS_Failure(VKI_EBADF);
1844
1845 *flags |= SfMayBlock;
1846 }
1847
1848 PRE(sys_time)
1849 {
1850 /* time_t time(); */
1851 PRINT("sys_time ( )");
1852 PRE_REG_READ0(long, "time");
1853 }
1854
1855 /* Data segment for brk (heap). It is an expandable anonymous mapping
1856 abutting a 1-page reservation. The data segment starts at VG_(brk_base)
1857    and runs up to VG_(brk_limit). Neither of these two values has to be
1858 page-aligned.
1859 Initial data segment is established (see initimg-solaris.c for rationale):
1860 - directly during client program image initialization,
1861 - or on demand when the executed program is the runtime linker itself,
1862 after it has loaded its target dynamic executable (see PRE(sys_mmapobj)),
1863 or when the first brk() syscall is made.
1864
1865 Notable facts:
1866 - VG_(brk_base) is not page aligned; does not move
1867 - VG_(brk_limit) moves between [VG_(brk_base), data segment end]
1868 - data segment end is always page aligned
1869 - right after data segment end is 1-page reservation
1870
1871 | heap | 1 page
1872 +------+------+--------------+-------+
1873 | BSS | anon | anon | resvn |
1874 +------+------+--------------+-------+
1875
1876 ^ ^ ^ ^
1877 | | | |
1878 | | | data segment end
1879 | | VG_(brk_limit) -- no alignment constraint
1880 | brk_base_pgup -- page aligned
1881 VG_(brk_base) -- not page aligned -- does not move
1882
1883 Because VG_(brk_base) is not page-aligned and is initially located within
1884 pre-established BSS (data) segment, special care has to be taken in the code
1885 below to handle this feature.
1886
1887    The reservation segment is used to prevent the data segment from merging
1888    with a pre-existing segment. This is normally not an issue because the
1889    address space manager satisfies requests for client address space from
1890    the highest available addresses. However, when memory is low, the data
1891    segment can meet mmap'ed objects and the reservation keeps them apart.
1892 The page that contains VG_(brk_base) is already allocated by the program's
1893 loaded data segment. The brk syscall wrapper handles this special case. */
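/* A concrete, made-up example of the layout above (assuming 4 KiB pages):
   if the program's data segment ends such that VG_(brk_base) == 0x080a1234,
   then

      brk_base_pgup = VG_PGROUNDUP(0x080a1234) = 0x080a2000
      anon segment  = [0x080a2000, 0x080a2000 + initial size)
      resvn segment = the single page right after the anon segment

   and immediately after setup VG_(brk_limit) == VG_(brk_base) == 0x080a1234,
   i.e. the first 0xdcc bytes of the heap still live in the last page of the
   loaded data segment -- exactly the special case the fixup code in
   PRE(sys_brk) has to deal with. */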
1894
1895 static Bool brk_segment_established = False;
1896
1897 /* Establishes initial data segment for brk (heap). */
1898 Bool VG_(setup_client_dataseg)(void)
1899 {
1900 /* Segment size is initially at least 1 MB and at most 8 MB. */
1901 SizeT m1 = 1024 * 1024;
1902 SizeT m8 = 8 * m1;
1903 SizeT initial_size = VG_(client_rlimit_data).rlim_cur;
1904 VG_(debugLog)(1, "syswrap-solaris", "Setup client data (brk) segment "
1905 "at %#lx\n", VG_(brk_base));
1906 if (initial_size < m1)
1907 initial_size = m1;
1908 if (initial_size > m8)
1909 initial_size = m8;
1910 initial_size = VG_PGROUNDUP(initial_size);
1911
1912 Addr anon_start = VG_PGROUNDUP(VG_(brk_base));
1913 SizeT anon_size = VG_PGROUNDUP(initial_size);
1914 Addr resvn_start = anon_start + anon_size;
1915 SizeT resvn_size = VKI_PAGE_SIZE;
1916
1917 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
1918 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
1919 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
1920 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
1921 vg_assert(VG_(brk_base) == VG_(brk_limit));
1922
1923 /* Find the loaded data segment and remember its protection. */
1924 const NSegment *seg = VG_(am_find_nsegment)(VG_(brk_base) - 1);
1925 vg_assert(seg != NULL);
1926 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
1927 | (seg->hasW ? VKI_PROT_WRITE : 0)
1928 | (seg->hasX ? VKI_PROT_EXEC : 0);
1929
1930 /* Try to create the data segment and associated reservation where
1931 VG_(brk_base) says. */
1932 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
1933 anon_size);
1934 if (!ok) {
1935 /* That didn't work, we're hosed. */
1936 return False;
1937 }
1938
1939 /* Map the data segment. */
1940 SysRes sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
1941 vg_assert(!sr_isError(sres));
1942 vg_assert(sr_Res(sres) == anon_start);
1943
1944 brk_segment_established = True;
1945 return True;
1946 }
1947
1948 /* Tell the tool about the client data segment and then kill it which will
1949 make it initially inaccessible/unaddressable. */
1950 void VG_(track_client_dataseg)(ThreadId tid)
1951 {
1952 const NSegment *seg = VG_(am_find_nsegment)(VG_PGROUNDUP(VG_(brk_base)));
1953 vg_assert(seg != NULL);
1954 vg_assert(seg->kind == SkAnonC);
1955
1956 VG_TRACK(new_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base), tid);
1957 VG_TRACK(die_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base));
1958 }
1959
1960 PRE(sys_brk)
1961 {
1962 /* unsigned long brk(caddr_t end_data_segment); */
1963 /* The Solaris kernel returns 0 on success.
1964       In addition to this, brk(0) returns the current data segment end. This is
1965 very different from the Linux kernel, for example. */
1966
1967 Addr old_brk_limit = VG_(brk_limit);
1968 /* If VG_(brk_base) is page-aligned then old_brk_base_pgup is equal to
1969 VG_(brk_base). */
1970 Addr old_brk_base_pgup = VG_PGROUNDUP(VG_(brk_base));
1971 Addr new_brk = ARG1;
1972 const NSegment *seg, *seg2;
1973
1974 PRINT("sys_brk ( %#lx )", ARG1);
1975 PRE_REG_READ1(unsigned long, "brk", vki_caddr_t, end_data_segment);
1976
1977 if (new_brk == 0) {
1978 /* brk(0) - specific to Solaris 11 only. */
1979 SET_STATUS_Success(old_brk_limit);
1980 return;
1981 }
1982
1983 /* Handle some trivial cases. */
1984 if (new_brk == old_brk_limit) {
1985 SET_STATUS_Success(0);
1986 return;
1987 }
1988 if (new_brk < VG_(brk_base)) {
1989 /* Clearly impossible. */
1990 SET_STATUS_Failure(VKI_ENOMEM);
1991 return;
1992 }
1993 if (new_brk - VG_(brk_base) > VG_(client_rlimit_data).rlim_cur) {
1994 SET_STATUS_Failure(VKI_ENOMEM);
1995 return;
1996 }
1997
1998 /* The brk base and limit must have been already set. */
1999 vg_assert(VG_(brk_base) != -1);
2000 vg_assert(VG_(brk_limit) != -1);
2001
2002 if (!brk_segment_established) {
2003 /* Stay sane (because there should have been no brk activity yet). */
2004 vg_assert(VG_(brk_base) == VG_(brk_limit));
2005
2006 if (!VG_(setup_client_dataseg)()) {
2007 VG_(umsg)("Cannot map memory to initialize brk segment in thread #%d "
2008 "at %#lx\n", tid, VG_(brk_base));
2009 SET_STATUS_Failure(VKI_ENOMEM);
2010 return;
2011 }
2012
2013 VG_(track_client_dataseg)(tid);
2014 }
2015
2016 if (new_brk < old_brk_limit) {
2017 /* Shrinking the data segment. Be lazy and don't munmap the excess
2018 area. */
2019 if (old_brk_limit > old_brk_base_pgup) {
2020 /* Calculate new local brk (=MAX(new_brk, old_brk_base_pgup)). */
2021 Addr new_brk_local;
2022 if (new_brk < old_brk_base_pgup)
2023 new_brk_local = old_brk_base_pgup;
2024 else
2025 new_brk_local = new_brk;
2026
2027          /* Find a segment at the beginning and at the end of the shrunk
2028 range. */
2029 seg = VG_(am_find_nsegment)(new_brk_local);
2030 seg2 = VG_(am_find_nsegment)(old_brk_limit - 1);
2031 vg_assert(seg);
2032 vg_assert(seg->kind == SkAnonC);
2033 vg_assert(seg2);
2034 vg_assert(seg == seg2);
2035
2036 /* Discard any translations and zero-out the area. */
2037 if (seg->hasT)
2038 VG_(discard_translations)(new_brk_local,
2039 old_brk_limit - new_brk_local,
2040 "do_brk(shrink)");
2041 /* Since we're being lazy and not unmapping pages, we have to zero out
2042 the area, so that if the area later comes back into circulation, it
2043 will be filled with zeroes, as if it really had been unmapped and
2044 later remapped. Be a bit paranoid and try hard to ensure we're not
2045 going to segfault by doing the write - check that segment is
2046 writable. */
2047 if (seg->hasW)
2048 VG_(memset)((void*)new_brk_local, 0, old_brk_limit - new_brk_local);
2049 }
2050
2051 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2052 if (new_brk < old_brk_base_pgup) {
2053          /* Calculate old local brk (=MIN(old_brk_limit, old_brk_base_pgup)). */
2054 Addr old_brk_local;
2055 if (old_brk_limit < old_brk_base_pgup)
2056 old_brk_local = old_brk_limit;
2057 else
2058 old_brk_local = old_brk_base_pgup;
2059
2060          /* Find a segment at the beginning and at the end of the shrunk
2061 range. */
2062 seg = VG_(am_find_nsegment)(new_brk);
2063 seg2 = VG_(am_find_nsegment)(old_brk_local - 1);
2064 vg_assert(seg);
2065 vg_assert(seg2);
2066 vg_assert(seg == seg2);
2067
2068 /* Discard any translations and zero-out the area. */
2069 if (seg->hasT)
2070 VG_(discard_translations)(new_brk, old_brk_local - new_brk,
2071 "do_brk(shrink)");
2072 if (seg->hasW)
2073 VG_(memset)((void*)new_brk, 0, old_brk_local - new_brk);
2074 }
2075
2076 /* We are done, update VG_(brk_limit), tell the tool about the changes,
2077 and leave. */
2078 VG_(brk_limit) = new_brk;
2079 VG_TRACK(die_mem_brk, new_brk, old_brk_limit - new_brk);
2080 SET_STATUS_Success(0);
2081 return;
2082 }
2083
2084 /* We are expanding the brk segment. */
2085
2086 /* Fixup code if the VG_(brk_base) is not page-aligned. */
2087 if (old_brk_limit < old_brk_base_pgup) {
2088 /* Calculate new local brk (=MIN(new_brk, old_brk_base_pgup)). */
2089 Addr new_brk_local;
2090 if (new_brk < old_brk_base_pgup)
2091 new_brk_local = new_brk;
2092 else
2093 new_brk_local = old_brk_base_pgup;
2094
2095 /* Find a segment at the beginning and at the end of the expanded
2096 range. */
2097 seg = VG_(am_find_nsegment)(old_brk_limit);
2098 seg2 = VG_(am_find_nsegment)(new_brk_local - 1);
2099 vg_assert(seg);
2100 vg_assert(seg2);
2101 vg_assert(seg == seg2);
2102
2103 /* Nothing else to do. */
2104 }
2105
2106 if (new_brk > old_brk_base_pgup) {
2107 /* Calculate old local brk (=MAX(old_brk_limit, old_brk_base_pgup)). */
2108 Addr old_brk_local;
2109 if (old_brk_limit < old_brk_base_pgup)
2110 old_brk_local = old_brk_base_pgup;
2111 else
2112 old_brk_local = old_brk_limit;
2113
2114 /* Find a segment at the beginning of the expanded range. */
2115 if (old_brk_local > old_brk_base_pgup)
2116 seg = VG_(am_find_nsegment)(old_brk_local - 1);
2117 else
2118 seg = VG_(am_find_nsegment)(old_brk_local);
2119 vg_assert(seg);
2120 vg_assert(seg->kind == SkAnonC);
2121
2122 /* Find the 1-page reservation segment. */
2123 seg2 = VG_(am_next_nsegment)(seg, True/*forwards*/);
2124 vg_assert(seg2);
2125 vg_assert(seg2->kind == SkResvn);
2126 vg_assert(seg->end + 1 == seg2->start);
2127 vg_assert(seg2->end - seg2->start + 1 == VKI_PAGE_SIZE);
2128
2129 if (new_brk <= seg2->start) {
2130 /* Still fits within the existing anon segment, nothing to do. */
2131 } else {
2132 /* Data segment limit was already checked. */
2133 Addr anon_start = seg->end + 1;
2134 Addr resvn_start = VG_PGROUNDUP(new_brk);
2135 SizeT anon_size = resvn_start - anon_start;
2136 SizeT resvn_size = VKI_PAGE_SIZE;
2137 SysRes sres;
2138
2139 vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
2140 vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
2141 vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
2142 vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
2143 vg_assert(anon_size > 0);
2144
2145 /* Address space manager checks for free address space for us;
2146 reservation would not be otherwise created. */
2147 Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
2148 anon_size);
2149 if (!ok) {
2150 VG_(umsg)("brk segment overflow in thread #%d: can't grow "
2151 "to %#lx\n", tid, new_brk);
2152 SET_STATUS_Failure(VKI_ENOMEM);
2153 return;
2154 }
2155
2156 /* Establish protection from the existing segment. */
2157 UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
2158 | (seg->hasW ? VKI_PROT_WRITE : 0)
2159 | (seg->hasX ? VKI_PROT_EXEC : 0);
2160
2161 /* Address space manager will merge old and new data segments. */
2162 sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
2163 if (sr_isError(sres)) {
2164 VG_(umsg)("Cannot map memory to grow brk segment in thread #%d "
2165 "to %#lx\n", tid, new_brk);
2166 SET_STATUS_Failure(VKI_ENOMEM);
2167 return;
2168 }
2169 vg_assert(sr_Res(sres) == anon_start);
2170
2171 seg = VG_(am_find_nsegment)(old_brk_base_pgup);
2172 seg2 = VG_(am_find_nsegment)(VG_PGROUNDUP(new_brk) - 1);
2173 vg_assert(seg);
2174 vg_assert(seg2);
2175 vg_assert(seg == seg2);
2176 vg_assert(new_brk <= seg->end + 1);
2177 }
2178 }
2179
2180 /* We are done, update VG_(brk_limit), tell the tool about the changes, and
2181 leave. */
2182 VG_(brk_limit) = new_brk;
2183 VG_TRACK(new_mem_brk, old_brk_limit, new_brk - old_brk_limit, tid);
2184 SET_STATUS_Success(0);
2185 }
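/* Client-side view of the protocol handled by PRE(sys_brk) above
   (hypothetical sketch; Solaris libc builds sbrk() on top of brk(), roughly
   as "remember the current break, then ask for old + increment"):

      void *old = sbrk(0);             // conceptually brk(0): returns the
                                       // current break (the brk(0) fast path)
      if (sbrk(64 * 1024) != (void *) -1) {
         // growing: the expanding path, possibly mapping a new anon
         // segment and moving the 1-page reservation
      }
      brk(old);                        // shrinking: the lazy path that only
                                       // zeroes the released area
   */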
2186
2187 PRE(sys_stat)
2188 {
2189 /* int stat(const char *path, struct stat *buf); */
2190    /* Note: We could use the generic sys_newstat wrapper here, but the 'new'
2191       in its name is rather confusing in the Solaris context, so we provide
2192       our own wrapper. */
2193 PRINT("sys_stat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
2194 PRE_REG_READ2(long, "stat", const char *, path, struct stat *, buf);
2195
2196 PRE_MEM_RASCIIZ("stat(path)", ARG1);
2197 PRE_MEM_WRITE("stat(buf)", ARG2, sizeof(struct vki_stat));
2198 }
2199
2200 POST(sys_stat)
2201 {
2202 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2203 }
2204
2205 PRE(sys_lseek)
2206 {
2207 /* off_t lseek(int fildes, off_t offset, int whence); */
2208 PRINT("sys_lseek ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2209 PRE_REG_READ3(long, "lseek", int, fildes, vki_off_t, offset, int, whence);
2210
2211 /* Stay sane. */
2212 if (!ML_(fd_allowed)(ARG1, "lseek", tid, False))
2213 SET_STATUS_Failure(VKI_EBADF);
2214 }
2215
2216 PRE(sys_mount)
2217 {
2218 /* int mount(const char *spec, const char *dir, int mflag, char *fstype,
2219 char *dataptr, int datalen, char *optptr, int optlen); */
2220 *flags |= SfMayBlock;
2221 if (ARG3 & VKI_MS_OPTIONSTR) {
2222 /* 8-argument mount */
2223 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld, "
2224 "%#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3,
2225 ARG4, (HChar *) ARG4, ARG5, ARG6, ARG7, (HChar *) ARG7, SARG8);
2226 PRE_REG_READ8(long, "mount", const char *, spec, const char *, dir,
2227 int, mflag, char *, fstype, char *, dataptr, int, datalen,
2228 char *, optptr, int, optlen);
2229 }
2230 else if (ARG3 & VKI_MS_DATA) {
2231 /* 6-argument mount */
2232 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld )",
2233 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4,
2234 (HChar *) ARG4, ARG5, SARG6);
2235 PRE_REG_READ6(long, "mount", const char *, spec, const char *, dir,
2236 int, mflag, char *, fstype, char *, dataptr,
2237 int, datalen);
2238 }
2239 else {
2240 /* 4-argument mount */
2241 PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s) )", ARG1,
2242 (HChar *) ARG1, ARG2, (HChar *) ARG2, SARG3, ARG4, (HChar *) ARG4);
2243 PRE_REG_READ4(long, "mount", const char *, spec, const char *, dir,
2244 int, mflag, char *, fstype);
2245 }
2246 if (ARG1)
2247 PRE_MEM_RASCIIZ("mount(spec)", ARG1);
2248 PRE_MEM_RASCIIZ("mount(dir)", ARG2);
2249 if (ARG4 && ARG4 >= 256) {
2250 /* If ARG4 < 256, then it's an index to a fs table in the kernel. */
2251 PRE_MEM_RASCIIZ("mount(fstype)", ARG4);
2252 }
2253 if (ARG3 & (VKI_MS_DATA | VKI_MS_OPTIONSTR)) {
2254 if (ARG5)
2255 PRE_MEM_READ("mount(dataptr)", ARG5, ARG6);
2256 if ((ARG3 & VKI_MS_OPTIONSTR) && ARG7) {
2257 /* in/out buffer */
2258 PRE_MEM_RASCIIZ("mount(optptr)", ARG7);
2259 PRE_MEM_WRITE("mount(optptr)", ARG7, ARG8);
2260 }
2261 }
2262 }
2263
2264 POST(sys_mount)
2265 {
2266 if (ARG3 & VKI_MS_OPTIONSTR) {
2267 POST_MEM_WRITE(ARG7, VG_(strlen)((HChar*)ARG7) + 1);
2268 } else if (ARG3 & VKI_MS_DATA) {
2269 if ((ARG2) &&
2270 (ARG3 & MS_NOMNTTAB) &&
2271 (VG_STREQ((HChar *) ARG4, "namefs")) &&
2272 (ARG6 == sizeof(struct vki_namefd)) &&
2273 ML_(safe_to_deref)((void *) ARG5, ARG6)) {
2274 /* Most likely an fattach() call for a door file descriptor. */
2275 door_server_fattach(((struct vki_namefd *) ARG5)->fd, (HChar *) ARG2);
2276 }
2277 }
2278 }
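/* For reference: the namefs special case in POST(sys_mount) corresponds to a
   client calling fattach() on a door descriptor. Solaris libc turns that into
   a mount(2) of roughly this shape (hypothetical sketch; the path is
   illustrative):

      int did = door_create(server_proc, NULL, 0);
      // fattach(did, "/var/run/my_door") expands to approximately:
      //    struct namefd nfd = { .fd = did };
      //    mount(NULL, "/var/run/my_door", MS_DATA | MS_NOMNTTAB, "namefs",
      //          (char *) &nfd, sizeof nfd, NULL, 0);

   which is why door_server_fattach() is told about the (fd, mount point)
   pair here. */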
2279
2280 PRE(sys_readlinkat)
2281 {
2282 /* ssize_t readlinkat(int dfd, const char *path, char *buf,
2283 size_t bufsiz); */
2284 HChar name[30]; // large enough
2285 Word saved = SYSNO;
2286
2287 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2288 This is different from Linux, for example, where glibc sign-extends it. */
2289 Int dfd = (Int) ARG1;
2290
2291 PRINT("sys_readlinkat ( %d, %#lx(%s), %#lx, %ld )", dfd, ARG2,
2292 (HChar *) ARG2, ARG3, SARG4);
2293 PRE_REG_READ4(long, "readlinkat", int, dfd, const char *, path,
2294 char *, buf, int, bufsiz);
2295 PRE_MEM_RASCIIZ("readlinkat(path)", ARG2);
2296 PRE_MEM_WRITE("readlinkat(buf)", ARG3, ARG4);
2297
2298 /* Be strict but ignore dfd for absolute path. */
2299 if (dfd != VKI_AT_FDCWD
2300 && ML_(safe_to_deref)((void *) ARG2, 1)
2301 && ((HChar *) ARG2)[0] != '/'
2302 && !ML_(fd_allowed)(dfd, "readlinkat", tid, False)) {
2303 SET_STATUS_Failure(VKI_EBADF);
2304 return;
2305 }
2306
2307 /* Handle the case where readlinkat is looking at /proc/self/path/a.out or
2308 /proc/<pid>/path/a.out. */
2309 VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
2310 if (ML_(safe_to_deref)((void*)ARG2, 1) &&
2311 (!VG_(strcmp)((HChar*)ARG2, name) ||
2312 !VG_(strcmp)((HChar*)ARG2, "/proc/self/path/a.out"))) {
2313 VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
2314 SET_STATUS_from_SysRes(VG_(do_syscall4)(saved, dfd, (UWord)name, ARG3,
2315 ARG4));
2316 }
2317 }
2318
2319 POST(sys_readlinkat)
2320 {
2321 POST_MEM_WRITE(ARG3, RES);
2322 }
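/* Example of what the a.out redirection in PRE(sys_readlinkat) achieves
   (sketch; the path is illustrative): a client installed as /home/user/prog
   does

      char buf[PATH_MAX];
      ssize_t n = readlink("/proc/self/path/a.out", buf, sizeof buf);

   Without the rewrite the link would resolve to the Valgrind tool executable,
   because that is the real a.out of the process. By re-issuing the syscall
   against /proc/self/path/<VG_(cl_exec_fd)> the client gets its own
   executable path, "/home/user/prog", back. */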
2323
2324 PRE(sys_stime)
2325 {
2326 /* Kernel: int stime(time_t time); */
2327 PRINT("sys_stime ( %ld )", ARG1);
2328 PRE_REG_READ1(long, "stime", vki_time_t, time);
2329 }
2330
2331 PRE(sys_fstat)
2332 {
2333 /* int fstat(int fildes, struct stat *buf); */
2334    /* Note: We could use the generic sys_newfstat wrapper here, but the 'new'
2335       in its name is rather confusing in the Solaris context, so we provide
2336       our own wrapper. */
2337 PRINT("sys_fstat ( %ld, %#lx )", SARG1, ARG2);
2338 PRE_REG_READ2(long, "fstat", int, fildes, struct stat *, buf);
2339 PRE_MEM_WRITE("fstat(buf)", ARG2, sizeof(struct vki_stat));
2340
2341 /* Be strict. */
2342 if (!ML_(fd_allowed)(ARG1, "fstat", tid, False))
2343 SET_STATUS_Failure(VKI_EBADF);
2344 }
2345
2346 POST(sys_fstat)
2347 {
2348 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
2349 }
2350
2351 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
2352 PRE(sys_frealpathat)
2353 {
2354 /* int frealpathat(int fd, char *path, char *buf, size_t buflen); */
2355
2356 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2357 This is different from Linux, for example, where glibc sign-extends it. */
2358 Int fd = (Int) ARG1;
2359
2360 PRINT("sys_frealpathat ( %d, %#lx(%s), %#lx, %lu )",
2361 fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
2362 PRE_REG_READ4(long, "frealpathat", int, fd, char *, path,
2363 char *, buf, vki_size_t, buflen);
2364 PRE_MEM_RASCIIZ("frealpathat(path)", ARG2);
2365 PRE_MEM_WRITE("frealpathat(buf)", ARG3, ARG4);
2366
2367 /* Be strict but ignore fd for absolute path. */
2368 if (fd != VKI_AT_FDCWD
2369 && ML_(safe_to_deref)((void *) ARG2, 1)
2370 && ((HChar *) ARG2)[0] != '/'
2371 && !ML_(fd_allowed)(fd, "frealpathat", tid, False))
2372 SET_STATUS_Failure(VKI_EBADF);
2373 }
2374
2375 POST(sys_frealpathat)
2376 {
2377 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
2378 }
2379 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
2380
2381 PRE(sys_stty)
2382 {
2383 /* int stty(int fd, const struct sgttyb *tty); */
2384 PRINT("sys_stty ( %ld, %#lx )", SARG1, ARG2);
2385 PRE_REG_READ2(long, "stty", int, fd,
2386 const struct vki_sgttyb *, tty);
2387 PRE_MEM_READ("stty(tty)", ARG2, sizeof(struct vki_sgttyb));
2388
2389 /* Be strict. */
2390 if (!ML_(fd_allowed)(ARG1, "stty", tid, False))
2391 SET_STATUS_Failure(VKI_EBADF);
2392 }
2393
2394 PRE(sys_gtty)
2395 {
2396 /* int gtty(int fd, struct sgttyb *tty); */
2397 PRINT("sys_gtty ( %ld, %#lx )", SARG1, ARG2);
2398 PRE_REG_READ2(long, "gtty", int, fd, struct vki_sgttyb *, tty);
2399 PRE_MEM_WRITE("gtty(tty)", ARG2, sizeof(struct vki_sgttyb));
2400
2401 /* Be strict. */
2402 if (!ML_(fd_allowed)(ARG1, "gtty", tid, False))
2403 SET_STATUS_Failure(VKI_EBADF);
2404 }
2405
2406 POST(sys_gtty)
2407 {
2408 POST_MEM_WRITE(ARG2, sizeof(struct vki_sgttyb));
2409 }
2410
2411 PRE(sys_pgrpsys)
2412 {
2413 /* Kernel: int setpgrp(int flag, int pid, int pgid); */
2414 switch (ARG1 /*flag*/) {
2415 case 0:
2416 /* Libc: pid_t getpgrp(void); */
2417 PRINT("sys_pgrpsys ( %ld )", SARG1);
2418 PRE_REG_READ1(long, SC2("pgrpsys", "getpgrp"), int, flag);
2419 break;
2420 case 1:
2421 /* Libc: pid_t setpgrp(void); */
2422 PRINT("sys_pgrpsys ( %ld )", SARG1);
2423 PRE_REG_READ1(long, SC2("pgrpsys", "setpgrp"), int, flag);
2424 break;
2425 case 2:
2426 /* Libc: pid_t getsid(pid_t pid); */
2427 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2428 PRE_REG_READ2(long, SC2("pgrpsys", "getsid"), int, flag,
2429 vki_pid_t, pid);
2430 break;
2431 case 3:
2432 /* Libc: pid_t setsid(void); */
2433 PRINT("sys_pgrpsys ( %ld )", SARG1);
2434 PRE_REG_READ1(long, SC2("pgrpsys", "setsid"), int, flag);
2435 break;
2436 case 4:
2437 /* Libc: pid_t getpgid(pid_t pid); */
2438 PRINT("sys_pgrpsys ( %ld, %ld )", SARG1, SARG2);
2439 PRE_REG_READ2(long, SC2("pgrpsys", "getpgid"), int, flag,
2440 vki_pid_t, pid);
2441 break;
2442 case 5:
2443 /* Libc: int setpgid(pid_t pid, pid_t pgid); */
2444 PRINT("sys_pgrpsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2445 PRE_REG_READ3(long, SC2("pgrpsys", "setpgid"), int, flag,
2446 vki_pid_t, pid, vki_pid_t, pgid);
2447 break;
2448 default:
2449 VG_(unimplemented)("Syswrap of the pgrpsys call with flag %ld.", SARG1);
2450 /*NOTREACHED*/
2451 break;
2452 }
2453 }
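/* Quick reference for the pgrpsys multiplexing above (libc call -> flag),
   as implied by the cases handled:

      getpgrp()          -> 0
      setpgrp()          -> 1
      getsid(pid)        -> 2
      setsid()           -> 3
      getpgid(pid)       -> 4
      setpgid(pid, pgid) -> 5

   So, for example, a client calling setsid() arrives here with ARG1 == 3 and
   only the flag argument needs to be marked as read. */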
2454
2455 PRE(sys_pipe)
2456 {
2457 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2458 /* int pipe(int fildes[2], int flags); */
2459 PRINT("sys_pipe ( %#lx, %ld )", ARG1, SARG2);
2460 PRE_REG_READ2(long, "pipe", int *, fildes, int, flags);
2461 PRE_MEM_WRITE("pipe(fildes)", ARG1, 2 * sizeof(int));
2462 #else
2463 /* longlong_t pipe(); */
2464 PRINT("sys_pipe ( )");
2465 PRE_REG_READ0(long, "pipe");
2466 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2467 }
2468
2469 POST(sys_pipe)
2470 {
2471 Int p0, p1;
2472
2473 #if defined(SOLARIS_NEW_PIPE_SYSCALL)
2474 int *fds = (int*)ARG1;
2475 p0 = fds[0];
2476 p1 = fds[1];
2477 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
2478 #else
2479 p0 = RES;
2480 p1 = RESHI;
2481 #endif /* SOLARIS_NEW_PIPE_SYSCALL */
2482
2483 if (!ML_(fd_allowed)(p0, "pipe", tid, True) ||
2484 !ML_(fd_allowed)(p1, "pipe", tid, True)) {
2485 VG_(close)(p0);
2486 VG_(close)(p1);
2487 SET_STATUS_Failure(VKI_EMFILE);
2488 }
2489 else if (VG_(clo_track_fds)) {
2490 ML_(record_fd_open_nameless)(tid, p0);
2491 ML_(record_fd_open_nameless)(tid, p1);
2492 }
2493 }
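/* The two pipe ABIs handled above, from the client's point of view
   (hypothetical sketch):

      int fds[2];
      if (pipe(fds) == 0) {
         // New ABI (SOLARIS_NEW_PIPE_SYSCALL): the kernel writes both
         // descriptors into fds[], hence the POST_MEM_WRITE on ARG1.
         // Old ABI: the two descriptors come back packed in the 64-bit
         // return value (RES and RESHI) and libc stores them into fds[].
      }
   */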
2494
2495 PRE(sys_faccessat)
2496 {
2497 /* int faccessat(int fd, const char *path, int amode, int flag); */
2498
2499 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2500 This is different from Linux, for example, where glibc sign-extends it. */
2501 Int fd = (Int) ARG1;
2502
2503 PRINT("sys_faccessat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2504 (HChar *) ARG2, SARG3, SARG4);
2505 PRE_REG_READ4(long, "faccessat", int, fd, const char *, path,
2506 int, amode, int, flag);
2507 PRE_MEM_RASCIIZ("faccessat(path)", ARG2);
2508
2509 /* Be strict but ignore fd for absolute path. */
2510 if (fd != VKI_AT_FDCWD
2511 && ML_(safe_to_deref)((void *) ARG2, 1)
2512 && ((HChar *) ARG2)[0] != '/'
2513 && !ML_(fd_allowed)(fd, "faccessat", tid, False))
2514 SET_STATUS_Failure(VKI_EBADF);
2515 }
2516
2517 PRE(sys_mknodat)
2518 {
2519 /* int mknodat(int fd, char *fname, mode_t fmode, dev_t dev); */
2520
2521 /* Interpret the first argument as 32-bit value even on 64-bit architecture.
2522 This is different from Linux, for example, where glibc sign-extends it. */
2523 Int fd = (Int) ARG1;
2524
2525 PRINT("sys_mknodat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
2526 (HChar *) ARG2, SARG3, SARG4);
2527 PRE_REG_READ4(long, "mknodat", int, fd, const char *, fname,
2528 vki_mode_t, fmode, vki_dev_t, dev);
2529 PRE_MEM_RASCIIZ("mknodat(fname)", ARG2);
2530
2531 /* Be strict but ignore fd for absolute path. */
2532 if (fd != VKI_AT_FDCWD
2533 && ML_(safe_to_deref)((void *) ARG2, 1)
2534 && ((HChar *) ARG2)[0] != '/'
2535 && !ML_(fd_allowed)(fd, "mknodat", tid, False))
2536 SET_STATUS_Failure(VKI_EBADF);
2537
2538 *flags |= SfMayBlock;
2539 }
2540
2541 POST(sys_mknodat)
2542 {
2543 if (!ML_(fd_allowed)(RES, "mknodat", tid, True)) {
2544 VG_(close)(RES);
2545 SET_STATUS_Failure(VKI_EMFILE);
2546 } else if (VG_(clo_track_fds))
2547 ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
2548 }
2549
2550 PRE(sys_sysi86)
2551 {
2552 /* int sysi86(int cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); */
2553 PRINT("sys_sysi86 ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
2554 PRE_REG_READ4(long, "sysi86", int, cmd, uintptr_t, arg1, uintptr_t, arg2,
2555 uintptr_t, arg3);
2556
2557 switch (ARG1 /*cmd*/) {
2558 case VKI_SI86FPSTART:
2559 PRE_MEM_WRITE("sysi86(fp_hw)", ARG2, sizeof(vki_uint_t));
2560 /* ARG3 is a desired x87 FCW value, ARG4 is a desired SSE MXCSR value.
2561 They are passed to the kernel but V will change them later anyway
2562 (this is a general Valgrind limitation described in the official
2563 documentation). */
2564 break;
2565 default:
2566 VG_(unimplemented)("Syswrap of the sysi86 call with cmd %ld.", SARG1);
2567 /*NOTREACHED*/
2568 break;
2569 }
2570 }
2571
2572 POST(sys_sysi86)
2573 {
2574 switch (ARG1 /*cmd*/) {
2575 case VKI_SI86FPSTART:
2576 POST_MEM_WRITE(ARG2, sizeof(vki_uint_t));
2577 break;
2578 default:
2579 vg_assert(0);
2580 break;
2581 }
2582 }
2583
2584 PRE(sys_shmsys)
2585 {
2586 /* Kernel: uintptr_t shmsys(int opcode, uintptr_t a0, uintptr_t a1,
2587 uintptr_t a2, uintptr_t a3);
2588 */
2589 *flags |= SfMayBlock;
2590
2591 switch (ARG1 /*opcode*/) {
2592 case VKI_SHMAT:
2593 /* Libc: void *shmat(int shmid, const void *shmaddr, int shmflg); */
2594 PRINT("sys_shmsys ( %ld, %ld, %#lx, %ld )",
2595 SARG1, SARG2, ARG3, SARG4);
2596 PRE_REG_READ4(long, SC2("shmsys", "shmat"), int, opcode,
2597 int, shmid, const void *, shmaddr, int, shmflg);
2598
2599 UWord addr = ML_(generic_PRE_sys_shmat)(tid, ARG2, ARG3, ARG4);
2600 if (addr == 0)
2601 SET_STATUS_Failure(VKI_EINVAL);
2602 else
2603 ARG3 = addr;
2604 break;
2605
2606 case VKI_SHMCTL:
2607 /* Libc: int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
2608 switch (ARG3 /* cmd */) {
2609 case VKI_SHM_LOCK:
2610 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2611 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "lock"),
2612 int, opcode, int, shmid, int, cmd);
2613 break;
2614 case VKI_SHM_UNLOCK:
2615 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2616 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "unlock"),
2617 int, opcode, int, shmid, int, cmd);
2618 break;
2619 case VKI_IPC_RMID:
2620 PRINT("sys_shmsys ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
2621 PRE_REG_READ3(long, SC3("shmsys", "shmctl", "rmid"),
2622 int, opcode, int, shmid, int, cmd);
2623 break;
2624 case VKI_IPC_SET:
2625 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2626 SARG1, SARG2, SARG3, ARG4);
2627 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set"),
2628 int, opcode, int, shmid, int, cmd,
2629 struct vki_shmid_ds *, buf);
2630
2631 struct vki_shmid_ds *buf = (struct vki_shmid_ds *) ARG4;
2632 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.uid)",
2633 buf->shm_perm.uid);
2634 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.gid)",
2635 buf->shm_perm.gid);
2636 PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.mode)",
2637 buf->shm_perm.mode);
2638 break;
2639 case VKI_IPC_STAT:
2640 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2641 SARG1, SARG2, SARG3, ARG4);
2642 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat"),
2643 int, opcode, int, shmid, int, cmd,
2644 struct vki_shmid_ds *, buf);
2645 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat, buf)", ARG4,
2646 sizeof(struct vki_shmid_ds));
2647 break;
2648 case VKI_IPC_SET64:
2649 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2650 SARG1, SARG2, SARG3, ARG4);
2651 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set64"),
2652 int, opcode, int, shmid, int, cmd,
2653 struct vki_shmid_ds64 *, buf);
2654
2655 struct vki_shmid_ds64 *buf64 = (struct vki_shmid_ds64 *) ARG4;
2656 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2657 "buf->shmx_perm.ipcx_uid)",
2658 buf64->shmx_perm.ipcx_uid);
2659 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2660 "buf->shmx_perm.ipcx_gid)",
2661 buf64->shmx_perm.ipcx_gid);
2662 PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
2663 "buf->shmx_perm.ipcx_mode)",
2664 buf64->shmx_perm.ipcx_mode);
2665 break;
2666 case VKI_IPC_STAT64:
2667 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2668 SARG1, SARG2, SARG3, ARG4);
2669 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat64"),
2670 int, opcode, int, shmid, int, cmd,
2671 struct vki_shmid_ds64 *, buf);
2672 PRE_MEM_WRITE("shmsys(shmctl, ipc_stat64, buf)", ARG4,
2673 sizeof(struct vki_shmid_ds64));
2674 break;
2675 #if defined(SOLARIS_SHM_NEW)
2676 case VKI_IPC_XSTAT64:
2677 PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
2678 SARG1, SARG2, SARG3, ARG4);
2679 PRE_REG_READ4(long, SC3("shmsys", "shmctl", "xstat64"),
2680 int, opcode, int, shmid, int, cmd,
2681 struct vki_shmid_ds64 *, buf);
2682 PRE_MEM_WRITE("shmsys(shmctl, ipc_xstat64, buf)", ARG4,
2683 sizeof(struct vki_shmid_xds64));
2684 break;
2685 #endif /* SOLARIS_SHM_NEW */
2686 default:
2687 VG_(unimplemented)("Syswrap of the shmsys(shmctl) call with "
2688 "cmd %ld.", SARG3);
2689 /*NOTREACHED*/
2690 break;
2691 }
2692 break;
2693
2694 case VKI_SHMDT:
2695 /* Libc: int shmdt(const void *shmaddr); */
2696 PRINT("sys_shmsys ( %ld, %#lx )", SARG1, ARG2);
2697 PRE_REG_READ2(long, SC2("shmsys", "shmdt"), int, opcode,
2698 const void *, shmaddr);
2699
2700 if (!ML_(generic_PRE_sys_shmdt)(tid, ARG2))
2701 SET_STATUS_Failure(VKI_EINVAL);
2702 break;
2703
2704 case VKI_SHMGET:
2705 /* Libc: int shmget(key_t key, size_t size, int shmflg); */
2706 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2707 SARG1, SARG2, ARG3, ARG4);
2708 PRE_REG_READ4(long, SC2("shmsys", "shmget"), int, opcode,
2709 vki_key_t, key, vki_size_t, size, int, shmflg);
2710 break;
2711
2712 case VKI_SHMIDS:
2713 /* Libc: int shmids(int *buf, uint_t nids, uint_t *pnids); */
2714 PRINT("sys_shmsys ( %ld, %#lx, %lu, %#lx )",
2715 SARG1, ARG2, ARG3, ARG4);
2716 PRE_REG_READ4(long, SC2("shmsys", "shmids"), int, opcode,
2717 int *, buf, vki_uint_t, nids, vki_uint_t *, pnids);
2718
2719       PRE_MEM_WRITE("shmsys(shmids, buf)", ARG2, ARG3 * sizeof(int));
2720 PRE_MEM_WRITE("shmsys(shmids, pnids)", ARG4, sizeof(vki_uint_t));
2721 break;
2722
2723 #if defined(SOLARIS_SHM_NEW)
2724 case VKI_SHMADV:
2725 /* Libc: int shmadv(int shmid, uint_t cmd, uint_t *advice); */
2726 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld )",
2727 SARG1, SARG2, ARG3, ARG4);
2728 PRE_REG_READ4(long, SC2("shmsys", "shmadv"), int, opcode,
2729 int, shmid, vki_uint_t, cmd, vki_uint_t *, advice);
2730
2731 switch (ARG3 /*cmd*/) {
2732 case VKI_SHM_ADV_GET:
2733 PRE_MEM_WRITE("shmsys(shmadv, advice)", ARG4,
2734 sizeof(vki_uint_t));
2735 break;
2736 case VKI_SHM_ADV_SET:
2737 PRE_MEM_READ("shmsys(shmadv, advice)", ARG4,
2738 sizeof(vki_uint_t));
2739 break;
2740 default:
2741 VG_(unimplemented)("Syswrap of the shmsys(shmadv) call with "
2742 "cmd %lu.", ARG3);
2743 /*NOTREACHED*/
2744 break;
2745 }
2746 break;
2747
2748 case VKI_SHMGET_OSM:
2749 /* Libc: int shmget_osm(key_t key, size_t size, int shmflg,
2750 size_t granule_sz);
2751 */
2752 PRINT("sys_shmsys ( %ld, %ld, %lu, %ld, %lu )",
2753 SARG1, SARG2, ARG3, SARG4, ARG5);
2754 PRE_REG_READ5(long, SC2("shmsys", "shmget_osm"), int, opcode,
2755 vki_key_t, key, vki_size_t, size, int, shmflg,
2756 vki_size_t, granule_sz);
2757 break;
2758 #endif /* SOLARIS_SHM_NEW */
2759
2760 default:
2761 VG_(unimplemented)("Syswrap of the shmsys call with opcode %ld.",
2762 SARG1);
2763 /*NOTREACHED*/
2764 break;
2765 }
2766 }
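/* For orientation: Solaris libc funnels the whole SysV shared-memory API
   through this single shmsys syscall, e.g. (sketch):

      int id  = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);  // opcode SHMGET
      void *p = shmat(id, NULL, 0);                           // opcode SHMAT
      shmctl(id, IPC_RMID, NULL);                             // opcode SHMCTL
      shmdt(p);                                               // opcode SHMDT

   which is why ARG1 selects the sub-handler above and the generic
   ML_(generic_PRE_sys_shmat)/ML_(generic_PRE_sys_shmdt) helpers are reused
   for the address-space bookkeeping. */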
2767
2768 POST(sys_shmsys)
2769 {
2770 switch (ARG1 /*opcode*/) {
2771 case VKI_SHMAT:
2772 ML_(generic_POST_sys_shmat)(tid, RES, ARG2, ARG3, ARG4);
2773 break;
2774
2775 case VKI_SHMCTL:
2776 switch (ARG3 /*cmd*/) {
2777 case VKI_SHM_LOCK:
2778 case VKI_SHM_UNLOCK:
2779 case VKI_IPC_RMID:
2780 case VKI_IPC_SET:
2781 break;
2782 case VKI_IPC_STAT:
2783 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds));
2784 break;
2785 case VKI_IPC_SET64:
2786 break;
2787 case VKI_IPC_STAT64:
2788 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds64));
2789 break;
2790 #if defined(SOLARIS_SHM_NEW)
2791 case VKI_IPC_XSTAT64:
2792 POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_xds64));
2793 break;
2794 #endif /* SOLARIS_SHM_NEW */
2795 default:
2796 vg_assert(0);
2797 break;
2798 }
2799 break;
2800
2801 case VKI_SHMDT:
2802 ML_(generic_POST_sys_shmdt)(tid, RES, ARG2);
2803 break;
2804
2805 case VKI_SHMGET:
2806 break;
2807
2808 case VKI_SHMIDS:
2809 {
2810 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2811
2812          vki_uint_t *pnids = (vki_uint_t *) ARG4;
2813 if (*pnids <= ARG3)
2814             POST_MEM_WRITE(ARG2, *pnids * sizeof(int));
2815 }
2816 break;
2817
2818 #if defined(SOLARIS_SHM_NEW)
2819 case VKI_SHMADV:
2820 switch (ARG3 /*cmd*/) {
2821 case VKI_SHM_ADV_GET:
2822 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2823 break;
2824 case VKI_SHM_ADV_SET:
2825 break;
2826 default:
2827 vg_assert(0);
2828 break;
2829 }
2830 break;
2831
2832 case VKI_SHMGET_OSM:
2833 break;
2834 #endif /* SOLARIS_SHM_NEW */
2835
2836 default:
2837 vg_assert(0);
2838 break;
2839 }
2840 }
2841
2842 PRE(sys_semsys)
2843 {
2844 /* Kernel: int semsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
2845 uintptr_t a4);
2846 */
2847 *flags |= SfMayBlock;
2848
2849 switch (ARG1 /*opcode*/) {
2850 case VKI_SEMCTL:
2851 /* Libc: int semctl(int semid, int semnum, int cmd...); */
2852 switch (ARG4) {
2853 case VKI_IPC_STAT:
2854 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2855 SARG1, SARG2, SARG3, SARG4, ARG5);
2856 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat"), int, opcode,
2857 int, semid, int, semnum, int, cmd,
2858 struct vki_semid_ds *, arg);
2859 break;
2860 case VKI_IPC_SET:
2861 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2862 SARG1, SARG2, SARG3, SARG4, ARG5);
2863 PRE_REG_READ5(long, SC3("semsys", "semctl", "set"), int, opcode,
2864 int, semid, int, semnum, int, cmd,
2865 struct vki_semid_ds *, arg);
2866 break;
2867 case VKI_IPC_STAT64:
2868 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2869 SARG1, SARG2, SARG3, SARG4, ARG5);
2870 PRE_REG_READ5(long, SC3("semsys", "semctl", "stat64"), int, opcode,
2871 int, semid, int, semnum, int, cmd,
2872 struct vki_semid64_ds *, arg);
2873 break;
2874 case VKI_IPC_SET64:
2875 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2876 SARG1, SARG2, SARG3, SARG4, ARG5);
2877 PRE_REG_READ5(long, SC3("semsys", "semctl", "set64"), int, opcode,
2878 int, semid, int, semnum, int, cmd,
2879 struct vki_semid64_ds *, arg);
2880 break;
2881 case VKI_IPC_RMID:
2882 PRINT("sys_semsys ( %ld, %ld, %ld )", SARG1, SARG3, SARG4);
2883 PRE_REG_READ3(long, SC3("semsys", "semctl", "rmid"), int, opcode,
2884 int, semid, int, cmd);
2885 break;
2886 case VKI_GETALL:
2887 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2888 SARG1, SARG2, SARG4, ARG5);
2889 PRE_REG_READ4(long, SC3("semsys", "semctl", "getall"), int, opcode,
2890 int, semid, int, cmd, ushort_t *, arg);
2891 break;
2892 case VKI_SETALL:
2893 PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
2894 SARG1, SARG2, SARG4, ARG5);
2895 PRE_REG_READ4(long, SC3("semsys", "semctl", "setall"), int, opcode,
2896 int, semid, int, cmd, ushort_t *, arg);
2897 break;
2898 case VKI_GETVAL:
2899 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2900 SARG1, SARG2, SARG3, SARG4);
2901 PRE_REG_READ4(long, SC3("semsys", "semctl", "getval"), int, opcode,
2902 int, semid, int, semnum, int, cmd);
2903 break;
2904 case VKI_SETVAL:
2905 PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
2906 SARG1, SARG2, SARG3, SARG4, ARG5);
2907 PRE_REG_READ5(long, SC3("semsys", "semctl", "setval"), int, opcode,
2908 int, semid, int, semnum, int, cmd,
2909 union vki_semun *, arg);
2910 break;
2911 case VKI_GETPID:
2912 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2913 SARG1, SARG2, SARG3, SARG4);
2914 PRE_REG_READ4(long, SC3("semsys", "semctl", "getpid"), int, opcode,
2915 int, semid, int, semnum, int, cmd);
2916 break;
2917 case VKI_GETNCNT:
2918 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2919 SARG1, SARG2, SARG3, SARG4);
2920 PRE_REG_READ4(long, SC3("semsys", "semctl", "getncnt"),
2921 int, opcode, int, semid, int, semnum, int, cmd);
2922 break;
2923 case VKI_GETZCNT:
2924 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
2925 SARG1, SARG2, SARG3, SARG4);
2926 PRE_REG_READ4(long, SC3("semsys", "semctl", "getzcnt"),
2927 int, opcode, int, semid, int, semnum, int, cmd);
2928 break;
2929 default:
2930 VG_(unimplemented)("Syswrap of the semsys(semctl) call "
2931 "with cmd %ld.", SARG4);
2932 /*NOTREACHED*/
2933 break;
2934 }
2935 ML_(generic_PRE_sys_semctl)(tid, ARG2, ARG3, ARG4, ARG5);
2936 break;
2937 case VKI_SEMGET:
2938 /* Libc: int semget(key_t key, int nsems, int semflg); */
2939 PRINT("sys_semsys ( %ld, %ld, %ld, %ld )", SARG1, SARG2, SARG3, SARG4);
2940 PRE_REG_READ4(long, SC2("semsys", "semget"), int, opcode,
2941 vki_key_t, key, int, nsems, int, semflg);
2942 break;
2943 case VKI_SEMOP:
2944 /* Libc: int semop(int semid, struct sembuf *sops, size_t nsops); */
2945 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
2946 PRE_REG_READ4(long, SC2("semsys", "semop"), int, opcode, int, semid,
2947 struct vki_sembuf *, sops, vki_size_t, nsops);
2948 ML_(generic_PRE_sys_semop)(tid, ARG2, ARG3, ARG4);
2949 break;
2950 case VKI_SEMIDS:
2951 /* Libc: int semids(int *buf, uint_t nids, uint_t *pnids); */
2952 PRINT("sys_semsys ( %ld, %#lx, %lu, %#lx )", SARG1, ARG2, ARG3, ARG4);
2953 PRE_REG_READ4(long, SC2("semsys", "semids"), int, opcode, int *, buf,
2954 vki_uint_t, nids, vki_uint_t *, pnids);
2955
2956       PRE_MEM_WRITE("semsys(semids, buf)", ARG2, ARG3 * sizeof(int));
2957 PRE_MEM_WRITE("semsys(semids, pnids)", ARG4, sizeof(vki_uint_t));
2958 break;
2959 case VKI_SEMTIMEDOP:
2960 /* Libc: int semtimedop(int semid, struct sembuf *sops, size_t nsops,
2961 const struct timespec *timeout);
2962 */
2963 PRINT("sys_semsys ( %ld, %ld, %#lx, %lu, %#lx )", SARG1, SARG2, ARG3,
2964 ARG4, ARG5);
2965 PRE_REG_READ5(long, SC2("semsys", "semtimedop"), int, opcode,
2966 int, semid, struct vki_sembuf *, sops, vki_size_t, nsops,
2967 struct vki_timespec *, timeout);
2968 ML_(generic_PRE_sys_semtimedop)(tid, ARG2, ARG3, ARG4, ARG5);
2969 break;
2970 default:
2971 VG_(unimplemented)("Syswrap of the semsys call with opcode %ld.", SARG1);
2972 /*NOTREACHED*/
2973 break;
2974 }
2975 }
2976
2977 POST(sys_semsys)
2978 {
2979 switch (ARG1 /*opcode*/) {
2980 case VKI_SEMCTL:
2981 ML_(generic_POST_sys_semctl)(tid, RES, ARG2, ARG3, ARG4, ARG5);
2982 break;
2983 case VKI_SEMGET:
2984 case VKI_SEMOP:
2985 break;
2986 case VKI_SEMIDS:
2987 {
2988 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
2989
2990          vki_uint_t *pnids = (vki_uint_t *) ARG4;
2991 if (*pnids <= ARG3)
2992             POST_MEM_WRITE(ARG2, *pnids * sizeof(int));
2993 }
2994 break;
2995 case VKI_SEMTIMEDOP:
2996 break;
2997 default:
2998 vg_assert(0);
2999 break;
3000 }
3001 }
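/* Typical client usage of the shmids()/semids() enumeration pattern handled
   in the PRE/POST code above (hypothetical sketch):

      uint_t n = 0;
      semids(NULL, 0, &n);                  // first call: learn the count
      int *ids = malloc(n * sizeof *ids);
      if (ids != NULL)
         semids(ids, n, &n);                // second call: fill the buffer

   The POST handlers only mark the id buffer as written when *pnids <= nids,
   i.e. when the kernel actually copied ids out instead of merely reporting
   how many exist. */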
3002
3003 /* ---------------------------------------------------------------------
3004 ioctl wrappers
3005 ------------------------------------------------------------------ */
3006
3007 PRE(sys_ioctl)
3008 {
3009 /* int ioctl(int fildes, int request, ...); */
3010 *flags |= SfMayBlock;
3011
3012    /* Prevent sign-extending the switch case values to 64 bits on 64-bit
3013       architectures. */
3014 Int cmd = (Int) ARG2;
3015
3016 switch (cmd /*request*/) {
3017 /* Handle 2-arg specially here (they do not use ARG3 at all). */
3018 case VKI_TIOCNOTTY:
3019 case VKI_TIOCSCTTY:
3020 PRINT("sys_ioctl ( %ld, %#lx )", SARG1, ARG2);
3021 PRE_REG_READ2(long, "ioctl", int, fd, int, request);
3022 break;
3023 /* And now come the 3-arg ones. */
3024 default:
3025 PRINT("sys_ioctl ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
3026 PRE_REG_READ3(long, "ioctl", int, fd, int, request, intptr_t, arg);
3027 break;
3028 }
3029
3030 switch (cmd /*request*/) {
3031 /* pools */
3032 case VKI_POOL_STATUSQ:
3033 PRE_MEM_WRITE("ioctl(POOL_STATUSQ)", ARG3, sizeof(vki_pool_status_t));
3034 break;
3035
3036 /* mntio */
3037 case VKI_MNTIOC_GETMNTANY:
3038 {
3039 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY)",
3040 ARG3, sizeof(struct vki_mntentbuf));
3041
3042 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3043 if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
3044 PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_emp)",
3045 (Addr) embuf->mbuf_emp,
3046 sizeof(struct vki_mnttab));
3047 PRE_MEM_WRITE("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_buf)",
3048 (Addr) embuf->mbuf_buf,
3049 embuf->mbuf_bufsize);
3050 struct vki_mnttab *mnt
3051 = (struct vki_mnttab *) embuf->mbuf_emp;
3052 if (ML_(safe_to_deref(mnt, sizeof(struct vki_mnttab)))) {
3053 if (mnt->mnt_special != NULL)
3054 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_special)",
3055 (Addr) mnt->mnt_special);
3056 if (mnt->mnt_mountp != NULL)
3057 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mountp)",
3058 (Addr) mnt->mnt_mountp);
3059 if (mnt->mnt_fstype != NULL)
3060 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_fstype)",
3061 (Addr) mnt->mnt_fstype);
3062 if (mnt->mnt_mntopts != NULL)
3063 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mntopts)",
3064 (Addr) mnt->mnt_mntopts);
3065 if (mnt->mnt_time != NULL)
3066 PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_time)",
3067 (Addr) mnt->mnt_time);
3068 }
3069 }
3070 }
3071 break;
3072
3073 /* termio/termios */
3074 case VKI_TCGETA:
3075 PRE_MEM_WRITE("ioctl(TCGETA)", ARG3, sizeof(struct vki_termio));
3076 break;
3077 case VKI_TCGETS:
3078 PRE_MEM_WRITE("ioctl(TCGETS)", ARG3, sizeof(struct vki_termios));
3079 break;
3080 case VKI_TCSETS:
3081 PRE_MEM_READ("ioctl(TCSETS)", ARG3, sizeof(struct vki_termios));
3082 break;
3083 case VKI_TCSETSW:
3084 PRE_MEM_READ("ioctl(TCSETSW)", ARG3, sizeof(struct vki_termios));
3085 break;
3086 case VKI_TCSETSF:
3087 PRE_MEM_READ("ioctl(TCSETSF)", ARG3, sizeof(struct vki_termios));
3088 break;
3089 case VKI_TIOCGWINSZ:
3090 PRE_MEM_WRITE("ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize));
3091 break;
3092 case VKI_TIOCSWINSZ:
3093 PRE_MEM_READ("ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize));
3094 break;
3095 case VKI_TIOCGPGRP:
3096 PRE_MEM_WRITE("ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t));
3097 break;
3098 case VKI_TIOCSPGRP:
3099 PRE_MEM_READ("ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t));
3100 break;
3101 case VKI_TIOCGSID:
3102 PRE_MEM_WRITE("ioctl(TIOCGSID)", ARG3, sizeof(vki_pid_t));
3103 break;
3104 case VKI_TIOCNOTTY:
3105 case VKI_TIOCSCTTY:
3106 break;
3107
3108 /* STREAMS */
3109 case VKI_I_PUSH:
3110 PRE_MEM_RASCIIZ("ioctl(I_PUSH)", ARG3);
3111 break;
3112 case VKI_I_STR:
3113 {
3114 PRE_MEM_READ("ioctl(I_STR)", ARG3, sizeof(struct vki_strioctl));
3115
3116 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3117 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3118 if ((p->ic_dp != NULL) && (p->ic_len > 0)) {
3119 PRE_MEM_READ("ioctl(I_STR, strioctl->ic_dp)",
3120 (Addr) p->ic_dp, p->ic_len);
3121 }
3122 }
3123 }
3124 break;
3125 case VKI_I_PEEK:
3126 {
3127 /* Try hard not to mark strpeek->*buf.len members as being read. */
3128 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3129
3130 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.maxlen)",
3131 p->ctlbuf.maxlen);
3132 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.len)",
3133 p->ctlbuf.len);
3134 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3135 p->ctlbuf.buf);
3136 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.maxlen)",
3137 p->databuf.maxlen);
3138 PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->databuf.len)",
3139 p->databuf.len);
3140 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.buf)",
3141 p->databuf.buf);
3142 PRE_FIELD_READ("ioctl(I_PEEK, strpeek->flags)", p->flags);
3143 /*PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->flags)", p->flags);*/
3144
3145 if (ML_(safe_to_deref(p, sizeof(*p)))) {
3146 if (p->ctlbuf.buf && p->ctlbuf.maxlen > 0)
3147 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
3148 (Addr)p->ctlbuf.buf, p->ctlbuf.maxlen);
3149 if (p->databuf.buf && p->databuf.maxlen > 0)
3150 PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->databuf.buf)",
3151 (Addr)p->databuf.buf, p->databuf.maxlen);
3152 }
3153 }
3154 break;
3155 case VKI_I_CANPUT:
3156 break;
3157
3158 /* sockio */
3159 case VKI_SIOCGIFCONF:
3160 {
3161 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3162 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_len)", p->ifc_len);
3163 PRE_FIELD_READ("ioctl(SIOCGIFCONF, ifconf->ifc_buf)", p->ifc_buf);
3164 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3165 if ((p->ifc_buf != NULL) && (p->ifc_len > 0))
3166 PRE_MEM_WRITE("ioctl(SIOCGIFCONF, ifconf->ifc_buf)",
3167 (Addr) p->ifc_buf, p->ifc_len);
3168 }
3169       /* ifc_len also gets written to during the SIOCGIFCONF ioctl. */
3170 }
3171 break;
3172 case VKI_SIOCGIFFLAGS:
3173 {
3174 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3175 PRE_FIELD_READ("ioctl(SIOCGIFFLAGS, ifreq->ifr_name)", p->ifr_name);
3176 PRE_FIELD_WRITE("ioctl(SIOCGIFFLAGS, ifreq->ifr_flags)", p->ifr_flags);
3177 }
3178 break;
3179 case VKI_SIOCGIFNETMASK:
3180 {
3181 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3182 PRE_FIELD_READ("ioctl(SIOCGIFFLAGS, ifreq->ifr_name)", p->ifr_name);
3183 PRE_FIELD_WRITE("ioctl(SIOCGIFFLAGS, ifreq->ifr_addr)", p->ifr_addr);
3184 }
3185 break;
3186 case VKI_SIOCGIFNUM:
3187 PRE_MEM_WRITE("ioctl(SIOCGIFNUM)", ARG3, sizeof(int));
3188 break;
3189 case VKI_SIOCGLIFBRDADDR:
3190 {
3191 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3192 PRE_FIELD_READ("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_name)",
3193 p->lifr_name);
3194 PRE_FIELD_WRITE("ioctl(SIOCGLIFBRDADDR, lifreq->lifr_addr)",
3195 p->lifr_addr);
3196 }
3197 break;
3198 case VKI_SIOCGLIFCONF:
3199 {
3200 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3201 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_len)", p->lifc_len);
3202 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)", p->lifc_buf);
3203 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_family)",
3204 p->lifc_family);
3205 PRE_FIELD_READ("ioctl(SIOCGLIFCONF, lifconf->lifc_flags)",
3206 p->lifc_flags);
3207 if (ML_(safe_to_deref)(p, sizeof(*p))) {
3208 if ((p->lifc_buf != NULL) && (p->lifc_len > 0))
3209 PRE_MEM_WRITE("ioctl(SIOCGLIFCONF, lifconf->lifc_buf)",
3210 (Addr) p->lifc_buf, p->lifc_len);
3211 }
3212       /* lifc_len also gets written to during the SIOCGLIFCONF ioctl. */
3213 }
3214 break;
3215 case VKI_SIOCGLIFFLAGS:
3216 {
3217 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3218 PRE_FIELD_READ("ioctl(SIOCGLIFFLAGS, lifreq->lifr_name)",
3219 p->lifr_name);
3220 PRE_FIELD_WRITE("ioctl(SIOCGLIFFLAGS, lifreq->lifr_flags)",
3221 p->lifr_flags);
3222 }
3223 break;
3224 case VKI_SIOCGLIFNETMASK:
3225 {
3226 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3227 PRE_FIELD_READ("ioctl(SIOCGLIFNETMASK, lifreq->lifr_name)",
3228 p->lifr_name);
3229 PRE_FIELD_WRITE("ioctl(SIOCGLIFNETMASK, lifreq->lifr_addr)",
3230 p->lifr_addr);
3231 }
3232 break;
3233 case VKI_SIOCGLIFNUM:
3234 {
3235 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3236 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_family)",
3237 p->lifn_family);
3238 PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_flags)",
3239 p->lifn_flags);
3240 PRE_FIELD_WRITE("ioctl(SIOCGLIFNUM, lifn->lifn_count)",
3241 p->lifn_count);
3242 }
3243 break;
3244
3245 /* filio */
3246 case VKI_FIOSETOWN:
3247 PRE_MEM_READ("ioctl(FIOSETOWN)", ARG3, sizeof(vki_pid_t));
3248 break;
3249 case VKI_FIOGETOWN:
3250 PRE_MEM_WRITE("ioctl(FIOGETOWN)", ARG3, sizeof(vki_pid_t));
3251 break;
3252
3253 /* CRYPTO */
3254 case VKI_CRYPTO_GET_PROVIDER_LIST:
3255 {
3256 vki_crypto_get_provider_list_t *pl =
3257 (vki_crypto_get_provider_list_t *) ARG3;
3258 PRE_FIELD_READ("ioctl(CRYPTO_GET_PROVIDER_LIST, pl->pl_count)",
3259 pl->pl_count);
3260
3261 if (ML_(safe_to_deref)(pl, sizeof(*pl))) {
3262 PRE_MEM_WRITE("ioctl(CRYPTO_GET_PROVIDER_LIST)", ARG3,
3263 MAX(1, pl->pl_count) *
3264 sizeof(vki_crypto_get_provider_list_t));
3265 }
3266         /* Save the requested count into the otherwise unused ARG4 below,
3267            once we know the pre-handler succeeded.
3268         */
3269 }
3270 break;
3271
3272 /* dtrace */
3273 case VKI_DTRACEHIOC_REMOVE:
3274 break;
3275 case VKI_DTRACEHIOC_ADDDOF:
3276 {
3277 vki_dof_helper_t *dh = (vki_dof_helper_t *) ARG3;
3278 PRE_MEM_RASCIIZ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_mod)",
3279 (Addr) dh->dofhp_mod);
3280         PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_addr)",
3281                        dh->dofhp_addr);
3282         PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_dof)",
3283                        dh->dofhp_dof);
3284 }
3285 break;
3286
3287 default:
3288 ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
3289 break;
3290 }
3291
3292 /* Be strict. */
3293 if (!ML_(fd_allowed)(ARG1, "ioctl", tid, False)) {
3294 SET_STATUS_Failure(VKI_EBADF);
3295 } else if (ARG2 == VKI_CRYPTO_GET_PROVIDER_LIST) {
3296 /* Save the requested count to unused ARG4 now. */
3297 ARG4 = ARG3;
3298 }
3299 }
3300
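/* Illustrative sketch only (not part of the wrapper itself): the
   CRYPTO_GET_PROVIDER_LIST pre-handler above stashes ARG3 into the otherwise
   unused ARG4 slot once its checks pass, so the post-handler can tell that
   the buffer was validated.  A hypothetical consumer of that stash, mirroring
   what POST(sys_ioctl) does below, looks roughly like this:

      if (cmd == VKI_CRYPTO_GET_PROVIDER_LIST && ARG4 != 0) {
         vki_crypto_get_provider_list_t *pl =
            (vki_crypto_get_provider_list_t *) ARG3;
         if (pl->pl_return_value == VKI_CRYPTO_SUCCESS)
            POST_MEM_WRITE((Addr) pl->pl_list,
                           pl->pl_count * sizeof(vki_crypto_provider_entry_t));
      }
*/
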
3301 POST(sys_ioctl)
3302 {
3303    /* Prevent sign-extending the switch case values to 64 bits on 64-bit
3304       architectures. */
3305 Int cmd = (Int) ARG2;
3306
3307 switch (cmd /*request*/) {
3308 /* pools */
3309 case VKI_POOL_STATUSQ:
3310 POST_MEM_WRITE(ARG3, sizeof(vki_pool_status_t));
3311 break;
3312
3313 /* mntio */
3314 case VKI_MNTIOC_GETMNTANY:
3315 {
3316 struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
3317 struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
3318
3319 POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_mnttab));
3320 if (mnt != NULL) {
3321 if (mnt->mnt_special != NULL)
3322 POST_MEM_WRITE((Addr) mnt->mnt_special,
3323 VG_(strlen)(mnt->mnt_special) + 1);
3324 if (mnt->mnt_mountp != NULL)
3325 POST_MEM_WRITE((Addr) mnt->mnt_mountp,
3326 VG_(strlen)(mnt->mnt_mountp) + 1);
3327 if (mnt->mnt_fstype != NULL)
3328 POST_MEM_WRITE((Addr) mnt->mnt_fstype,
3329 VG_(strlen)(mnt->mnt_fstype) + 1);
3330 if (mnt->mnt_mntopts != NULL)
3331 POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
3332 VG_(strlen)(mnt->mnt_mntopts) + 1);
3333 if (mnt->mnt_time != NULL)
3334 POST_MEM_WRITE((Addr) mnt->mnt_time,
3335 VG_(strlen)(mnt->mnt_time) + 1);
3336 }
3337 }
3338 break;
3339
3340 /* termio/termios */
3341 case VKI_TCGETA:
3342 POST_MEM_WRITE(ARG3, sizeof(struct vki_termio));
3343 break;
3344 case VKI_TCGETS:
3345 POST_MEM_WRITE(ARG3, sizeof(struct vki_termios));
3346 break;
3347 case VKI_TCSETS:
3348 break;
3349 case VKI_TCSETSW:
3350 break;
3351 case VKI_TCSETSF:
3352 break;
3353 case VKI_TIOCGWINSZ:
3354 POST_MEM_WRITE(ARG3, sizeof(struct vki_winsize));
3355 break;
3356 case VKI_TIOCSWINSZ:
3357 break;
3358 case VKI_TIOCGPGRP:
3359 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3360 break;
3361 case VKI_TIOCSPGRP:
3362 break;
3363 case VKI_TIOCGSID:
3364 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3365 break;
3366 case VKI_TIOCNOTTY:
3367 case VKI_TIOCSCTTY:
3368 break;
3369
3370 /* STREAMS */
3371 case VKI_I_PUSH:
3372 break;
3373 case VKI_I_STR:
3374 {
3375 struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
3376
3377 POST_FIELD_WRITE(p->ic_len);
3378 if ((p->ic_dp != NULL) && (p->ic_len > 0))
3379 POST_MEM_WRITE((Addr) p->ic_dp, p->ic_len);
3380 }
3381 break;
3382 case VKI_I_PEEK:
3383 {
3384 struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
3385
3386 POST_FIELD_WRITE(p->ctlbuf.len);
3387 POST_FIELD_WRITE(p->databuf.len);
3388 POST_FIELD_WRITE(p->flags);
3389
3390 if (p->ctlbuf.buf && p->ctlbuf.len > 0)
3391 POST_MEM_WRITE((Addr)p->ctlbuf.buf, p->ctlbuf.len);
3392 if (p->databuf.buf && p->databuf.len > 0)
3393 POST_MEM_WRITE((Addr)p->databuf.buf, p->databuf.len);
3394 }
3395 break;
3396 case VKI_I_CANPUT:
3397 break;
3398
3399 /* sockio */
3400 case VKI_SIOCGIFCONF:
3401 {
3402 struct vki_ifconf *p = (struct vki_ifconf *) ARG3;
3403 POST_FIELD_WRITE(p->ifc_len);
3404 POST_FIELD_WRITE(p->ifc_req);
3405 if ((p->ifc_req != NULL) && (p->ifc_len > 0))
3406 POST_MEM_WRITE((Addr) p->ifc_req, p->ifc_len);
3407 }
3408 break;
3409 case VKI_SIOCGIFFLAGS:
3410 {
3411 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3412 POST_FIELD_WRITE(p->ifr_flags);
3413 }
3414 break;
3415 case VKI_SIOCGIFNETMASK:
3416 {
3417 struct vki_ifreq *p = (struct vki_ifreq *) ARG3;
3418 POST_FIELD_WRITE(p->ifr_addr);
3419 }
3420 break;
3421 case VKI_SIOCGIFNUM:
3422 POST_MEM_WRITE(ARG3, sizeof(int));
3423 break;
3424 case VKI_SIOCGLIFBRDADDR:
3425 {
3426 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3427 POST_FIELD_WRITE(p->lifr_addr);
3428 }
3429 break;
3430 case VKI_SIOCGLIFCONF:
3431 {
3432 struct vki_lifconf *p = (struct vki_lifconf *) ARG3;
3433 POST_FIELD_WRITE(p->lifc_len);
3434 POST_FIELD_WRITE(p->lifc_req);
3435 if ((p->lifc_req != NULL) && (p->lifc_len > 0))
3436 POST_MEM_WRITE((Addr) p->lifc_req, p->lifc_len);
3437 }
3438 break;
3439 case VKI_SIOCGLIFFLAGS:
3440 {
3441 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3442 POST_FIELD_WRITE(p->lifr_flags);
3443 }
3444 break;
3445 case VKI_SIOCGLIFNETMASK:
3446 {
3447 struct vki_lifreq *p = (struct vki_lifreq *) ARG3;
3448 POST_FIELD_WRITE(p->lifr_addr);
3449 }
3450 break;
3451 case VKI_SIOCGLIFNUM:
3452 {
3453 struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
3454 POST_FIELD_WRITE(p->lifn_count);
3455 }
3456 break;
3457
3458 /* filio */
3459 case VKI_FIOSETOWN:
3460 break;
3461 case VKI_FIOGETOWN:
3462 POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
3463 break;
3464
3465 /* CRYPTO */
3466 case VKI_CRYPTO_GET_PROVIDER_LIST:
3467 {
3468 vki_crypto_get_provider_list_t *pl =
3469 (vki_crypto_get_provider_list_t *) ARG3;
3470
3471 POST_FIELD_WRITE(pl->pl_count);
3472 POST_FIELD_WRITE(pl->pl_return_value);
3473
3474 if ((ARG4 > 0) && (pl->pl_return_value == VKI_CRYPTO_SUCCESS))
3475 POST_MEM_WRITE((Addr) pl->pl_list, pl->pl_count *
3476 sizeof(vki_crypto_provider_entry_t));
3477 }
3478 break;
3479
3480 /* dtrace */
3481 case VKI_DTRACEHIOC_REMOVE:
3482 case VKI_DTRACEHIOC_ADDDOF:
3483 break;
3484
3485 default:
3486 /* Not really anything to do since ioctl direction hints are hardly used
3487 on Solaris. */
3488 break;
3489 }
3490 }
3491
3492 PRE(sys_fchownat)
3493 {
3494 /* int fchownat(int fd, const char *path, uid_t owner, gid_t group,
3495 int flag); */
3496
3497    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
3498 This is different from Linux, for example, where glibc sign-extends it. */
3499 Int fd = (Int) ARG1;
3500
3501 PRINT("sys_fchownat ( %d, %#lx(%s), %ld, %ld, %ld )", fd,
3502 ARG2, (HChar *) ARG2, SARG3, SARG4, ARG5);
3503 PRE_REG_READ5(long, "fchownat", int, fd, const char *, path,
3504 vki_uid_t, owner, vki_gid_t, group, int, flag);
3505
3506 if (ARG2)
3507 PRE_MEM_RASCIIZ("fchownat(path)", ARG2);
3508
3509 /* Be strict but ignore fd for absolute path. */
3510 if (fd != VKI_AT_FDCWD
3511 && ML_(safe_to_deref)((void *) ARG2, 1)
3512 && ((HChar *) ARG2)[0] != '/'
3513 && !ML_(fd_allowed)(fd, "fchownat", tid, False))
3514 SET_STATUS_Failure(VKI_EBADF);
3515 }
3516
3517 PRE(sys_fdsync)
3518 {
3519 /* int fdsync(int fd, int flag); */
3520 PRINT("sys_fdsync ( %ld, %ld )", SARG1, SARG2);
3521 PRE_REG_READ2(long, "fdsync", int, fd, int, flag);
3522
3523 /* Be strict. */
3524 if (!ML_(fd_allowed)(ARG1, "fdsync", tid, False))
3525 SET_STATUS_Failure(VKI_EBADF);
3526 }
3527
3528 PRE(sys_execve)
3529 {
3530 Int i, j;
3531    /* This is a Solaris-specific version of the generic pre-execve wrapper. */
3532
3533 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3534 /* int execve(uintptr_t file, const char **argv, const char **envp,
3535 int flags); */
3536 PRINT("sys_execve ( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
3537 PRE_REG_READ4(long, "execve", uintptr_t, file, const char **, argv,
3538 const char **, envp, int, flags);
3539
3540 #else
3541
3542 /* int execve(const char *fname, const char **argv, const char **envp); */
3543 PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )",
3544 ARG1, (HChar *) ARG1, ARG2, ARG3);
3545 PRE_REG_READ3(long, "execve", const char *, file, const char **, argv,
3546 const char **, envp);
3547 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3548
3549 Bool ARG1_is_fd = False;
3550 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3551 if (ARG4 & VKI_EXEC_DESCRIPTOR) {
3552 ARG1_is_fd = True;
3553 }
3554 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3555
3556 if (ARG1_is_fd == False)
3557 PRE_MEM_RASCIIZ("execve(filename)", ARG1);
3558 if (ARG2)
3559 ML_(pre_argv_envp)(ARG2, tid, "execve(argv)", "execve(argv[i])");
3560 if (ARG3)
3561 ML_(pre_argv_envp)(ARG3, tid, "execve(envp)", "execve(envp[i])");
3562
3563 /* Erk. If the exec fails, then the following will have made a mess of
3564 things which makes it hard for us to continue. The right thing to do is
3565 piece everything together again in POST(execve), but that's close to
3566 impossible. Instead, we make an effort to check that the execve will
3567 work before actually doing it. */
3568
3569 const HChar *fname = (const HChar *) ARG1;
3570 if (ARG1_is_fd) {
3571 if (!ML_(fd_allowed)(ARG1, "execve", tid, False)) {
3572 SET_STATUS_Failure(VKI_EBADF);
3573 return;
3574 }
3575
3576 if (VG_(resolve_filename)(ARG1, &fname) == False) {
3577 SET_STATUS_Failure(VKI_EBADF);
3578 return;
3579 }
3580
3581 struct vg_stat stats;
3582 if (VG_(fstat)(ARG1, &stats) != 0) {
3583 SET_STATUS_Failure(VKI_EBADF);
3584 return;
3585 }
3586
3587 if (stats.nlink > 1)
3588 VG_(unimplemented)("Syswrap of execve where fd points to a hardlink.");
3589 }
3590
3591 /* Check that the name at least begins in client-accessible storage. */
3592 if (ARG1_is_fd == False) {
3593 if ((fname == NULL) || !ML_(safe_to_deref)(fname, 1)) {
3594 SET_STATUS_Failure(VKI_EFAULT);
3595 return;
3596 }
3597 }
3598
3599 /* Check that the args at least begin in client-accessible storage.
3600       Solaris does not allow performing the exec without any arguments
3601       specified. */
3602 if (!ARG2 /* obviously bogus */ ||
3603 !VG_(am_is_valid_for_client)(ARG2, 1, VKI_PROT_READ)) {
3604 SET_STATUS_Failure(VKI_EFAULT);
3605 return;
3606 }
3607
3608 /* Debug-only printing. */
3609 if (0) {
3610 VG_(printf)("ARG1 = %#lx(%s)\n", ARG1, fname);
3611 if (ARG2) {
3612 Int q;
3613 HChar** vec = (HChar**)ARG2;
3614
3615 VG_(printf)("ARG2 = ");
3616 for (q = 0; vec[q]; q++)
3617 VG_(printf)("%p(%s) ", vec[q], vec[q]);
3618 VG_(printf)("\n");
3619 }
3620 else
3621 VG_(printf)("ARG2 = null\n");
3622 }
3623
3624 /* Decide whether or not we want to follow along. */
3625 /* Make 'child_argv' be a pointer to the child's arg vector (skipping the
3626 exe name) */
3627 const HChar **child_argv = (const HChar **) ARG2;
3628 if (child_argv[0] == NULL)
3629 child_argv = NULL;
3630 Bool trace_this_child = VG_(should_we_trace_this_child)(fname, child_argv);
3631
3632 /* Do the important checks: it is a file, is executable, permissions are
3633       ok, etc. We allow setuid executables to run only when we are not
3634       simulating them, that is, when they are to be run natively. */
3635 Bool setuid_allowed = trace_this_child ? False : True;
3636 SysRes res = VG_(pre_exec_check)(fname, NULL, setuid_allowed);
3637 if (sr_isError(res)) {
3638 SET_STATUS_Failure(sr_Err(res));
3639 return;
3640 }
3641
3642 /* If we're tracing the child, and the launcher name looks bogus (possibly
3643 because launcher.c couldn't figure it out, see comments therein) then we
3644 have no option but to fail. */
3645 if (trace_this_child &&
3646 (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
3647 SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
3648 return;
3649 }
3650
3651 /* After this point, we can't recover if the execve fails. */
3652 VG_(debugLog)(1, "syswrap", "Exec of %s\n", fname);
3653
3654 /* Terminate gdbserver if it is active. */
3655 if (VG_(clo_vgdb) != Vg_VgdbNo) {
3656 /* If the child will not be traced, we need to terminate gdbserver to
3657          clean up the gdbserver resources (e.g. the FIFO files). If the child
3658          will be traced, we also terminate gdbserver: the new Valgrind will
3659          start a fresh gdbserver after exec. */
3660 VG_(gdbserver)(0);
3661 }
3662
3663 /* Resistance is futile. Nuke all other threads. POSIX mandates this.
3664 (Really, nuke them all, since the new process will make its own new
3665 thread.) */
3666 VG_(nuke_all_threads_except)(tid, VgSrc_ExitThread);
3667 VG_(reap_threads)(tid);
3668
3669 /* Set up the child's exe path. */
3670 const HChar *path = fname;
3671 const HChar *launcher_basename = NULL;
3672 if (trace_this_child) {
3673 /* We want to exec the launcher. Get its pre-remembered path. */
3674 path = VG_(name_of_launcher);
3675 /* VG_(name_of_launcher) should have been acquired by m_main at
3676 startup. */
3677 vg_assert(path);
3678
3679 launcher_basename = VG_(strrchr)(path, '/');
3680 if (!launcher_basename || launcher_basename[1] == '\0')
3681          launcher_basename = path; /* hmm, very dubious */
3682 else
3683 launcher_basename++;
3684 }
3685
3686 /* Set up the child's environment.
3687
3688 Remove the valgrind-specific stuff from the environment so the child
3689 doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
3690 unconditionally, since if we are tracing the child, the child valgrind
3691 will set up the appropriate client environment. Nb: we make a copy of
3692 the environment before trying to mangle it as it might be in read-only
3693 memory (bug #101881).
3694
3695 Then, if tracing the child, set VALGRIND_LIB for it. */
3696 HChar **envp = NULL;
3697 if (ARG3 != 0) {
3698 envp = VG_(env_clone)((HChar**)ARG3);
3699 vg_assert(envp != NULL);
3700 VG_(env_remove_valgrind_env_stuff)(envp, True /*ro_strings*/, NULL);
3701 }
3702
3703 if (trace_this_child) {
3704 /* Set VALGRIND_LIB in ARG3 (the environment). */
3705 VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
3706 }
3707
3708 /* Set up the child's args. If not tracing it, they are simply ARG2.
3709 Otherwise, they are:
3710
3711 [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..],
3712
3713 except that the first VG_(args_for_valgrind_noexecpass) args are
3714 omitted. */
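   /* A concrete, purely hypothetical illustration of the layout above: if
      Valgrind was started as "valgrind --tool=memcheck ./parent" and the
      traced client calls
         execve("/bin/date", (char *[]){ "date", "-u", NULL }, envp);
      then the vector built below is roughly
         { "valgrind", "--tool=memcheck", "/bin/date", "-u", NULL }
      (assuming VG_(args_for_valgrind_noexecpass) is 0, so no args are
      skipped). */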
3715 HChar **argv = NULL;
3716 if (!trace_this_child)
3717 argv = (HChar **) ARG2;
3718 else {
3719 Int tot_args;
3720
3721 vg_assert(VG_(args_for_valgrind));
3722 vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
3723 vg_assert(VG_(args_for_valgrind_noexecpass)
3724 <= VG_(sizeXA)(VG_(args_for_valgrind)));
3725
3726 /* How many args in total will there be? */
3727 /* launcher basename */
3728 tot_args = 1;
3729 /* V's args */
3730 tot_args += VG_(sizeXA)(VG_(args_for_valgrind));
3731 tot_args -= VG_(args_for_valgrind_noexecpass);
3732 /* name of client exe */
3733 tot_args++;
3734 /* args for client exe, skipping [0] */
3735 HChar **arg2copy = (HChar **) ARG2;
3736 if (arg2copy[0] != NULL)
3737 for (i = 1; arg2copy[i]; i++)
3738 tot_args++;
3739 /* allocate */
3740 argv = VG_(malloc)("syswrap.exec.5", (tot_args + 1) * sizeof(HChar*));
3741 /* copy */
3742 j = 0;
3743 argv[j++] = CONST_CAST(HChar *, launcher_basename);
3744 for (i = 0; i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
3745 if (i < VG_(args_for_valgrind_noexecpass))
3746 continue;
3747 argv[j++] = *(HChar**)VG_(indexXA)(VG_(args_for_valgrind), i);
3748 }
3749 argv[j++] = CONST_CAST(HChar *, fname);
3750 if (arg2copy[0] != NULL)
3751 for (i = 1; arg2copy[i]; i++)
3752 argv[j++] = arg2copy[i];
3753 argv[j++] = NULL;
3754 /* check */
3755 vg_assert(j == tot_args + 1);
3756 }
3757
3758 /* Set the signal state up for exec.
3759
3760 We need to set the real signal state to make sure the exec'd process
3761 gets SIG_IGN properly.
3762
3763 Also set our real sigmask to match the client's sigmask so that the
3764 exec'd child will get the right mask. First we need to clear out any
3765      pending signals so they don't get delivered, which would confuse
3766 things.
3767
3768 XXX This is a bug - the signals should remain pending, and be delivered
3769 to the new process after exec. There's also a race-condition, since if
3770 someone delivers us a signal between the sigprocmask and the execve,
3771 we'll still get the signal. Oh well.
3772 */
3773 {
3774 vki_sigset_t allsigs;
3775 vki_siginfo_t info;
3776
3777 /* What this loop does: it queries SCSS (the signal state that the
3778 client _thinks_ the kernel is in) by calling VG_(do_sys_sigaction),
3779 and modifies the real kernel signal state accordingly. */
3780 for (i = 1; i < VG_(max_signal); i++) {
3781 vki_sigaction_fromK_t sa_f;
3782 vki_sigaction_toK_t sa_t;
3783 VG_(do_sys_sigaction)(i, NULL, &sa_f);
3784 VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
3785 VG_(sigaction)(i, &sa_t, NULL);
3786 }
3787
3788 VG_(sigfillset)(&allsigs);
3789 while (VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
3790 ;
3791
3792 ThreadState *tst = VG_(get_ThreadState)(tid);
3793 VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
3794 }
3795
3796 /* Restore the DATA rlimit for the child. */
3797 VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
3798
3799 /* Debug-only printing. */
3800 if (0) {
3801 HChar **cpp;
3802 VG_(printf)("exec: %s\n", path);
3803 for (cpp = argv; cpp && *cpp; cpp++)
3804 VG_(printf)("argv: %s\n", *cpp);
3805 if (0)
3806 for (cpp = envp; cpp && *cpp; cpp++)
3807 VG_(printf)("env: %s\n", *cpp);
3808 }
3809
3810 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3811 res = VG_(do_syscall4)(__NR_execve, (UWord) path, (UWord) argv,
3812 (UWord) envp, ARG4 & ~VKI_EXEC_DESCRIPTOR);
3813 #else
3814 res = VG_(do_syscall3)(__NR_execve, (UWord) path, (UWord) argv,
3815 (UWord) envp);
3816 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3817 SET_STATUS_from_SysRes(res);
3818
3819 /* If we got here, then the execve failed. We've already made way too much
3820 of a mess to continue, so we have to abort. */
3821 vg_assert(FAILURE);
3822 #if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
3823 if (ARG1_is_fd)
3824 VG_(message)(Vg_UserMsg, "execve(%ld, %#lx, %#lx, %lu) failed, "
3825 "errno %ld\n", SARG1, ARG2, ARG3, ARG4, ERR);
3826 else
3827 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx, %ld) failed, errno"
3828 " %lu\n", ARG1, (HChar *) ARG1, ARG2, ARG3, SARG4, ERR);
3829 #else
3830 VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %lu\n",
3831 ARG1, (HChar *) ARG1, ARG2, ARG3, ERR);
3832 #endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
3833 VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
3834 "execve() failing, so I'm dying.\n");
3835 VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
3836 "or work out how to recover.\n");
3837 VG_(exit)(101);
3838 /*NOTREACHED*/
3839 }
3840
3841 static void pre_mem_read_flock(ThreadId tid, struct vki_flock *lock)
3842 {
3843 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3844 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3845 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3846 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3847 }
3848
3849 #if defined(VGP_x86_solaris)
3850 static void pre_mem_read_flock64(ThreadId tid, struct vki_flock64 *lock)
3851 {
3852 PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
3853 PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
3854 PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
3855 PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
3856 }
3857 #endif /* VGP_x86_solaris */
3858
3859 PRE(sys_fcntl)
3860 {
3861 /* int fcntl(int fildes, int cmd, ...); */
3862
3863 switch (ARG2 /*cmd*/) {
3864 /* These ones ignore ARG3. */
3865 case VKI_F_GETFD:
3866 case VKI_F_GETFL:
3867 case VKI_F_GETXFL:
3868 PRINT("sys_fcntl ( %ld, %ld )", SARG1, SARG2);
3869 PRE_REG_READ2(long, "fcntl", int, fildes, int, cmd);
3870 break;
3871
3872 /* These ones use ARG3 as "arg". */
3873 case VKI_F_DUPFD:
3874 case VKI_F_SETFD:
3875 case VKI_F_SETFL:
3876 case VKI_F_DUP2FD:
3877 case VKI_F_BADFD:
3878 PRINT("sys_fcntl ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
3879 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd, int, arg);
3880 /* Check if a client program isn't going to poison any of V's output
3881 fds. */
3882 if (ARG2 == VKI_F_DUP2FD &&
3883 !ML_(fd_allowed)(ARG3, "fcntl(F_DUP2FD)", tid, False)) {
3884 SET_STATUS_Failure(VKI_EBADF);
3885 return;
3886 }
3887 break;
3888
3889 /* These ones use ARG3 as "native lock" (input only). */
3890 case VKI_F_SETLK:
3891 case VKI_F_SETLKW:
3892 case VKI_F_ALLOCSP:
3893 case VKI_F_FREESP:
3894 case VKI_F_SETLK_NBMAND:
3895 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3896 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3897 struct flock *, lock);
3898 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3899 break;
3900
3901 /* This one uses ARG3 as "native lock" (input&output). */
3902 case VKI_F_GETLK:
3903 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3904 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3905 struct flock *, lock);
3906 pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
3907 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock));
3908 break;
3909
3910 #if defined(VGP_x86_solaris)
3911 /* These ones use ARG3 as "transitional 64b lock" (input only). */
3912 case VKI_F_SETLK64:
3913 case VKI_F_SETLKW64:
3914 case VKI_F_ALLOCSP64:
3915 case VKI_F_FREESP64:
3916 case VKI_F_SETLK64_NBMAND:
3917 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3918 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3919 struct flock64 *, lock);
3920 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
3921 break;
3922
3923 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
3924 case VKI_F_GETLK64:
3925 PRINT("sys_fcntl ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3926 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3927 struct flock64 *, lock);
3928 pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
3929 PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock64));
3930 break;
3931 #endif /* VGP_x86_solaris */
3932
3933 /* These ones use ARG3 as "fshare". */
3934 case VKI_F_SHARE:
3935 case VKI_F_UNSHARE:
3936 case VKI_F_SHARE_NBMAND:
3937 PRINT("sys_fcntl[ARG3=='fshare'] ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
3938 PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
3939 struct fshare *, sh);
3940 PRE_MEM_READ("fcntl(fshare)", ARG3, sizeof(struct vki_fshare));
3941 break;
3942
3943 default:
3944 VG_(unimplemented)("Syswrap of the fcntl call with cmd %ld.", SARG2);
3945 /*NOTREACHED*/
3946 break;
3947 }
3948
3949 if (ARG2 == VKI_F_SETLKW
3950 #if defined(VGP_x86_solaris)
3951 || ARG2 == VKI_F_SETLKW64
3952 #endif /* VGP_x86_solaris */
3953 )
3954 *flags |= SfMayBlock;
3955
3956 /* Be strict. */
3957 if (!ML_(fd_allowed)(ARG1, "fcntl", tid, False))
3958 SET_STATUS_Failure(VKI_EBADF);
3959 }
3960
3961 POST(sys_fcntl)
3962 {
3963 switch (ARG2 /*cmd*/) {
3964 case VKI_F_DUPFD:
3965 if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD)", tid, True)) {
3966 VG_(close)(RES);
3967 SET_STATUS_Failure(VKI_EMFILE);
3968 }
3969 else if (VG_(clo_track_fds))
3970 ML_(record_fd_open_named)(tid, RES);
3971 break;
3972
3973 case VKI_F_DUP2FD:
3974 if (!ML_(fd_allowed)(RES, "fcntl(F_DUP2FD)", tid, True)) {
3975 VG_(close)(RES);
3976 SET_STATUS_Failure(VKI_EMFILE);
3977 }
3978 else if (VG_(clo_track_fds))
3979 ML_(record_fd_open_named)(tid, RES);
3980 break;
3981
3982 /* This one uses ARG3 as "native lock" (input&output). */
3983 case VKI_F_GETLK:
3984 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock));
3985 break;
3986
3987 #if defined(VGP_x86_solaris)
3988 /* This one uses ARG3 as "transitional 64b lock" (input&output). */
3989 case VKI_F_GETLK64:
3990 POST_MEM_WRITE(ARG3, sizeof(struct vki_flock64));
3991 break;
3992 #endif /* VGP_x86_solaris */
3993
3994 default:
3995 break;
3996 }
3997 }
3998
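/* Why F_GETLK is treated as input&output above (an illustrative sketch, not
   part of the wrappers): the caller fills in a candidate lock and the kernel
   overwrites the very same struct with the first conflicting lock, or sets
   l_type to F_UNLCK when there is none.  A hypothetical client would do:

      struct flock fl;
      fl.l_type   = F_WRLCK;    // candidate lock, read by the kernel
      fl.l_whence = SEEK_SET;
      fl.l_start  = 0;
      fl.l_len    = 0;          // 0 means "up to EOF"
      if (fcntl(fd, F_GETLK, &fl) == 0 && fl.l_type != F_UNLCK)
         ;                      // fl now describes the blocking lock
*/
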
3999 PRE(sys_renameat)
4000 {
4001 /* int renameat(int fromfd, const char *old, int tofd, const char *new); */
4002
4003 /* Interpret the first and third arguments as 32-bit values even on 64-bit
4004       architectures. This is different from Linux, for example, where glibc
4005 sign-extends them. */
4006 Int fromfd = (Int) ARG1;
4007 Int tofd = (Int) ARG3;
4008
4009 *flags |= SfMayBlock;
4010 PRINT("sys_renameat ( %d, %#lx(%s), %d, %#lx(%s) )", fromfd,
4011 ARG2, (HChar *) ARG2, tofd, ARG4, (HChar *) ARG4);
4012 PRE_REG_READ4(long, "renameat", int, fromfd, const char *, old,
4013 int, tofd, const char *, new);
4014
4015 PRE_MEM_RASCIIZ("renameat(old)", ARG2);
4016 PRE_MEM_RASCIIZ("renameat(new)", ARG4);
4017
4018 /* Be strict but ignore fromfd/tofd for absolute old/new. */
4019 if (fromfd != VKI_AT_FDCWD
4020 && ML_(safe_to_deref)((void *) ARG2, 1)
4021 && ((HChar *) ARG2)[0] != '/'
4022 && !ML_(fd_allowed)(fromfd, "renameat", tid, False)) {
4023 SET_STATUS_Failure(VKI_EBADF);
4024 }
4025 if (tofd != VKI_AT_FDCWD
4026 && ML_(safe_to_deref)((void *) ARG4, 1)
4027 && ((HChar *) ARG4)[0] != '/'
4028 && !ML_(fd_allowed)(tofd, "renameat", tid, False)) {
4029 SET_STATUS_Failure(VKI_EBADF);
4030 }
4031 }
4032
4033 PRE(sys_unlinkat)
4034 {
4035 /* int unlinkat(int dirfd, const char *pathname, int flags); */
4036
4037    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
4038 This is different from Linux, for example, where glibc sign-extends it. */
4039 Int dfd = (Int) ARG1;
4040
4041 *flags |= SfMayBlock;
4042 PRINT("sys_unlinkat ( %d, %#lx(%s), %ld )", dfd, ARG2, (HChar *) ARG2,
4043 SARG3);
4044 PRE_REG_READ3(long, "unlinkat", int, dirfd, const char *, pathname,
4045 int, flags);
4046 PRE_MEM_RASCIIZ("unlinkat(pathname)", ARG2);
4047
4048 /* Be strict but ignore dfd for absolute pathname. */
4049 if (dfd != VKI_AT_FDCWD
4050 && ML_(safe_to_deref)((void *) ARG2, 1)
4051 && ((HChar *) ARG2)[0] != '/'
4052 && !ML_(fd_allowed)(dfd, "unlinkat", tid, False))
4053 SET_STATUS_Failure(VKI_EBADF);
4054 }
4055
4056 PRE(sys_fstatat)
4057 {
4058 /* int fstatat(int fildes, const char *path, struct stat *buf,
4059 int flag); */
4060
4061    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
4062 This is different from Linux, for example, where glibc sign-extends it. */
4063 Int fd = (Int) ARG1;
4064
4065 PRINT("sys_fstatat ( %d, %#lx(%s), %#lx, %ld )", fd, ARG2,
4066 (HChar *) ARG2, ARG3, SARG4);
4067 PRE_REG_READ4(long, "fstatat", int, fildes, const char *, path,
4068 struct stat *, buf, int, flag);
4069 if (ARG2) {
4070 /* Only test ARG2 if it isn't NULL. The kernel treats the NULL-case as
4071 fstat(fildes, buf). */
4072 PRE_MEM_RASCIIZ("fstatat(path)", ARG2);
4073 }
4074 PRE_MEM_WRITE("fstatat(buf)", ARG3, sizeof(struct vki_stat));
4075
4076 /* Be strict but ignore fildes for absolute path. */
4077 if (fd != VKI_AT_FDCWD
4078 && ML_(safe_to_deref)((void *) ARG2, 1)
4079 && ((HChar *) ARG2)[0] != '/'
4080 && !ML_(fd_allowed)(fd, "fstatat", tid, False))
4081 SET_STATUS_Failure(VKI_EBADF);
4082 }
4083
4084 POST(sys_fstatat)
4085 {
4086 POST_MEM_WRITE(ARG3, sizeof(struct vki_stat));
4087 }
4088
4089 PRE(sys_openat)
4090 {
4091 /* int openat(int fildes, const char *filename, int flags);
4092 int openat(int fildes, const char *filename, int flags, mode_t mode); */
4093
4094    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
4095 This is different from Linux, for example, where glibc sign-extends it. */
4096 Int fd = (Int) ARG1;
4097
4098 if (ARG3 & VKI_O_CREAT) {
4099 /* 4-arg version */
4100 PRINT("sys_openat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2, (HChar *) ARG2,
4101 SARG3, SARG4);
4102 PRE_REG_READ4(long, "openat", int, fildes, const char *, filename,
4103 int, flags, vki_mode_t, mode);
4104 }
4105 else {
4106 /* 3-arg version */
4107 PRINT("sys_openat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2,
4108 SARG3);
4109 PRE_REG_READ3(long, "openat", int, fildes, const char *, filename,
4110 int, flags);
4111 }
4112
4113 PRE_MEM_RASCIIZ("openat(filename)", ARG2);
4114
4115 /* Be strict but ignore fildes for absolute pathname. */
4116 if (fd != VKI_AT_FDCWD
4117 && ML_(safe_to_deref)((void *) ARG2, 1)
4118 && ((HChar *) ARG2)[0] != '/'
4119 && !ML_(fd_allowed)(fd, "openat", tid, False)) {
4120 SET_STATUS_Failure(VKI_EBADF);
4121 return;
4122 }
4123
4124 if (ML_(handle_auxv_open)(status, (const HChar *) ARG2, ARG3))
4125 return;
4126
4127 if (handle_psinfo_open(status, True /*use_openat*/, (const HChar *) ARG2,
4128 fd, ARG3, ARG4))
4129 return;
4130
4131 #if defined(SOLARIS_PROC_CMDLINE)
4132 if (handle_cmdline_open(status, (const HChar *) ARG2))
4133 return;
4134 #endif /* SOLARIS_PROC_CMDLINE */
4135
4136 *flags |= SfMayBlock;
4137 }
4138
4139 POST(sys_openat)
4140 {
4141 if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
4142 VG_(close)(RES);
4143 SET_STATUS_Failure(VKI_EMFILE);
4144 }
4145 else if (VG_(clo_track_fds))
4146 ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
4147 }
4148
4149 PRE(sys_tasksys)
4150 {
4151 /* Kernel: long tasksys(int code, projid_t projid, uint_t flags,
4152 void *projidbuf, size_t pbufsz);
4153 */
4154 switch (ARG1 /*code*/) {
4155 case 0:
4156 /* Libc: taskid_t settaskid(projid_t project, uint_t flags); */
4157 PRINT("sys_tasksys ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
4158 PRE_REG_READ3(long, SC2("tasksys", "settaskid"), int, code,
4159 vki_projid_t, projid, vki_uint_t, flags);
4160 break;
4161 case 1:
4162 /* Libc: taskid_t gettaskid(void); */
4163 PRINT("sys_tasksys ( %ld )", SARG1);
4164 PRE_REG_READ1(long, SC2("tasksys", "gettaskid"), int, code);
4165 break;
4166 case 2:
4167 /* Libc: projid_t getprojid(void); */
4168 PRINT("sys_tasksys ( %ld )", SARG1);
4169 PRE_REG_READ1(long, SC2("tasksys", "getprojid"), int, code);
4170 break;
4171 case 3:
4172 /* Libproject: size_t projlist(id_t *idbuf, size_t idbufsz); */
4173 PRINT("sys_tasksys ( %ld, %#lx, %lu )", SARG1, ARG4, ARG5);
4174 PRE_REG_READ3(long, SC2("tasksys", "projlist"), int, code,
4175 vki_id_t *, idbuf, vki_size_t, idbufsz);
4176 PRE_MEM_WRITE("tasksys(idbuf)", ARG4, ARG5);
4177 break;
4178 default:
4179 VG_(unimplemented)("Syswrap of the tasksys call with code %ld.", SARG1);
4180 /*NOTREACHED*/
4181 break;
4182 }
4183 }
4184
4185 POST(sys_tasksys)
4186 {
4187 switch (ARG1 /*code*/) {
4188 case 0:
4189 case 1:
4190 case 2:
4191 break;
4192 case 3:
4193 if ((ARG4 != 0) && (ARG5 != 0))
4194 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
4195 break;
4196 default:
4197 vg_assert(0);
4198 break;
4199 }
4200 }
4201
4202 PRE(sys_lwp_park)
4203 {
4204 /* Kernel: int lwp_park(int which, uintptr_t arg1, uintptr_t arg2);
4205 */
4206 *flags |= SfMayBlock;
4207 switch (ARG1 /*which*/) {
4208 case 0:
4209 /* Libc: int lwp_park(timespec_t *timeout, id_t lwpid); */
4210 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4211 PRE_REG_READ3(long, SC2("lwp_park", "lwp_park"), int, which,
4212 timespec_t *, timeout, vki_id_t, lwpid);
4213 if (ARG2) {
4214 PRE_MEM_READ("lwp_park(timeout)", ARG2, sizeof(vki_timespec_t));
4215 /*PRE_MEM_WRITE("lwp_park(timeout)", ARG2,
4216 sizeof(vki_timespec_t));*/
4217 }
4218 break;
4219 case 1:
4220 /* Libc: int lwp_unpark(id_t lwpid); */
4221 PRINT("sys_lwp_park ( %ld, %ld )", SARG1, SARG2);
4222 PRE_REG_READ2(long, SC2("lwp_park", "lwp_unpark"), int, which,
4223 vki_id_t, lwpid);
4224 break;
4225 case 2:
4226 /* Libc: int lwp_unpark_all(id_t *lwpid, int nids); */
4227 PRINT("sys_lwp_park ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
4228 PRE_REG_READ3(long, SC2("lwp_park", "lwp_unpark_all"), int, which,
4229 id_t *, lwpid, int, nids);
4230 PRE_MEM_READ("lwp_park(lwpid)", ARG2, ARG3 * sizeof(vki_id_t));
4231 break;
4232 default:
4233 VG_(unimplemented)("Syswrap of the lwp_park call with which %ld.", SARG1);
4234 /*NOTREACHED*/
4235 break;
4236 }
4237 }
4238
4239 POST(sys_lwp_park)
4240 {
4241 switch (ARG1 /*which*/) {
4242 case 0:
4243 if (ARG2)
4244 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
4245 break;
4246 case 1:
4247 case 2:
4248 break;
4249 default:
4250 vg_assert(0);
4251 break;
4252 }
4253 }
4254
4255 PRE(sys_sendfilev)
4256 {
4257 /* Kernel: ssize_t sendfilev(int opcode, int fd,
4258 const struct sendfilevec *vec,
4259 int sfvcnt, size_t *xferred);
4260 */
4261 PRINT("sys_sendfilev ( %ld, %ld, %#lx, %ld, %#lx )",
4262 SARG1, SARG2, ARG3, SARG4, ARG5);
4263
4264 switch (ARG1 /*opcode*/) {
4265 case VKI_SENDFILEV:
4266 {
4267 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4268 const struct vki_sendfilevec *, vec,
4269 int, sfvcnt, vki_size_t *, xferred);
4270
4271 PRE_MEM_READ("sendfilev(vec)", ARG3,
4272 ARG4 * sizeof(struct vki_sendfilevec));
4273 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4274
4275 struct vki_sendfilevec *vec = (struct vki_sendfilevec *) ARG3;
4276 if (ML_(safe_to_deref)(vec, ARG4 *
4277 sizeof(struct vki_sendfilevec))) {
4278 UInt i;
4279 for (i = 0; i < ARG4; i++) {
4280 HChar desc[35]; // large enough
4281 if (vec[i].sfv_fd == VKI_SFV_FD_SELF) {
4282 VG_(snprintf)(desc, sizeof(desc),
4283                             "sendfilev(vec[%u].sfv_off)", i);
4284 PRE_MEM_READ(desc, vec[i].sfv_off, vec[i].sfv_len);
4285 } else {
4286 VG_(snprintf)(desc, sizeof(desc),
4287 "sendfilev(vec[%u].sfv_fd)", i);
4288 if (!ML_(fd_allowed)(vec[i].sfv_fd, desc, tid, False))
4289 SET_STATUS_Failure(VKI_EBADF);
4290 }
4291 }
4292 }
4293 }
4294 break;
4295 case VKI_SENDFILEV64:
4296 {
4297 PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
4298 const struct vki_sendfilevec64 *, vec,
4299 int, sfvcnt, vki_size_t *, xferred);
4300
4301 PRE_MEM_READ("sendfilev(vec)", ARG3,
4302 ARG4 * sizeof(struct vki_sendfilevec64));
4303 PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
4304
4305 struct vki_sendfilevec64 *vec64 =
4306 (struct vki_sendfilevec64 *) ARG3;
4307 if (ML_(safe_to_deref)(vec64, ARG4 *
4308 sizeof(struct vki_sendfilevec64))) {
4309 UInt i;
4310 for (i = 0; i < ARG4; i++) {
4311 HChar desc[35]; // large enough
4312 if (vec64[i].sfv_fd == VKI_SFV_FD_SELF) {
4313 VG_(snprintf)(desc, sizeof(desc),
4314                             "sendfilev(vec[%u].sfv_off)", i);
4315 PRE_MEM_READ(desc, vec64[i].sfv_off, vec64[i].sfv_len);
4316 } else {
4317 VG_(snprintf)(desc, sizeof(desc),
4318 "sendfilev(vec[%u].sfv_fd)", i);
4319 if (!ML_(fd_allowed)(vec64[i].sfv_fd, desc,
4320 tid, False))
4321 SET_STATUS_Failure(VKI_EBADF);
4322 }
4323 }
4324 }
4325 }
4326 break;
4327 default:
4328 VG_(unimplemented)("Syswrap of the sendfilev call with "
4329 "opcode %ld.", SARG1);
4330 /*NOTREACHED*/
4331 break;
4332 }
4333
4334 /* Be strict. */
4335 if (!ML_(fd_allowed)(ARG2, "sendfilev(fd)", tid, False))
4336 SET_STATUS_Failure(VKI_EBADF);
4337
4338 *flags |= SfMayBlock;
4339 }
4340
4341 POST(sys_sendfilev)
4342 {
4343 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
4344 }
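
/* Illustrative sketch of why the pre-handler above reads sfv_off as a memory
   range only for SFV_FD_SELF: with that pseudo-descriptor sfv_off carries a
   pointer to in-process data, whereas for a real descriptor it is an ordinary
   file offset.  A hypothetical caller mixing both (sock_fd, file_fd,
   file_size and xferred are placeholders):

      struct sendfilevec vec[2];
      size_t xferred;
      vec[0].sfv_fd   = SFV_FD_SELF;   // data taken from memory
      vec[0].sfv_flag = 0;
      vec[0].sfv_off  = (off_t) (uintptr_t) "HTTP/1.1 200 OK\r\n\r\n";
      vec[0].sfv_len  = 19;
      vec[1].sfv_fd   = file_fd;       // data taken from a file
      vec[1].sfv_flag = 0;
      vec[1].sfv_off  = 0;             // offset within file_fd
      vec[1].sfv_len  = file_size;
      sendfilev(sock_fd, vec, 2, &xferred);
*/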
4345
4346 #if defined(SOLARIS_LWP_NAME_SYSCALL)
4347 PRE(sys_lwp_name)
4348 {
4349 /* int lwp_name(int opcode, id_t lwpid, char *name, size_t len); */
4350 PRINT("sys_lwp_name ( %ld, %ld, %#lx, %lu )", SARG1, SARG2, ARG3, ARG4);
4351
4352 switch (ARG1 /*opcode*/) {
4353 case 0:
4354 /* lwp_setname */
4355 PRE_REG_READ3(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4356 char *, name);
4357 PRE_MEM_RASCIIZ("lwp_name(name)", ARG3);
4358 break;
4359 case 1:
4360 /* lwp_getname */
4361 PRE_REG_READ4(long, "lwp_name", int, opcode, vki_id_t, lwpid,
4362 char *, name, vki_size_t, len);
4363 PRE_MEM_WRITE("lwp_name(name)", ARG3, ARG4);
4364 break;
4365 default:
4366 VG_(unimplemented)("Syswrap of the lwp_name call with opcode %ld.", SARG1);
4367 /*NOTREACHED*/
4368 break;
4369 }
4370 }
4371
4372 POST(sys_lwp_name)
4373 {
4374 switch (ARG1 /*opcode*/) {
4375 case 0:
4376 if (ARG3) { // Paranoia
4377 const HChar *new_name = (const HChar *) ARG3;
4378 ThreadState *tst = VG_(get_ThreadState)(tid);
4379 SizeT new_len = VG_(strlen)(new_name);
4380
4381 /* Don't bother reusing the memory. This is a rare event. */
4382 tst->thread_name = VG_(realloc)("syswrap.lwp_name", tst->thread_name,
4383 new_len + 1);
4384 VG_(strcpy)(tst->thread_name, new_name);
4385 }
4386 break;
4387 case 1:
4388 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
4389 break;
4390 default:
4391 vg_assert(0);
4392 break;
4393 }
4394 }
4395 #endif /* SOLARIS_LWP_NAME_SYSCALL */
4396
4397 PRE(sys_privsys)
4398 {
4399 /* Kernel: int privsys(int code, priv_op_t op, priv_ptype_t type,
4400 void *buf, size_t bufsize, int itype);
4401 */
4402 switch (ARG1 /*code*/) {
4403 case VKI_PRIVSYS_SETPPRIV:
4404 /* Libc: int setppriv(priv_op_t op, priv_ptype_t type,
4405 const priv_set_t *pset);
4406 */
4407 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4408 ARG4, ARG5);
4409 PRE_REG_READ5(long, SC2("privsys", "setppriv"), int, code,
4410 vki_priv_op_t, op, vki_priv_ptype_t, type,
4411 const priv_set_t *, pset, vki_size_t, bufsize);
4412 PRE_MEM_READ("privsys(pset)", ARG4, ARG5);
4413 break;
4414 case VKI_PRIVSYS_GETPPRIV:
4415 /* Libc: int getppriv(priv_ptype_t type, priv_set_t *pset);
4416 priv_set_t *pset -> void *buf
4417 */
4418 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4419 ARG4, ARG5);
4420 PRE_REG_READ5(long, SC2("privsys", "getppriv"), int, code,
4421 vki_priv_op_t, op, vki_priv_ptype_t, type, priv_set_t *, pset,
4422 vki_size_t, bufsize);
4423 PRE_MEM_WRITE("privsys(pset)", ARG4, ARG5);
4424 break;
4425 case VKI_PRIVSYS_GETIMPLINFO:
4426 /* Libc: int getprivinfo(priv_impl_info_t *buf, size_t bufsize);
4427 priv_impl_info_t *buf -> void *buf
4428 */
4429 PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", SARG1, SARG2, SARG3,
4430 ARG4, ARG5);
4431 PRE_REG_READ5(long, SC2("privsys", "getprivinfo"), int, code,
4432 vki_priv_op_t, op, vki_priv_ptype_t, type,
4433 priv_impl_info_t *, buf, vki_size_t, bufsize);
4434 PRE_MEM_WRITE("privsys(buf)", ARG4, ARG5);
4435 break;
4436 case VKI_PRIVSYS_SETPFLAGS:
4437 /* Libc: int setpflags(uint_t flag, uint_t val);
4438 uint_t flag -> priv_op_t op
4439 uint_t val -> priv_ptype_t type
4440 */
4441 PRINT("sys_privsys ( %ld, %lu, %lu )", SARG1, ARG2, ARG3);
4442 PRE_REG_READ3(long, SC2("privsys", "setpflags"), int, code,
4443 vki_uint_t, flag, vki_uint_t, val);
4444 break;
4445 case VKI_PRIVSYS_GETPFLAGS:
4446 /* Libc: uint_t getpflags(uint_t flag);
4447 uint_t flag -> priv_op_t op
4448 */
4449 PRINT("sys_privsys ( %ld, %lu )", SARG1, ARG2);
4450       PRE_REG_READ2(long, SC2("privsys", "getpflags"), int, code,
4451 vki_uint_t, flag);
4452 break;
4453 case VKI_PRIVSYS_ISSETUGID:
4454 /* Libc: int issetugid(void); */
4455 PRINT("sys_privsys ( %ld )", SARG1);
4456 PRE_REG_READ1(long, SC2("privsys", "issetugid"), int, code);
4457 break;
4458 case VKI_PRIVSYS_PFEXEC_REG:
4459 /* Libc: int register_pfexec(int did);
4460 int did -> priv_op_t op
4461 */
4462 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4463 PRE_REG_READ2(long, SC2("privsys", "register_pfexec"), int, code,
4464 int, did);
4465 break;
4466 case VKI_PRIVSYS_PFEXEC_UNREG:
4467 /* Libc: int unregister_pfexec(int did); */
4468 PRINT("sys_privsys ( %ld, %ld )", SARG1, SARG2);
4469 PRE_REG_READ2(long, SC2("privsys", "unregister_pfexec"), int, code,
4470 int, did);
4471 break;
4472 default:
4473 VG_(unimplemented)("Syswrap of the privsys call with code %ld.", SARG1);
4474 /*NOTREACHED*/
4475 break;
4476 }
4477
4478 /* Be strict. */
4479 if ((ARG1 == VKI_PRIVSYS_PFEXEC_REG ||
4480 ARG1 == VKI_PRIVSYS_PFEXEC_UNREG) &&
4481 !ML_(fd_allowed)(ARG2, "privsys", tid, False))
4482 SET_STATUS_Failure(VKI_EBADF);
4483 }
4484
4485 POST(sys_privsys)
4486 {
4487 switch (ARG1 /*code*/) {
4488 case VKI_PRIVSYS_SETPPRIV:
4489 break;
4490 case VKI_PRIVSYS_GETPPRIV:
4491 POST_MEM_WRITE(ARG4, sizeof(vki_priv_set_t));
4492 break;
4493 case VKI_PRIVSYS_GETIMPLINFO:
4494      /* The kernel copies out data of size min(bufsize, privinfosize).
4495 Unfortunately, it does not seem to be possible to easily obtain the
4496 privinfosize value. The code below optimistically marks all ARG5
4497 bytes (aka bufsize) as written by the kernel. */
4498 POST_MEM_WRITE(ARG4, ARG5);
4499 break;
4500 case VKI_PRIVSYS_SETPFLAGS:
4501 case VKI_PRIVSYS_GETPFLAGS:
4502 case VKI_PRIVSYS_ISSETUGID:
4503 case VKI_PRIVSYS_PFEXEC_REG:
4504 case VKI_PRIVSYS_PFEXEC_UNREG:
4505 break;
4506 default:
4507 vg_assert(0);
4508 break;
4509 }
4510 }
4511
4512 PRE(sys_ucredsys)
4513 {
4514 /* Kernel: int ucredsys(int code, int obj, void *buf); */
4515 PRINT("sys_ucredsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
4516
4517 switch (ARG1 /*code*/) {
4518 case VKI_UCREDSYS_UCREDGET:
4519 /* Libc: ucred_t *ucred_get(pid_t pid); */
4520 PRE_REG_READ3(long, SC2("ucredsys", "ucredget"), int, code,
4521 vki_pid_t, pid, vki_ucred_t *, buf);
4522 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4523 break;
4524
4525 case VKI_UCREDSYS_GETPEERUCRED:
4526 /* Libc: int getpeerucred(int fd, ucred_t **ucred); */
4527 PRE_REG_READ3(long, SC2("ucredsys", "getpeerucred"), int, code,
4528 int, fd, vki_ucred_t *, buf);
4529 PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
4530
4531 /* Be strict. */
4532 if (!ML_(fd_allowed)(ARG2, "ucredsys", tid, False))
4533 SET_STATUS_Failure(VKI_EBADF);
4534 break;
4535
4536 default:
4537 VG_(unimplemented)("Syswrap of the ucredsys call with code %ld.", SARG1);
4538 /*NOTREACHED*/
4539 break;
4540 }
4541 }
4542
4543 POST(sys_ucredsys)
4544 {
4545 switch (ARG1 /*code*/) {
4546 case VKI_UCREDSYS_UCREDGET:
4547 case VKI_UCREDSYS_GETPEERUCRED:
4548 vg_assert(ARG3 != 0);
4549 POST_MEM_WRITE(ARG3, ((vki_ucred_t *) ARG3)->uc_size);
4550 break;
4551
4552 default:
4553 vg_assert(0);
4554 break;
4555 }
4556 }
4557
4558 PRE(sys_getmsg)
4559 {
4560 /* int getmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4561 int *flagsp); */
4562 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4563 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4564 *flags |= SfMayBlock;
4565 PRINT("sys_getmsg ( %ld, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3, ARG4);
4566 PRE_REG_READ4(long, "getmsg", int, fildes, struct vki_strbuf *, ctlptr,
4567 struct vki_strbuf *, dataptr, int *, flagsp);
4568 if (ctrlptr) {
4569 PRE_FIELD_READ("getmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
4570 PRE_FIELD_WRITE("getmsg(ctrlptr->len)", ctrlptr->len);
4571 PRE_FIELD_READ("getmsg(ctrlptr->buf)", ctrlptr->buf);
4572 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4573 && ctrlptr->maxlen > 0)
4574 PRE_MEM_WRITE("getmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4575 ctrlptr->maxlen);
4576 }
4577 if (dataptr) {
4578 PRE_FIELD_READ("getmsg(dataptr->maxlen)", dataptr->maxlen);
4579 PRE_FIELD_WRITE("getmsg(dataptr->len)", dataptr->len);
4580 PRE_FIELD_READ("getmsg(dataptr->buf)", dataptr->buf);
4581 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4582 && dataptr->maxlen > 0)
4583 PRE_MEM_WRITE("getmsg(dataptr->buf)", (Addr)dataptr->buf,
4584 dataptr->maxlen);
4585 }
4586 PRE_MEM_READ("getmsg(flagsp)", ARG4, sizeof(int));
4587 /*PRE_MEM_WRITE("getmsg(flagsp)", ARG4, sizeof(int));*/
4588
4589 /* Be strict. */
4590 if (!ML_(fd_allowed)(ARG1, "getmsg", tid, False))
4591 SET_STATUS_Failure(VKI_EBADF);
4592 }
4593
4594 POST(sys_getmsg)
4595 {
4596 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4597 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4598
4599 if (ctrlptr && ctrlptr->len > 0)
4600 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
4601 if (dataptr && dataptr->len > 0)
4602 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
4603 POST_MEM_WRITE(ARG4, sizeof(int));
4604 }
4605
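/* Illustrative sketch of the strbuf asymmetry handled by the getmsg/putmsg
   wrappers (not part of the wrappers themselves): for getmsg() the caller
   supplies the buffer capacity in maxlen and the kernel fills buf and sets
   len, whereas putmsg() only reads len bytes from buf and ignores maxlen.
   A hypothetical caller ('fd' is a placeholder STREAMS descriptor):

      struct strbuf ctl;
      char cbuf[128];
      int flagsp = 0;
      ctl.maxlen = sizeof(cbuf);             // capacity, read by getmsg()
      ctl.buf    = cbuf;
      if (getmsg(fd, &ctl, NULL, &flagsp) >= 0 && ctl.len > 0)
         putmsg(fd, &ctl, NULL, 0);          // len bytes of buf, read by putmsg()
*/
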
4606 PRE(sys_putmsg)
4607 {
4608 /* int putmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
4609 int flags); */
4610 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
4611 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
4612 *flags |= SfMayBlock;
4613 PRINT("sys_putmsg ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
4614 PRE_REG_READ4(long, "putmsg", int, fildes, struct vki_strbuf *, ctrlptr,
4615 struct vki_strbuf *, dataptr, int, flags);
4616 if (ctrlptr) {
4617 PRE_FIELD_READ("putmsg(ctrlptr->len)", ctrlptr->len);
4618 PRE_FIELD_READ("putmsg(ctrlptr->buf)", ctrlptr->buf);
4619 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
4620 && ctrlptr->len > 0)
4621 PRE_MEM_READ("putmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
4622 ctrlptr->len);
4623 }
4624 if (dataptr) {
4625 PRE_FIELD_READ("putmsg(dataptr->len)", dataptr->len);
4626 PRE_FIELD_READ("putmsg(dataptr->buf)", dataptr->buf);
4627 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
4628 && dataptr->len > 0)
4629 PRE_MEM_READ("putmsg(dataptr->buf)", (Addr)dataptr->buf,
4630 dataptr->len);
4631 }
4632
4633 /* Be strict. */
4634 if (!ML_(fd_allowed)(ARG1, "putmsg", tid, False))
4635 SET_STATUS_Failure(VKI_EBADF);
4636 }
4637
4638 PRE(sys_lstat)
4639 {
4640 /* int lstat(const char *path, struct stat *buf); */
4641    /* Note: We could use the generic sys_newlstat wrapper here, but the 'new'
4642 in its name is rather confusing in the Solaris context, thus we provide
4643 our own wrapper. */
4644 PRINT("sys_lstat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
4645 PRE_REG_READ2(long, "lstat", const char *, path, struct stat *, buf);
4646
4647 PRE_MEM_RASCIIZ("lstat(path)", ARG1);
4648 PRE_MEM_WRITE("lstat(buf)", ARG2, sizeof(struct vki_stat));
4649 }
4650
4651 POST(sys_lstat)
4652 {
4653 POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
4654 }
4655
4656 PRE(sys_sigprocmask)
4657 {
4658 /* int sigprocmask(int how, const sigset_t *set, sigset_t *oset); */
4659 PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4660 PRE_REG_READ3(long, "sigprocmask",
4661 int, how, vki_sigset_t *, set, vki_sigset_t *, oset);
4662 if (ARG2)
4663 PRE_MEM_READ("sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
4664 if (ARG3)
4665 PRE_MEM_WRITE("sigprocmask(oset)", ARG3, sizeof(vki_sigset_t));
4666
4667 /* Be safe. */
4668 if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_sigset_t)))) {
4669 SET_STATUS_Failure(VKI_EFAULT);
4670 }
4671 if (ARG3 && !ML_(safe_to_deref((void*)ARG3, sizeof(vki_sigset_t)))) {
4672 SET_STATUS_Failure(VKI_EFAULT);
4673 }
4674
4675 if (!FAILURE)
4676 SET_STATUS_from_SysRes(
4677 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, (vki_sigset_t*)ARG2,
4678 (vki_sigset_t*)ARG3)
4679 );
4680
4681 if (SUCCESS)
4682 *flags |= SfPollAfter;
4683 }
4684
4685 POST(sys_sigprocmask)
4686 {
4687 if (ARG3)
4688 POST_MEM_WRITE(ARG3, sizeof(vki_sigset_t));
4689 }
4690
4691 PRE(sys_sigaction)
4692 {
4693 /* int sigaction(int signal, const struct sigaction *act,
4694 struct sigaction *oact); */
4695 PRINT("sys_sigaction ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
4696 PRE_REG_READ3(long, "sigaction", int, signal,
4697 const struct sigaction *, act, struct sigaction *, oact);
4698
4699 /* Note that on Solaris, vki_sigaction_toK_t and vki_sigaction_fromK_t are
4700 both typedefs of 'struct sigaction'. */
4701
4702 if (ARG2) {
4703 vki_sigaction_toK_t *sa = (vki_sigaction_toK_t*)ARG2;
4704 PRE_FIELD_READ("sigaction(act->sa_flags)", sa->sa_flags);
4705 PRE_FIELD_READ("sigaction(act->sa_handler)", sa->ksa_handler);
4706 PRE_FIELD_READ("sigaction(act->sa_mask)", sa->sa_mask);
4707 }
4708 if (ARG3)
4709 PRE_MEM_WRITE("sigaction(oact)", ARG3, sizeof(vki_sigaction_fromK_t));
4710
4711 /* Be safe. */
4712 if (ARG2 && !ML_(safe_to_deref((void*)ARG2,
4713 sizeof(vki_sigaction_toK_t)))) {
4714 SET_STATUS_Failure(VKI_EFAULT);
4715 }
4716 if (ARG3 && !ML_(safe_to_deref((void*)ARG3,
4717 sizeof(vki_sigaction_fromK_t)))) {
4718 SET_STATUS_Failure(VKI_EFAULT);
4719 }
4720
4721 if (!FAILURE)
4722 SET_STATUS_from_SysRes(
4723 VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t*)ARG2,
4724 (vki_sigaction_fromK_t*)ARG3));
4725 }
4726
4727 POST(sys_sigaction)
4728 {
4729 if (ARG3)
4730 POST_MEM_WRITE(ARG3, sizeof(vki_sigaction_fromK_t));
4731 }
4732
4733 PRE(sys_sigpending)
4734 {
4735 /* int sigpending(int flag, sigset_t *setp); */
4736 PRINT("sys_sigpending ( %ld, %#lx )", SARG1, ARG2);
4737 PRE_REG_READ2(long, "sigpending", int, flag, sigset_t *, setp);
4738 PRE_MEM_WRITE("sigpending(setp)", ARG2, sizeof(vki_sigset_t));
4739 }
4740
4741 POST(sys_sigpending)
4742 {
4743 POST_MEM_WRITE(ARG2, sizeof(vki_sigset_t));
4744 }
4745
4746 PRE(sys_getsetcontext)
4747 {
4748 /* Kernel: int getsetcontext(int flag, void *arg) */
4749 ThreadState *tst = VG_(get_ThreadState)(tid);
4750 PRINT("sys_getsetcontext ( %ld, %#lx )", SARG1, ARG2);
4751 switch (ARG1 /*flag*/) {
4752 case VKI_GETCONTEXT:
4753 /* Libc: int getcontext(ucontext_t *ucp); */
4754 PRE_REG_READ2(long, SC2("getsetcontext", "getcontext"), int, flag,
4755 ucontext_t *, ucp);
4756 PRE_MEM_WRITE("getsetcontext(ucp)", ARG2, sizeof(vki_ucontext_t));
4757
4758 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4759 SET_STATUS_Failure(VKI_EFAULT);
4760 return;
4761 }
4762 VG_(save_context)(tid, (vki_ucontext_t*)ARG2, Vg_CoreSysCall);
4763 SET_STATUS_Success(0);
4764 break;
4765 case VKI_SETCONTEXT:
4766 /* Libc: int setcontext(const ucontext_t *ucp); */
4767 PRE_REG_READ2(long, SC2("getsetcontext", "setcontext"), int, flag,
4768 const ucontext_t *, ucp);
4769
4770 if (!ARG2) {
4771 /* Setting NULL context causes thread exit. */
4772 tst->exitreason = VgSrc_ExitThread;
4773 tst->os_state.exitcode = 0;
4774 SET_STATUS_Success(0);
4775 return;
4776 }
4777
4778 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
4779 SET_STATUS_Failure(VKI_EFAULT);
4780 return;
4781 }
4782
4783 VG_(restore_context)(tid, (vki_ucontext_t*)ARG2,
4784 Vg_CoreSysCall, False/*esp_is_thrptr*/);
4785 /* Tell the driver not to update the guest state with the "result". */
4786 *flags |= SfNoWriteResult;
4787 /* Check to see if any signals arose as a result of this. */
4788 *flags |= SfPollAfter;
4789
4790 /* Check if this is a possible return from a signal handler. */
4791 VG_(sigframe_return)(tid, (vki_ucontext_t*)ARG2);
4792
4793 SET_STATUS_Success(0);
4794 break;
4795 case VKI_GETUSTACK:
4796 /* Libc: int getustack(stack_t **spp); */
4797 PRE_REG_READ2(long, SC2("getsetcontext", "getustack"), int, flag,
4798 stack_t **, spp);
4799 PRE_MEM_WRITE("getsetcontext(spp)", ARG2, sizeof(vki_stack_t*));
4800
4801 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t*)))) {
4802 SET_STATUS_Failure(VKI_EFAULT);
4803 return;
4804 }
4805
4806 *(vki_stack_t**)ARG2 = tst->os_state.ustack;
4807 POST_MEM_WRITE(ARG2, sizeof(vki_stack_t*));
4808 SET_STATUS_Success(0);
4809 break;
4810 case VKI_SETUSTACK:
4811 {
4812 /* Libc: int setustack(stack_t *sp); */
4813 PRE_REG_READ2(long, SC2("getsetcontext", "setustack"), int, flag,
4814 stack_t *, sp);
4815
4816          /* The kernel does not read the stack data immediately, but it can
4817             read them later, so it is better to make sure the data are defined. */
4818 PRE_MEM_READ("getsetcontext_setustack(sp)", ARG2, sizeof(vki_stack_t));
4819
4820 if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
4821 SET_STATUS_Failure(VKI_EFAULT);
4822 return;
4823 }
4824
4825 vki_stack_t *old_stack = tst->os_state.ustack;
4826 tst->os_state.ustack = (vki_stack_t*)ARG2;
4827
4828 /* The thread is setting the ustack pointer. It is a good time to get
4829 information about its stack. */
4830 if (tst->os_state.ustack->ss_flags == 0) {
4831            /* If the sanity check of ss_flags passed, set the stack. */
4832 set_stack(tid, tst->os_state.ustack);
4833
4834 if ((old_stack == NULL) && (tid > 1)) {
4835 /* New thread creation is now completed. Inform the tool. */
4836 VG_TRACK(pre_thread_first_insn, tid);
4837 }
4838 }
4839
4840 SET_STATUS_Success(0);
4841 }
4842 break;
4843 default:
4844 VG_(unimplemented)("Syswrap of the context call with flag %ld.", SARG1);
4845 /*NOTREACHED*/
4846 break;
4847 }
4848 }
4849
4850 PRE(sys_fchmodat)
4851 {
4852 /* int fchmodat(int fd, const char *path, mode_t mode, int flag); */
4853
4854    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
4855 This is different from Linux, for example, where glibc sign-extends it. */
4856 Int fd = (Int) ARG1;
4857
4858 PRINT("sys_fchmodat ( %d, %#lx(%s), %ld, %ld )",
4859 fd, ARG2, (HChar *) ARG2, SARG3, SARG4);
4860 PRE_REG_READ4(long, "fchmodat",
4861 int, fd, const char *, path, vki_mode_t, mode, int, flag);
4862
4863 if (ARG2)
4864 PRE_MEM_RASCIIZ("fchmodat(path)", ARG2);
4865
4866 /* Be strict but ignore fd for absolute path. */
4867 if (fd != VKI_AT_FDCWD
4868 && ML_(safe_to_deref)((void *) ARG2, 1)
4869 && ((HChar *) ARG2)[0] != '/'
4870 && !ML_(fd_allowed)(fd, "fchmodat", tid, False))
4871 SET_STATUS_Failure(VKI_EBADF);
4872 }
4873
4874 PRE(sys_mkdirat)
4875 {
4876 /* int mkdirat(int fd, const char *path, mode_t mode); */
4877
4878    /* Interpret the first argument as a 32-bit value even on 64-bit architectures.
4879 This is different from Linux, for example, where glibc sign-extends it. */
4880 Int fd = (Int) ARG1;
4881
4882 *flags |= SfMayBlock;
4883 PRINT("sys_mkdirat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2, SARG3);
4884 PRE_REG_READ3(long, "mkdirat", int, fd, const char *, path,
4885 vki_mode_t, mode);
4886 PRE_MEM_RASCIIZ("mkdirat(path)", ARG2);
4887
4888 /* Be strict but ignore fd for absolute path. */
4889 if (fd != VKI_AT_FDCWD
4890 && ML_(safe_to_deref)((void *) ARG2, 1)
4891 && ((HChar *) ARG2)[0] != '/'
4892 && !ML_(fd_allowed)(fd, "mkdirat", tid, False))
4893 SET_STATUS_Failure(VKI_EBADF);
4894 }
4895
4896 static void do_statvfs_post(struct vki_statvfs *stats, ThreadId tid)
4897 {
4898 POST_FIELD_WRITE(stats->f_bsize);
4899 POST_FIELD_WRITE(stats->f_frsize);
4900 POST_FIELD_WRITE(stats->f_blocks);
4901 POST_FIELD_WRITE(stats->f_bfree);
4902 POST_FIELD_WRITE(stats->f_bavail);
4903 POST_FIELD_WRITE(stats->f_files);
4904 POST_FIELD_WRITE(stats->f_ffree);
4905 POST_FIELD_WRITE(stats->f_favail);
4906 POST_FIELD_WRITE(stats->f_fsid);
4907 POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
4908 POST_FIELD_WRITE(stats->f_flag);
4909 POST_FIELD_WRITE(stats->f_namemax);
4910 POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
4911 }
4912
4913 PRE(sys_statvfs)
4914 {
4915 /* int statvfs(const char *path, struct statvfs *buf); */
4916 *flags |= SfMayBlock;
4917 PRINT("sys_statvfs ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
4918 PRE_REG_READ2(long, "statvfs", const char *, path,
4919 struct vki_statvfs *, buf);
4920 PRE_MEM_RASCIIZ("statvfs(path)", ARG1);
4921 PRE_MEM_WRITE("statvfs(buf)", ARG2, sizeof(struct vki_statvfs));
4922 }
4923
4924 POST(sys_statvfs)
4925 {
4926 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
4927 }
4928
4929 PRE(sys_fstatvfs)
4930 {
4931 /* int fstatvfs(int fd, struct statvfs *buf); */
4932 *flags |= SfMayBlock;
4933 PRINT("sys_fstatvfs ( %ld, %#lx )", SARG1, ARG2);
4934 PRE_REG_READ2(long, "fstatvfs", int, fd, struct vki_statvfs *, buf);
4935 PRE_MEM_WRITE("fstatvfs(buf)", ARG2, sizeof(struct vki_statvfs));
4936
4937 /* Be strict. */
4938 if (!ML_(fd_allowed)(ARG1, "fstatvfs", tid, False))
4939 SET_STATUS_Failure(VKI_EBADF);
4940 }
4941
4942 POST(sys_fstatvfs)
4943 {
4944 do_statvfs_post((struct vki_statvfs *) ARG2, tid);
4945 }
4946
4947 PRE(sys_nfssys)
4948 {
4949 /* int nfssys(enum nfssys_op opcode, void *arg); */
4950 *flags |= SfMayBlock;
4951 PRINT("sys_nfssys ( %ld, %#lx )", SARG1, ARG2);
4952
4953 switch (ARG1 /*opcode*/) {
4954 case VKI_NFS_REVAUTH:
4955 PRE_REG_READ2(long, SC2("nfssys", "nfs_revauth"), int, opcode,
4956 struct vki_nfs_revauth_args *, args);
4957 PRE_MEM_READ("nfssys(arg)", ARG2,
4958 sizeof(struct vki_nfs_revauth_args));
4959 break;
4960 default:
4961 VG_(unimplemented)("Syswrap of the nfssys call with opcode %ld.", SARG1);
4962 /*NOTREACHED*/
4963 break;
4964 }
4965 }
4966
4967 POST(sys_nfssys)
4968 {
4969 switch (ARG1 /*opcode*/) {
4970 case VKI_NFS_REVAUTH:
4971 break;
4972 default:
4973 vg_assert(0);
4974 break;
4975 }
4976 }
4977
4978 PRE(sys_waitid)
4979 {
4980 /* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); */
4981 *flags |= SfMayBlock;
4982 PRINT("sys_waitid( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
4983 PRE_REG_READ4(long, "waitid", vki_idtype_t, idtype, vki_id_t, id,
4984 siginfo_t *, infop, int, options);
4985 PRE_MEM_WRITE("waitid(infop)", ARG3, sizeof(vki_siginfo_t));
4986 }
4987
4988 POST(sys_waitid)
4989 {
4990 POST_MEM_WRITE(ARG3, sizeof(vki_siginfo_t));
4991 }
4992
4993 #if defined(SOLARIS_UTIMESYS_SYSCALL)
4994 PRE(sys_utimesys)
4995 {
4996 /* Kernel: int utimesys(int code, uintptr_t arg1, uintptr_t arg2,
4997 uintptr_t arg3, uintptr_t arg4);
4998 */
4999
5000 switch (ARG1 /*code*/) {
5001 case 0:
5002 /* Libc: int futimens(int fd, const timespec_t times[2]); */
5003 PRINT("sys_utimesys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
5004 PRE_REG_READ3(long, "utimesys", int, code, int, fd,
5005 const vki_timespec_t *, times);
5006 if (ARG3)
5007 PRE_MEM_READ("utimesys(times)", ARG3, 2 * sizeof(vki_timespec_t));
5008
5009 /* Be strict. */
5010 if (!ML_(fd_allowed)(ARG2, "utimesys", tid, False))
5011 SET_STATUS_Failure(VKI_EBADF);
5012 break;
5013 case 1:
5014 {
5015 /* Libc: int utimensat(int fd, const char *path,
5016 const timespec_t times[2], int flag);
5017 */
5018
5019 /* Interpret the second argument as a 32-bit value even on a 64-bit
5020 architecture. This is different from Linux, for example, where glibc
5021 sign-extends it. */
5022 Int fd = (Int) ARG2;
5023
5024 PRINT("sys_utimesys ( %ld, %d, %#lx(%s), %#lx, %ld )",
5025 SARG1, fd, ARG3, (HChar *) ARG3, ARG4, SARG5);
5026 PRE_REG_READ5(long, "utimesys", int, code, int, fd, const char *, path,
5027 const vki_timespec_t *, times, int, flag);
5028 if (ARG3)
5029 PRE_MEM_RASCIIZ("utimesys(path)", ARG3);
5030 if (ARG4)
5031 PRE_MEM_READ("utimesys(times)", ARG4, 2 * sizeof(vki_timespec_t));
5032
5033 /* Be strict but ignore fd for absolute path. */
5034 if (fd != VKI_AT_FDCWD
5035 && ML_(safe_to_deref)((void *) ARG3, 1)
5036 && ((HChar *) ARG3)[0] != '/'
5037 && !ML_(fd_allowed)(fd, "utimesys", tid, False))
5038 SET_STATUS_Failure(VKI_EBADF);
5039 break;
5040 }
5041 default:
5042 VG_(unimplemented)("Syswrap of the utimesys call with code %ld.", SARG1);
5043 /*NOTREACHED*/
5044 break;
5045 }
5046 }
5047 #endif /* SOLARIS_UTIMESYS_SYSCALL */
5048
5049 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
5050 PRE(sys_utimensat)
5051 {
5052 /* int utimensat(int fd, const char *path, const timespec_t times[2],
5053 int flag);
5054 */
5055
5056 /* Interpret the first argument as a 32-bit value even on a 64-bit architecture.
5057 This is different from Linux, for example, where glibc sign-extends it. */
5058 Int fd = (Int) ARG1;
5059
5060 PRINT("sys_utimensat ( %d, %#lx(%s), %#lx, %ld )",
5061 fd, ARG2, (HChar *) ARG2, ARG3, SARG4);
5062 PRE_REG_READ4(long, "utimensat", int, fd, const char *, path,
5063 const vki_timespec_t *, times, int, flag);
5064 if (ARG2)
5065 PRE_MEM_RASCIIZ("utimensat(path)", ARG2);
5066 if (ARG3)
5067 PRE_MEM_READ("utimensat(times)", ARG3, 2 * sizeof(vki_timespec_t));
5068
5069 /* Be strict but ignore fd for absolute path. */
5070 if (fd != VKI_AT_FDCWD
5071 && ML_(safe_to_deref)((void *) ARG2, 1)
5072 && ((HChar *) ARG2)[0] != '/'
5073 && !ML_(fd_allowed)(fd, "utimensat", tid, False))
5074 SET_STATUS_Failure(VKI_EBADF);
5075 }
5076 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
5077
5078 PRE(sys_sigresend)
5079 {
5080 /* int sigresend(int signal, siginfo_t *siginfo, sigset_t *mask); */
5081 /* Sends a signal to the calling thread; the mask parameter specifies a new
5082 signal mask. */
5083
5084 /* Static mask so that it stays valid after this function returns; the kernel reads it when the syscall is actually performed. */
5085 static vki_sigset_t block_all;
5086
5087 PRINT("sys_sigresend( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
5088 PRE_REG_READ3(long, "sigresend", int, signal, vki_siginfo_t *, siginfo,
5089 vki_sigset_t *, mask);
5090
5091 if (ARG2)
5092 PRE_MEM_READ("sigresend(siginfo)", ARG2, sizeof(vki_siginfo_t));
5093 PRE_MEM_WRITE("sigresend(mask)", ARG3, sizeof(vki_sigset_t));
5094
5095 /* Check the signal and mask. */
5096 if (!ML_(client_signal_OK)(ARG1)) {
5097 SET_STATUS_Failure(VKI_EINVAL);
5098 }
5099 if (!ML_(safe_to_deref)((void*)ARG3, sizeof(vki_sigset_t))) {
5100 SET_STATUS_Failure(VKI_EFAULT);
5101 }
5102
5103 /* Exit early if there are problems. */
5104 if (FAILURE)
5105 return;
5106
5107 /* Save the requested mask to unused ARG4. */
5108 ARG4 = ARG3;
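/* The saved mask is applied in POST(sys_sigresend) via
   VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, ...) once the syscall has
   succeeded. */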
5109
5110 /* Fake the requested sigmask with a block-all mask. If the syscall
5111 succeeds then we will block "all" signals for a few instructions (in
5112 syscall-x86-solaris.S) but the correct mask will be almost instantly set
5113 again by a call to sigprocmask (also in syscall-x86-solaris.S). If the
5114 syscall fails then the mask is not changed, so everything is ok too. */
5115 VG_(sigfillset)(&block_all);
5116 ARG3 = (UWord)&block_all;
5117
5118 /* Check to see if this gave us a pending signal. */
5119 *flags |= SfPollAfter;
5120
5121 if (VG_(clo_trace_signals))
5122 VG_(message)(Vg_DebugMsg, "sigresend: resending signal %ld\n", ARG1);
5123
5124 /* Handle SIGKILL specially. */
5125 if (ARG1 == VKI_SIGKILL && ML_(do_sigkill)(tid, -1)) {
5126 SET_STATUS_Success(0);
5127 return;
5128 }
5129
5130 /* Ask to handle this syscall via the slow route, since that's the only one
5131 that sets tst->status to VgTs_WaitSys. If the result of doing the
5132 syscall is an immediate run of async_signalhandler() in m_signals.c,
5133 then we need the thread to be properly tidied away. */
5134 *flags |= SfMayBlock;
5135 }
5136
5137 POST(sys_sigresend)
5138 {
5139 /* The syscall succeeded, set the requested mask. */
5140 VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, (vki_sigset_t*)ARG4, NULL);
5141
5142 if (VG_(clo_trace_signals))
5143 VG_(message)(Vg_DebugMsg, "sigresend: resent signal %lu\n", ARG1);
5144 }
5145
5146 static void mem_priocntlsys_parm_ok(ThreadId tid, Bool pre, Bool reade,
5147 vki_pc_vaparm_t *parm)
5148 {
5149 if (reade)
5150 return;
5151
5152 if (pre)
5153 PRE_FIELD_WRITE("priocntlsys(parm)", parm->pc_parm);
5154 else
5155 POST_FIELD_WRITE(parm->pc_parm);
5156 }
5157
5158 static void mem_priocntlsys_parm(ThreadId tid, Bool pre, Bool reade,
5159 const HChar *clname,
5160 vki_pc_vaparm_t *parm)
5161 {
5162 /* This function is used to handle the PC_SETXPARMS and PC_GETXPARMS
5163 parameters. In the case of PC_SETXPARMS, the code below merely checks
5164 if all parameters are scalar, PRE_MEM_READ() for these parameters is
5165 already done by the PC_SETXPARMS handler in PRE(sys_priocntlsys).
5166
5167 A caller of this function is responsible for checking that clname and
5168 &parm->key can be dereferenced. */
5169
5170 if (VG_STREQ(clname, "RT")) {
5171 switch (parm->pc_key) {
5172 case VKI_RT_KY_PRI:
5173 case VKI_RT_KY_TQSECS:
5174 case VKI_RT_KY_TQNSECS:
5175 case VKI_RT_KY_TQSIG:
5176 /* Scalar values that are stored directly in pc_parm. */
5177 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5178 return;
5179 }
5180 }
5181 else if (VG_STREQ(clname, "TS")) {
5182 switch (parm->pc_key) {
5183 case VKI_TS_KY_UPRILIM:
5184 case VKI_TS_KY_UPRI:
5185 /* Scalar values that are stored directly in pc_parm. */
5186 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5187 return;
5188 }
5189 }
5190 else if (VG_STREQ(clname, "IA")) {
5191 switch (parm->pc_key) {
5192 case VKI_IA_KY_UPRILIM:
5193 case VKI_IA_KY_UPRI:
5194 case VKI_IA_KY_MODE:
5195 /* Scalar values that are stored directly in pc_parm. */
5196 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5197 return;
5198 }
5199 }
5200 else if (VG_STREQ(clname, "FSS")) {
5201 switch (parm->pc_key) {
5202 case VKI_FSS_KY_UPRILIM:
5203 case VKI_FSS_KY_UPRI:
5204 /* Scalar values that are stored directly in pc_parm. */
5205 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5206 return;
5207 }
5208 }
5209 else if (VG_STREQ(clname, "FX")) {
5210 switch (parm->pc_key) {
5211 case VKI_FX_KY_UPRILIM:
5212 case VKI_FX_KY_UPRI:
5213 case VKI_FX_KY_TQSECS:
5214 case VKI_FX_KY_TQNSECS:
5215 /* Scalar values that are stored directly in pc_parm. */
5216 mem_priocntlsys_parm_ok(tid, pre, reade, parm);
5217 return;
5218 }
5219 }
5220 else {
5221 /* Unknown class. */
5222 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5223 clname);
5224 /*NOTREACHED*/
5225 }
5226
5227 /* The class is known but pc_key is unknown. */
5228 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s "
5229 "and pc_key=%d.", clname, parm->pc_key);
5230 /*NOTREACHED*/
5231 }
5232
5233 PRE(sys_priocntlsys)
5234 {
5235 /* long priocntlsys(int pc_version, procset_t *psp, int cmd, caddr_t arg,
5236 caddr_t arg2); */
5237
5238 if (ARG1 != 1) {
5239 /* Only the first version of priocntlsys is supported by the code below.
5240 */
5241 VG_(unimplemented)("Syswrap of the priocntlsys where pc_version=%lu.",
5242 ARG1);
5243 /*NOTREACHED*/
5244 }
5245
5246 PRINT("sys_priocntlsys ( %ld, %#lx, %ld, %#lx, %#lx )", SARG1, ARG2, SARG3,
5247 ARG4, ARG5);
5248 PRE_REG_READ5(long, "priocntlsys", int, pc_version, procset_t *, psp,
5249 int, cmd, void *, arg, void *, arg2);
5250
5251 switch (ARG3 /*cmd*/) {
5252 case VKI_PC_GETCID:
5253 if (ARG4) {
5254 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5255 PRE_MEM_RASCIIZ("priocntlsys(clname)", (Addr)info->pc_clname);
5256 /* The next line says that the complete pcinfo_t structure can be
5257 written, but this actually isn't true for pc_clname which is
5258 always only read. */
5259 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5260 }
5261 break;
5262 case VKI_PC_GETCLINFO:
5263 if (ARG4) {
5264 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5265 PRE_FIELD_READ("priocntlsys(cid)", info->pc_cid);
5266 /* The next line says that the complete pcinfo_t structure can be
5267 written, but this actually isn't true for pc_cid which is
5268 always only read. */
5269 PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
5270 }
5271 break;
5272 case VKI_PC_SETPARMS:
5273 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5274 /* The next line says that the complete pcparms_t structure is read
5275 which is never actually true (we are too pessimistic here).
5276 Unfortunately we can't do better because we don't know what
5277 process class is involved. */
5278 PRE_MEM_READ("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5279 break;
5280 case VKI_PC_GETPARMS:
5281 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5282 PRE_MEM_WRITE("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
5283 break;
5284 case VKI_PC_GETPRIRANGE:
5285 {
5286 vki_pcpri_t *pcpri = (vki_pcpri_t*)ARG4;
5287 PRE_FIELD_READ("priocntlsys(cid)", pcpri->pc_cid);
5288 }
5289 PRE_MEM_WRITE("priocntlsys(pri)", ARG4, sizeof(vki_pcpri_t));
5290 break;
5291 case VKI_PC_DONICE:
5292 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5293 {
5294 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5295 PRE_FIELD_READ("priocntlsys(op)", nicee->pc_op);
5296 if (ML_(safe_to_deref)(&nicee->pc_op, sizeof(nicee->pc_op))) {
5297 switch (nicee->pc_op) {
5298 case VKI_PC_GETNICE:
5299 PRE_FIELD_WRITE("priocntlsys(val)", nicee->pc_val);
5300 break;
5301 case VKI_PC_SETNICE:
5302 PRE_FIELD_READ("priocntlsys(val)", nicee->pc_val);
5303 break;
5304 default:
5305 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5306 "cmd=PC_DONICE and pc_op=%d", nicee->pc_op);
5307 /*NOTREACHED*/
5308 break;
5309 }
5310 }
5311 }
5312 break;
5313 case VKI_PC_SETXPARMS:
5314 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5315 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5316 if (ARG5) {
5317 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5318 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5319 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5320 sizeof(parms->pc_vaparmscnt))) {
5321 vki_uint_t i;
5322 PRE_MEM_READ("priocntlsys(parms)", (Addr)parms->pc_parms,
5323 parms->pc_vaparmscnt * sizeof(parms->pc_parms[0]));
5324 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5325 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5326 if (ML_(safe_to_deref)(parm, sizeof(*parm)) &&
5327 ML_(safe_to_deref)((void*)ARG4, 1))
5328 mem_priocntlsys_parm(tid, True /*pre*/, True /*read*/,
5329 (HChar*)ARG4, parm);
5330 }
5331 }
5332 }
5333 break;
5334 case VKI_PC_GETXPARMS:
5335 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5336 if (ARG4)
5337 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5338 if (ARG5) {
5339 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5340 PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
5341 if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
5342 sizeof(parms->pc_vaparmscnt))) {
5343 vki_uint_t i;
5344 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5345 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5346 PRE_MEM_READ("priocntlsys(parms)", (Addr)&parm->pc_key,
5347 parms->pc_vaparmscnt * sizeof(parm->pc_key));
5348 if (ML_(safe_to_deref)(&parm->pc_key,
5349 sizeof(parm->pc_key))) {
5350 /* First handle PC_KY_CLNAME, then class specific keys.
5351 Note that PC_KY_CLNAME can be used only with
5352 ARG4==NULL && parms->pc_vaparmscnt==1. We are not so
5353 strict here and handle this special case as a regular
5354 one which makes the code simpler. */
5355 if (parm->pc_key == VKI_PC_KY_CLNAME)
5356 PRE_MEM_WRITE("priocntlsys(clname)", parm->pc_parm,
5357 VKI_PC_CLNMSZ);
5358 else if (ARG4 && ML_(safe_to_deref)((void*)ARG4, 1))
5359 mem_priocntlsys_parm(tid, True /*pre*/,
5360 False /*read*/, (HChar*)ARG4,
5361 parm);
5362 }
5363 }
5364 }
5365 }
5366 break;
5367 case VKI_PC_SETDFLCL:
5368 PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
5369 break;
5370 case VKI_PC_GETDFLCL:
5371 if (ARG4) {
5372 /* GETDFLCL writes to the ARG4 buffer only if ARG4 isn't NULL. Also
5373 note that if ARG4 is NULL then the syscall succeeds. */
5374 PRE_MEM_WRITE("priocntlsys(clname)", ARG4, VKI_PC_CLNMSZ);
5375 }
5376 break;
5377 case VKI_PC_DOPRIO:
5378 PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
5379 {
5380 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5381 PRE_FIELD_READ("priocntlsys(op)", prio->pc_op);
5382 if (ML_(safe_to_deref)(&prio->pc_op, sizeof(prio->pc_op))) {
5383 switch (prio->pc_op) {
5384 case VKI_PC_GETPRIO:
5385 PRE_FIELD_WRITE("priocntlsys(cid)", prio->pc_cid);
5386 PRE_FIELD_WRITE("priocntlsys(val)", prio->pc_val);
5387 break;
5388 case VKI_PC_SETPRIO:
5389 PRE_FIELD_READ("priocntlsys(cid)", prio->pc_cid);
5390 PRE_FIELD_READ("priocntlsys(val)", prio->pc_val);
5391 break;
5392 default:
5393 VG_(unimplemented)("Syswrap of the priocntlsys call where "
5394 "cmd=PC_DOPRIO and pc_op=%d", prio->pc_op);
5395 /*NOTREACHED*/
5396 break;
5397 }
5398 }
5399 }
5400 break;
5401 case VKI_PC_ADMIN:
5402 default:
5403 VG_(unimplemented)("Syswrap of the priocntlsys call with cmd %ld.", SARG3);
5404 /*NOTREACHED*/
5405 break;
5406 }
5407 }
5408
5409 static void post_mem_write_priocntlsys_clinfo(ThreadId tid,
5410 const HChar *clname, Addr clinfo)
5411 {
5412 if (VG_STREQ(clname, "RT"))
5413 POST_MEM_WRITE(clinfo, sizeof(vki_rtinfo_t));
5414 else if (VG_STREQ(clname, "TS"))
5415 POST_MEM_WRITE(clinfo, sizeof(vki_tsinfo_t));
5416 else if (VG_STREQ(clname, "IA"))
5417 POST_MEM_WRITE(clinfo, sizeof(vki_iainfo_t));
5418 else if (VG_STREQ(clname, "FSS"))
5419 POST_MEM_WRITE(clinfo, sizeof(vki_fssinfo_t));
5420 else if (VG_STREQ(clname, "FX"))
5421 POST_MEM_WRITE(clinfo, sizeof(vki_fxinfo_t));
5422 else if (VG_STREQ(clname, "SDC")) {
5423 /* Relax. */
5424 }
5425 else {
5426 VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
5427 clname);
5428 /*NOTREACHED*/
5429 }
5430 }
5431
5432 POST(sys_priocntlsys)
5433 {
5434 switch (ARG3 /*cmd*/) {
5435 case VKI_PC_GETCID:
5436 if (ARG4) {
5437 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5438 POST_FIELD_WRITE(info->pc_cid);
5439 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5440 (Addr)&info->pc_clinfo);
5441 }
5442 break;
5443 case VKI_PC_GETCLINFO:
5444 if (ARG4) {
5445 vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
5446 POST_MEM_WRITE((Addr)info->pc_clname,
5447 VG_(strlen)((HChar*)info->pc_clname) + 1);
5448 post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
5449 (Addr)&info->pc_clinfo);
5450 }
5451 break;
5452 case VKI_PC_SETPARMS:
5453 /* Relax. */
5454 break;
5455 case VKI_PC_GETPARMS:
5456 /* The next line says that the complete pcparms_t structure is
5457 written which is never actually true (we are too optimistic here).
5458 Unfortunately we can't do better because we don't know what
5459 process class is involved. */
5460 POST_MEM_WRITE(ARG4, sizeof(vki_pcparms_t));
5461 break;
5462 case VKI_PC_GETPRIRANGE:
5463 POST_MEM_WRITE(ARG4, sizeof(vki_pcpri_t));
5464 break;
5465 case VKI_PC_DONICE:
5466 {
5467 vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
5468 if (nicee->pc_op == VKI_PC_GETNICE)
5469 POST_FIELD_WRITE(nicee->pc_val);
5470 }
5471 break;
5472 case VKI_PC_SETXPARMS:
5473 /* Relax. */
5474 break;
5475 case VKI_PC_GETXPARMS:
5476 {
5477 vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
5478 vki_uint_t i;
5479 for (i = 0; i < parms->pc_vaparmscnt; i++) {
5480 vki_pc_vaparm_t *parm = &parms->pc_parms[i];
5481 if (parm->pc_key == VKI_PC_KY_CLNAME)
5482 POST_MEM_WRITE(parm->pc_parm,
5483 VG_(strlen)((HChar*)(Addr)parm->pc_parm) + 1);
5484 else if (ARG4)
5485 mem_priocntlsys_parm(tid, False /*pre*/, False /*read*/,
5486 (HChar*)ARG4, parm);
5487 }
5488 }
5489 break;
5490 case VKI_PC_SETDFLCL:
5491 /* Relax. */
5492 break;
5493 case VKI_PC_GETDFLCL:
5494 if (ARG4)
5495 POST_MEM_WRITE(ARG4, VG_(strlen)((HChar*)ARG4) + 1);
5496 break;
5497 case VKI_PC_DOPRIO:
5498 {
5499 vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
5500 if (prio->pc_op == VKI_PC_GETPRIO) {
5501 POST_FIELD_WRITE(prio->pc_cid);
5502 POST_FIELD_WRITE(prio->pc_val);
5503 }
5504 }
5505 break;
5506 case VKI_PC_ADMIN:
5507 default:
5508 vg_assert(0);
5509 break;
5510 }
5511 }
5512
5513 PRE(sys_pathconf)
5514 {
5515 /* long pathconf(const char *path, int name); */
5516 PRINT("sys_pathconf ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
5517 PRE_REG_READ2(long, "pathconf", const char *, path, int, name);
5518 PRE_MEM_RASCIIZ("pathconf(path)", ARG1);
5519 }
5520
5521 PRE(sys_mmap)
5522 {
5523 /* void *mmap(void *addr, size_t len, int prot, int flags,
5524 int fildes, off_t off); */
5525 SysRes r;
5526 OffT offset;
5527
5528 /* Stay sane. */
5529 vg_assert(VKI_PAGE_SIZE == 4096);
5530 vg_assert(sizeof(offset) == sizeof(ARG6));
5531
5532 PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx )",
5533 ARG1, ARG2, ARG3, ARG4, SARG5, ARG6);
5534 PRE_REG_READ6(long, "mmap", void *, start, vki_size_t, length,
5535 int, prot, int, flags, int, fd, vki_off_t, offset);
5536
5537 /* Make sure that if off < 0 then it's passed correctly to the generic mmap
5538 wrapper. */
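/* A hypothetical illustration: on a 32-bit build an ARG6 value of 0xfffff000
   is reinterpreted as the signed offset -4096; the vg_assert above ensures
   OffT and ARG6 have the same width, so no bits are lost. */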
5539 offset = *(OffT*)&ARG6;
5540
5541 r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
5542 SET_STATUS_from_SysRes(r);
5543 }
5544
5545 #if defined(SOLARIS_UUIDSYS_SYSCALL)
5546 PRE(sys_uuidsys)
5547 {
5548 /* int uuidsys(struct uuid *uuid); */
5549 PRINT("sys_uuidsys ( %#lx )", ARG1);
5550 PRE_REG_READ1(long, "uuidsys", struct vki_uuid *, uuid);
5551 PRE_MEM_WRITE("uuidsys(uuid)", ARG1, sizeof(struct vki_uuid));
5552 }
5553
5554 POST(sys_uuidsys)
5555 {
5556 POST_MEM_WRITE(ARG1, sizeof(struct vki_uuid));
5557 }
5558 #endif /* SOLARIS_UUIDSYS_SYSCALL */
5559
5560 /* Syscall mmapobj emulation. Processes ELF program headers
5561 and maps them into the correct place in memory. Not an easy task, though.
5562 ELF program header of PT_LOAD/PT_SUNWBSS type specifies:
5563 o p_vaddr - actually a memory offset
5564 o p_memsz - total segment size, including text, data and BSS
5565 o p_filesz - size of the file-backed part of the mapping (text and data only);
5566 p_memsz - p_filesz is the size of the BSS
5567 o p_offset - offset into the ELF file where the file-based mapping starts
5568
5569 Several problematic areas to cover here:
5570 1. p_offset can contain a value which is not page-aligned. In that case
5571 we mmap a part of the file prior to p_offset to make the start address
5572 page-aligned.
5573 2. Partially unused page after the file-based mapping must be zeroed.
5574 3. The first mapping is flagged with MR_HDR_ELF and needs to contain
5575 the ELF header. This information is used and verified by the dynamic
5576 linker (ld.so.1). */
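/* A worked example with made-up numbers, assuming the 4K page size asserted
   in the mmap wrapper above: a PT_LOAD header with p_vaddr=0x8064234,
   p_offset=0x3234, p_filesz=0xa00 and p_memsz=0x2a00 yields
   page_offset=0x234, so the mapping is pulled back to address 0x8064000 and
   file offset 0x3000; mr_offset becomes 0x234, mr_msize grows to 0x2c34 and
   the file-backed part (file_size) is 0xc34 bytes. The remaining
   0x1000-0xc34=0x3cc bytes of that page are zeroed explicitly (problem 2
   above), and the BSS pages from 0x8065000 up to
   VG_PGROUNDUP(0x8064000+0x2c34)=0x8067000 are mapped anonymously. */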
5577 static SysRes mmapobj_process_phdrs(ThreadId tid, Int fd,
5578 vki_mmapobj_result_t *storage,
5579 vki_uint_t *elements,
5580 const VKI_ESZ(Ehdr) *ehdr,
5581 const VKI_ESZ(Phdr) *phdrs)
5582 {
5583 #define ADVANCE_PHDR(ehdr, phdr) \
5584 (const VKI_ESZ(Phdr) *) ((const HChar *) (phdr) + (ehdr)->e_phentsize)
5585
5586 SysRes res;
5587 Int i;
5588 Int first_segment_idx = -1;
5589 UInt idx;
5590 UInt segments = 0; /* loadable segments */
5591 Addr start_addr = 0;
5592 Addr end_addr = 0;
5593 Addr elfbrk = 0;
5594 SizeT max_align = VKI_PAGE_SIZE;
5595
5596 /* 1. First pass over phdrs - determine number, span and max alignment. */
5597 const VKI_ESZ(Phdr) *phdr = phdrs;
5598 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5599 /* Skip this header if no memory is requested. */
5600 if (phdr->p_memsz == 0)
5601 continue;
5602
5603 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5604 Off64T offset = 0;
5605
5606 if (VG_(clo_trace_syscalls))
5607 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
5608 "program header #%u: addr=%#lx type=%#lx "
5609 "prot=%#lx memsz=%#lx filesz=%#lx file "
5610 "offset=%#lx\n", idx, phdr->p_vaddr,
5611 (UWord) phdr->p_type, (UWord) phdr->p_flags,
5612 phdr->p_memsz, phdr->p_filesz, phdr->p_offset);
5613
5614 if (segments == 0) {
5615 first_segment_idx = idx;
5616
5617 if (phdr->p_filesz == 0) {
5618 VG_(unimplemented)("Syswrap of the mmapobj call with the first "
5619 "loadable ELF program header specifying "
5620 "p_filesz == 0");
5621 /*NOTREACHED*/
5622 return res;
5623 }
5624
5625 /* Address of the first segment must be either NULL or within the
5626 first page. */
5627 if ((ehdr->e_type == VKI_ET_DYN) &&
5628 ((phdr->p_vaddr & VKI_PAGEMASK) != 0)) {
5629 if (VG_(clo_trace_syscalls))
5630 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5631 "ELF program header #%u does not land on "
5632 "the first page (vaddr=%#lx)\n", idx,
5633 phdr->p_vaddr);
5634 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5635 }
5636
5637 start_addr = phdr->p_vaddr;
5638 /* The first segment is mapped from the beginning of the file (to
5639 include also the ELF header), so include this memory as well.
5640 Later on we flag this mapping with MR_HDR_ELF. */
5641 offset = phdr->p_offset;
5642 }
5643
5644 if (phdr->p_align > 1) {
5645 if ((phdr->p_vaddr % phdr->p_align) !=
5646 (phdr->p_offset % phdr->p_align)) {
5647 if (VG_(clo_trace_syscalls))
5648 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5649 "ELF program header #%u does not have "
5650 "congruent offset and vaddr (vaddr=%#lx "
5651 "file offset=%#lx align=%#lx)\n", idx,
5652 phdr->p_vaddr, phdr->p_offset,
5653 phdr->p_align);
5654 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5655 }
5656 }
5657
5658 if (phdr->p_vaddr < end_addr) {
5659 if (VG_(clo_trace_syscalls))
5660 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5661 "ELF program header #%u specifies overlaping "
5662 "address (vaddr=%#lx end_addr=%#lx)\n",
5663 idx, phdr->p_vaddr, end_addr);
5664 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5665 }
5666
5667 end_addr = elfbrk = phdr->p_vaddr + phdr->p_memsz + offset;
5668 end_addr = VG_PGROUNDUP(end_addr);
5669 if (phdr->p_align > max_align) {
5670 max_align = phdr->p_align;
5671 }
5672
5673 segments += 1;
5674 }
5675 }
5676
5677 /* Alignment check - it should be power of two. */
5678 if ((max_align & (max_align - 1)) != 0) {
5679 if (VG_(clo_trace_syscalls))
5680 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5681 "is not a power of 2 (%#lx)\n", max_align);
5682 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5683 }
5684 vg_assert(max_align >= VKI_PAGE_SIZE);
5685
5686 #if defined(VGP_x86_solaris)
5687 if (max_align > VKI_UINT_MAX) {
5688 if (VG_(clo_trace_syscalls))
5689 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
5690 "for 32-bit ELF is >32-bits (%#lx)\n", max_align);
5691 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5692 }
5693 #endif /* VGP_x86_solaris */
5694
5695 if (segments == 0) {
5696 if (VG_(clo_trace_syscalls))
5697 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5698 "to map (0 loadable segments)");
5699 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5700 }
5701
5702 vg_assert(end_addr >= start_addr);
5703 SizeT span = end_addr - start_addr;
5704 if (span == 0) {
5705 if (VG_(clo_trace_syscalls))
5706 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
5707 "to map (%u loadable segments spanning 0 bytes)\n",
5708 segments);
5709 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5710 }
5711 vg_assert(first_segment_idx >= 0);
5712
5713 if (segments > *elements) {
5714 if (VG_(clo_trace_syscalls))
5715 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: too many "
5716 "segments (%u)\n", segments);
5717 return VG_(mk_SysRes_Error)(VKI_E2BIG);
5718 }
5719
5720 if (VG_(clo_trace_syscalls))
5721 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: there "
5722 "are %u loadable segments spanning %#lx bytes; max "
5723 "align is %#lx\n", segments, span, max_align);
5724
5725 /* Now get the aspacemgr oraculum advisory.
5726 Later on we mmap file-based and BSS mappings into this address space area
5727 as required and leave the holes unmapped. */
5728 if (ehdr->e_type == VKI_ET_DYN) {
5729 MapRequest mreq = {MAlign, max_align, span};
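/* For an MAlign request the second field of MapRequest is treated as the
   required alignment rather than a fixed start address (an assumption based
   on how the advisory is used and logged below); the vg_assert further down
   re-checks that the returned start_addr is aligned to max_align. */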
5730 Bool ok;
5731 start_addr = VG_(am_get_advisory)(&mreq, True /* forClient */, &ok);
5732 if (!ok) {
5733 if (VG_(clo_trace_syscalls))
5734 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5735 "failed to reserve address space of %#lx bytes "
5736 "with alignment %#lx\n", span, max_align);
5737 return VG_(mk_SysRes_Error)(VKI_ENOMEM);
5738 }
5739 vg_assert(VG_ROUNDUP(start_addr, max_align) == start_addr);
5740
5741 if (VG_(clo_trace_syscalls))
5742 VG_(debugLog)(2, "syswrap-solaris", "PRE(sys_mmapobj): address space "
5743 "reserved at: vaddr=%#lx size=%#lx\n",
5744 start_addr, span);
5745 } else {
5746 vg_assert(ehdr->e_type == VKI_ET_EXEC);
5747 /* ET_EXEC uses fixed mappings. Will be checked when processing phdrs. */
5748 }
5749
5750 /* This is an utterly ugly hack: the aspacemgr assumes that only one
5751 segment is added at a time. However, we add multiple segments here, so
5752 AM_SANITY_CHECK inside the aspacemgr can easily fail. We want to
5753 prevent that thus we disable these checks. The scheduler will check the
5754 aspacemgr sanity after the syscall. */
5755 UInt sanity_level = VG_(clo_sanity_level);
5756 VG_(clo_sanity_level) = 1;
5757
5758 /* 2. Second pass over phdrs - map the program headers and fill in
5759 the mmapobj_result_t array. */
5760 phdr = phdrs;
5761 *elements = 0;
5762 for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
5763 /* Skip this header if no memory is requested. */
5764 if (phdr->p_memsz == 0)
5765 continue;
5766
5767 if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
5768 UInt prot = 0;
5769 if (phdr->p_flags & VKI_PF_R)
5770 prot |= VKI_PROT_READ;
5771 if (phdr->p_flags & VKI_PF_W)
5772 prot |= VKI_PROT_WRITE;
5773 if (phdr->p_flags & VKI_PF_X)
5774 prot |= VKI_PROT_EXEC;
5775
5776 vki_mmapobj_result_t *mrp = &storage[*elements];
5777 mrp->mr_msize = phdr->p_memsz;
5778 mrp->mr_fsize = phdr->p_filesz;
5779 mrp->mr_offset = 0;
5780 mrp->mr_prot = prot;
5781 mrp->mr_flags = 0;
5782 Off64T file_offset = phdr->p_offset;
5783 if (idx == first_segment_idx) {
5784 mrp->mr_flags = VKI_MR_HDR_ELF;
5785 if (ehdr->e_type == VKI_ET_DYN) {
5786 if (phdr->p_offset > 0) {
5787 /* Include the ELF header into the first segment.
5788 This means we ignore p_offset from the program header
5789 and map from file offset 0. */
5790 mrp->mr_msize += phdr->p_offset;
5791 mrp->mr_fsize += phdr->p_offset;
5792 file_offset = 0;
5793 }
5794 } else {
5795 vg_assert(ehdr->e_type == VKI_ET_EXEC);
5796 start_addr = phdr->p_vaddr;
5797 }
5798 }
5799
5800 /* p_vaddr is absolute for ET_EXEC, and relative for ET_DYN. */
5801 mrp->mr_addr = (vki_caddr_t) phdr->p_vaddr;
5802 if (ehdr->e_type == VKI_ET_DYN) {
5803 mrp->mr_addr += start_addr;
5804 }
5805
5806 SizeT page_offset = (Addr) mrp->mr_addr & VKI_PAGEOFFSET;
5807 if (page_offset > 0) {
5808 vg_assert(file_offset >= page_offset);
5809 /* Mapping address does not start at the beginning of a page.
5810 Therefore include some bytes before to make it page aligned. */
5811 mrp->mr_addr -= page_offset;
5812 mrp->mr_msize += page_offset;
5813 mrp->mr_offset = page_offset;
5814 file_offset -= page_offset;
5815 }
5816 SizeT file_size = mrp->mr_fsize + mrp->mr_offset;
5817 if (VG_(clo_trace_syscalls))
5818 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
5819 "mmapobj result #%u: addr=%#lx msize=%#lx "
5820 "fsize=%#lx mr_offset=%#lx prot=%#x flags=%#x\n",
5821 *elements, (Addr) mrp->mr_addr,
5822 (UWord) mrp->mr_msize, (UWord) mrp->mr_fsize,
5823 (UWord) mrp->mr_offset, mrp->mr_prot,
5824 mrp->mr_flags);
5825
5826 UInt flags = VKI_MAP_PRIVATE | VKI_MAP_FIXED;
5827 if ((mrp->mr_prot & (VKI_PROT_WRITE | VKI_PROT_EXEC)) ==
5828 VKI_PROT_EXEC) {
5829 flags |= VKI_MAP_TEXT;
5830 } else {
5831 flags |= VKI_MAP_INITDATA;
5832 }
5833
5834 /* Determine if there will be partially unused page after file-based
5835 mapping. If so, then we need to zero it explicitly afterwards. */
5836 Addr mapping_end = (Addr) mrp->mr_addr + file_size;
5837 SizeT zeroed_size = VG_PGROUNDUP(mapping_end) - mapping_end;
5838 Bool mprotect_needed = False;
5839 if ((zeroed_size > 0) && ((prot & VKI_PROT_WRITE) == 0)) {
5840 prot |= VKI_PROT_WRITE;
5841 mprotect_needed = True;
5842 }
5843
5844 if (ehdr->e_type == VKI_ET_EXEC) {
5845 /* Now check if the requested address space is available. */
5846 if (!VG_(am_is_free_or_resvn)((Addr) mrp->mr_addr, mrp->mr_msize)) {
5847 if (VG_(clo_trace_syscalls))
5848 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5849 "requested segment at %#lx with size of "
5850 "%#lx bytes is not available\n",
5851 (Addr) mrp->mr_addr, (UWord) mrp->mr_msize);
5852 res = VG_(mk_SysRes_Error)(VKI_EADDRINUSE);
5853 goto mmap_error;
5854 }
5855 }
5856
5857 if (file_size > 0) {
5858 res = VG_(am_mmap_file_fixed_client_flags)((Addr) mrp->mr_addr,
5859 file_size, prot, flags, fd, file_offset);
5860 if (sr_isError(res)) {
5861 if (VG_(clo_trace_syscalls))
5862 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5863 "mmap failed: addr=%#lx size=%#lx prot=%#x "
5864 "flags=%#x fd=%d file offset=%#llx\n",
5865 (Addr) mrp->mr_addr, file_size,
5866 prot, flags, fd, file_offset);
5867 goto mmap_error;
5868 }
5869
5870 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
5871 "segment: vaddr=%#lx size=%#lx prot=%#x "
5872 "flags=%#x fd=%d file offset=%#llx\n",
5873 (Addr) mrp->mr_addr, file_size, mrp->mr_prot,
5874 flags, fd, file_offset);
5875 }
5876
5877 if (zeroed_size > 0) {
5878 /* Now zero out the end of partially used page. */
5879 VG_(memset)((void *) mapping_end, 0, zeroed_size);
5880 if (mprotect_needed) {
5881 prot &= ~VKI_PROT_WRITE;
5882 res = VG_(do_syscall3)(SYS_mprotect, (Addr) mrp->mr_addr,
5883 file_size, prot);
5884 if (sr_isError(res)) {
5885 if (VG_(clo_trace_syscalls))
5886 VG_(debugLog)(3, "syswrap-solaris",
5887 "mmapobj_process_phdrs: mprotect failed: "
5888 "addr=%#lx size=%#lx prot=%#x",
5889 (Addr) mrp->mr_addr, file_size, prot);
5890 /* Mapping for this segment was already established. */
5891 idx += 1;
5892 goto mmap_error;
5893 }
5894 }
5895 }
5896
5897 if (file_size > 0) {
5898 ML_(notify_core_and_tool_of_mmap)((Addr) mrp->mr_addr, file_size,
5899 prot, flags, fd, file_offset);
5900 }
5901
5902 /* Page(s) after the mapping backed up by the file are part of BSS.
5903 They need to be mmap'ed over with correct flags and will be
5904 implicitly zeroed. */
5905 mapping_end = VG_PGROUNDUP(mrp->mr_addr + mrp->mr_msize);
5906 Addr page_end = VG_PGROUNDUP(mrp->mr_addr + file_size);
5907 vg_assert(mapping_end >= page_end);
5908 zeroed_size = mapping_end - page_end;
5909 if (zeroed_size > 0) {
5910 flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS;
5911 res = VG_(am_mmap_anon_fixed_client)(page_end, zeroed_size, prot);
5912 if (sr_isError(res)) {
5913 if (VG_(clo_trace_syscalls))
5914 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
5915 "mmap_anon failed: addr=%#lx size=%#lx "
5916 "prot=%#x\n", page_end, zeroed_size, prot);
5917 idx += 1; /* mapping for this segment was already established */
5918 goto mmap_error;
5919 }
5920
5921 VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
5922 "anonymous segment (BSS): vaddr=%#lx size=%#lx "
5923 "prot=%#x\n", page_end, zeroed_size, prot);
5924 ML_(notify_core_and_tool_of_mmap)(page_end, zeroed_size,
5925 prot, flags, -1, 0);
5926 }
5927
5928 VG_(di_notify_mmap)((Addr) mrp->mr_addr, False /*allow_SkFileV*/, fd);
5929
5930 *elements += 1;
5931 vg_assert(*elements <= segments);
5932 }
5933 }
5934
5935 if ((ehdr->e_type == VKI_ET_EXEC) && (!brk_segment_established)) {
5936 vg_assert(VG_(brk_base) == VG_(brk_limit));
5937 vg_assert(VG_(brk_base) == -1);
5938 VG_(brk_base) = VG_(brk_limit) = elfbrk;
5939
5940 if (!VG_(setup_client_dataseg)()) {
5941 VG_(umsg)("Cannot map memory to initialize brk segment in thread #%d "
5942 "at %#lx\n", tid, VG_(brk_base));
5943 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
5944 goto mmap_error;
5945 }
5946
5947 VG_(track_client_dataseg)(tid);
5948 }
5949
5950 /* Restore VG_(clo_sanity_level). The scheduler will perform the aspacemgr
5951 sanity check after the syscall. */
5952 VG_(clo_sanity_level) = sanity_level;
5953
5954 return VG_(mk_SysRes_Success)(0);
5955
5956 mmap_error:
5957 for (i = idx - 1; i > 0; i--) {
5958 Bool discard_translations;
5959 Addr addr = (Addr) storage[i].mr_addr;
5960
5961 VG_(am_munmap_client)(&discard_translations, addr, storage[i].mr_msize);
5962 ML_(notify_core_and_tool_of_munmap)(addr, storage[i].mr_msize);
5963 }
5964 *elements = 0;
5965 return res;
5966
5967 #undef ADVANCE_PHDR
5968 }
5969
5970 static SysRes mmapobj_interpret(ThreadId tid, Int fd,
5971 vki_mmapobj_result_t *storage,
5972 vki_uint_t *elements)
5973 {
5974 SysRes res;
5975
5976 struct vg_stat stats;
5977 if (VG_(fstat)(fd, &stats) != 0) {
5978 return VG_(mk_SysRes_Error)(VKI_EBADF);
5979 }
5980
5981 if (stats.size < sizeof(VKI_ESZ(Ehdr))) {
5982 if (VG_(clo_trace_syscalls))
5983 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: insufficient "
5984 "file size (%lld)\n", stats.size);
5985 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
5986 }
5987
5988 /* Align the header buffer appropriately. */
5989 vki_ulong_t lheader[sizeof(VKI_ESZ(Ehdr)) / sizeof(vki_ulong_t) + 1];
5990 HChar *header = (HChar *) &lheader;
5991
5992 res = VG_(pread)(fd, header, sizeof(VKI_ESZ(Ehdr)), 0);
5993 if (sr_isError(res)) {
5994 if (VG_(clo_trace_syscalls))
5995 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
5996 "header failed\n");
5997 return res;
5998 } else if (sr_Res(res) != sizeof(VKI_ESZ(Ehdr))) {
5999 if (VG_(clo_trace_syscalls))
6000 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6001 "header failed - only %lu bytes out of %lu\n",
6002 sr_Res(res), (UWord) sizeof(VKI_ESZ(Ehdr)));
6003 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6004 }
6005
6006 /* Verify file type is ELF. */
6007 if ((header[VKI_EI_MAG0] != VKI_ELFMAG0) ||
6008 (header[VKI_EI_MAG1] != VKI_ELFMAG1) ||
6009 (header[VKI_EI_MAG2] != VKI_ELFMAG2) ||
6010 (header[VKI_EI_MAG3] != VKI_ELFMAG3)) {
6011 if (VG_(clo_trace_syscalls))
6012 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6013 "missing magic\n");
6014 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6015 }
6016
6017 if (header[VKI_EI_CLASS] != VG_ELF_CLASS) {
6018 if (VG_(clo_trace_syscalls))
6019 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF class "
6020 "mismatch (%u vs %u)\n", header[VKI_EI_CLASS],
6021 VG_ELF_CLASS);
6022 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6023 }
6024
6025 VKI_ESZ(Ehdr) *ehdr = (VKI_ESZ(Ehdr) *) header;
6026 if ((ehdr->e_type != VKI_ET_EXEC) && (ehdr->e_type != VKI_ET_DYN)) {
6027 VG_(unimplemented)("Syswrap of the mmapobj call with ELF type %u.",
6028 ehdr->e_type);
6029 /*NOTREACHED*/
6030 return res;
6031 }
6032
6033 if (ehdr->e_phnum == VKI_PN_XNUM) {
6034 VG_(unimplemented)("Syswrap of the mmapobj call with number of ELF "
6035 "program headers == PN_XNUM");
6036 /*NOTREACHED*/
6037 return res;
6038 }
6039
6040 /* Check alignment. */
6041 #if defined(VGP_x86_solaris)
6042 if (!VG_IS_4_ALIGNED(ehdr->e_phentsize)) {
6043 #elif defined(VGP_amd64_solaris)
6044 if (!VG_IS_8_ALIGNED(ehdr->e_phentsize)) {
6045 #else
6046 # error "Unknown platform"
6047 #endif
6048 if (VG_(clo_trace_syscalls))
6049 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
6050 "phentsize not aligned properly (%u)\n",
6051 ehdr->e_phentsize);
6052 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6053 }
6054
6055 SizeT phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
6056 if (phdrs_size == 0) {
6057 if (VG_(clo_trace_syscalls))
6058 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: no ELF "
6059 "program headers\n");
6060 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6061 }
6062
6063 VKI_ESZ(Phdr) *phdrs = VG_(malloc)("syswrap.mi.1", phdrs_size);
6064 res = VG_(pread)(fd, phdrs, phdrs_size, ehdr->e_phoff);
6065 if (sr_isError(res)) {
6066 if (VG_(clo_trace_syscalls))
6067 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6068 "program headers failed\n");
6069 VG_(free)(phdrs);
6070 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6071 } else if (sr_Res(res) != phdrs_size) {
6072 if (VG_(clo_trace_syscalls))
6073 VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
6074 "program headers failed - only %lu bytes out of %lu\n",
6075 sr_Res(res), phdrs_size);
6076 VG_(free)(phdrs);
6077 return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
6078 }
6079
6080 if (VG_(clo_trace_syscalls))
6081 VG_(debugLog)(2, "syswrap-solaris", "mmapobj_interpret: %u ELF "
6082 "program headers with total size of %lu bytes\n",
6083 ehdr->e_phnum, phdrs_size);
6084
6085 /* Now process the program headers. */
6086 res = mmapobj_process_phdrs(tid, fd, storage, elements, ehdr, phdrs);
6087 VG_(free)(phdrs);
6088 return res;
6089 }
6090
6091 PRE(sys_mmapobj)
6092 {
6093 /* int mmapobj(int fd, uint_t flags, mmapobj_result_t *storage,
6094 uint_t *elements, void *arg); */
6095 PRINT("sys_mmapobj ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6096 ARG4, ARG5);
6097 PRE_REG_READ5(long, "mmapobj", int, fd, vki_uint_t, flags,
6098 mmapobj_result_t *, storage, uint_t *, elements,
6099 void *, arg);
6100
6101 PRE_MEM_READ("mmapobj(elements)", ARG4, sizeof(vki_uint_t));
6102 /*PRE_MEM_WRITE("mmapobj(elements)", ARG4, sizeof(vki_uint_t));*/
6103 if (ML_(safe_to_deref)((void*)ARG4, sizeof(vki_uint_t))) {
6104 vki_uint_t *u = (vki_uint_t*)ARG4;
6105 PRE_MEM_WRITE("mmapobj(storage)", ARG3,
6106 *u * sizeof(vki_mmapobj_result_t));
6107 }
6108
6109 if (ARG2 & VKI_MMOBJ_PADDING)
6110 PRE_MEM_READ("mmapobj(arg)", ARG5, sizeof(vki_size_t));
6111
6112 /* Be strict. */
6113 if (!ML_(fd_allowed)(ARG1, "mmapobj", tid, False)) {
6114 SET_STATUS_Failure(VKI_EBADF);
6115 return;
6116 }
6117
6118 /* We cannot advise mmapobj about desired address(es). Unfortunately
6119 the kernel places mappings from mmapobj at the end of the process address
6120 space, defeating memcheck's optimized fast 2-level array algorithm.
6121 So we need to emulate what mmapobj does in the kernel. */
6122
6123 /* Sanity check on parameters. */
6124 if ((ARG2 & ~VKI_MMOBJ_ALL_FLAGS) != 0) {
6125 SET_STATUS_Failure(VKI_EINVAL);
6126 return;
6127 }
6128
6129 if (!ML_(safe_to_deref)((void *) ARG4, sizeof(vki_uint_t))) {
6130 SET_STATUS_Failure(VKI_EFAULT);
6131 return;
6132 }
6133 vki_uint_t *elements = (vki_uint_t *) ARG4;
6134
6135 if (*elements > 0) {
6136 if (!ML_(safe_to_deref)((void *) ARG3,
6137 *elements * sizeof(vki_mmapobj_result_t))) {
6138 SET_STATUS_Failure(VKI_EFAULT);
6139 return;
6140 }
6141 }
6142
6143 /* For now, supported is only MMOBJ_INTERPRET and no MMOBJ_PADDING. */
6144 if (ARG2 != VKI_MMOBJ_INTERPRET) {
6145 VG_(unimplemented)("Syswrap of the mmapobj call with flags %lu.", ARG2);
6146 /*NOTREACHED*/
6147 return;
6148 }
6149
6150 SysRes res = mmapobj_interpret(tid, (Int) ARG1,
6151 (vki_mmapobj_result_t *) ARG3, elements);
6152 SET_STATUS_from_SysRes(res);
6153
6154 if (!sr_isError(res)) {
6155 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
6156
6157 UInt idx;
6158 for (idx = 0; idx < *(vki_uint_t *) ARG4; idx++) {
6159 vki_mmapobj_result_t *mrp = &((vki_mmapobj_result_t *) ARG3)[idx];
6160 POST_FIELD_WRITE(mrp->mr_addr);
6161 POST_FIELD_WRITE(mrp->mr_msize);
6162 POST_FIELD_WRITE(mrp->mr_fsize);
6163 POST_FIELD_WRITE(mrp->mr_prot);
6164 POST_FIELD_WRITE(mrp->mr_flags);
6165 POST_FIELD_WRITE(mrp->mr_offset);
6166 }
6167 }
6168 }
6169
6170 PRE(sys_memcntl)
6171 {
6172 /* int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
6173 int attr, int mask); */
6174 PRINT("sys_memcntl ( %#lx, %#lx, %ld, %#lx, %#lx, %#lx )", ARG1, ARG2,
6175 SARG3, ARG4, ARG5, ARG6);
6176 PRE_REG_READ6(long, "memcntl", void *, addr, vki_size_t, len, int, cmd,
6177 void *, arg, int, attr, int, mask);
6178
6179 if (ARG3 != VKI_MC_LOCKAS && ARG3 != VKI_MC_UNLOCKAS &&
6180 !ML_(valid_client_addr)(ARG1, ARG2, tid, "memcntl")) {
6181 /* MC_LOCKAS and MC_UNLOCKAS work on the complete address space thus we
6182 don't check the address range validity if these commands are
6183 requested. */
6184 SET_STATUS_Failure(VKI_ENOMEM);
6185 return;
6186 }
6187
6188 if (ARG3 == VKI_MC_HAT_ADVISE)
6189 PRE_MEM_READ("memcntl(arg)", ARG4, sizeof(struct vki_memcntl_mha));
6190 }
6191
6192 PRE(sys_getpmsg)
6193 {
6194 /* int getpmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
6195 int *bandp, int *flagsp); */
6196 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6197 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6198 *flags |= SfMayBlock;
6199 PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6200 ARG4, ARG5);
6201 PRE_REG_READ5(long, "getpmsg", int, fildes, struct vki_strbuf *, ctlptr,
6202 struct vki_strbuf *, dataptr, int *, bandp, int *, flagsp);
6203 if (ctrlptr) {
6204 PRE_FIELD_READ("getpmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
6205 PRE_FIELD_WRITE("getpmsg(ctrlptr->len)", ctrlptr->len);
6206 PRE_FIELD_READ("getpmsg(ctrlptr->buf)", ctrlptr->buf);
6207 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6208 && ctrlptr->maxlen > 0)
6209 PRE_MEM_WRITE("getpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6210 ctrlptr->maxlen);
6211 }
6212 if (dataptr) {
6213 PRE_FIELD_READ("getpmsg(dataptr->maxlen)", dataptr->maxlen);
6214 PRE_FIELD_WRITE("getpmsg(dataptr->len)", dataptr->len);
6215 PRE_FIELD_READ("getpmsg(dataptr->buf)", dataptr->buf);
6216 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6217 && dataptr->maxlen > 0)
6218 PRE_MEM_WRITE("getpmsg(dataptr->buf)", (Addr)dataptr->buf,
6219 dataptr->maxlen);
6220 }
6221 PRE_MEM_READ("getpmsg(bandp)", ARG4, sizeof(int));
6222 /*PRE_MEM_WRITE("getpmsg(bandp)", ARG4, sizeof(int));*/
6223 PRE_MEM_READ("getpmsg(flagsp)", ARG5, sizeof(int));
6224 /*PRE_MEM_WRITE("getpmsg(flagsp)", ARG5, sizeof(int));*/
6225
6226 /* Be strict. */
6227 if (!ML_(fd_allowed)(ARG1, "getpmsg", tid, False))
6228 SET_STATUS_Failure(VKI_EBADF);
6229 }
6230
6231 POST(sys_getpmsg)
6232 {
6233 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6234 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6235
6236 if (ctrlptr && ctrlptr->len > 0)
6237 POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
6238 if (dataptr && dataptr->len > 0)
6239 POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
6240 POST_MEM_WRITE(ARG4, sizeof(int));
6241 POST_MEM_WRITE(ARG5, sizeof(int));
6242 }
6243
6244 PRE(sys_putpmsg)
6245 {
6246 /* int putpmsg(int fildes, const struct strbuf *ctlptr,
6247 const struct strbuf *dataptr, int band, int flags); */
6248 struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
6249 struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
6250 *flags |= SfMayBlock;
6251 PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
6252 SARG5);
6253 PRE_REG_READ5(long, "putpmsg", int, fildes, struct vki_strbuf *, ctrlptr,
6254 struct vki_strbuf *, dataptr, int, band, int, flags);
6255 if (ctrlptr) {
6256 PRE_FIELD_READ("putpmsg(ctrlptr->len)", ctrlptr->len);
6257 PRE_FIELD_READ("putpmsg(ctrlptr->buf)", ctrlptr->buf);
6258 if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
6259 && ctrlptr->len > 0)
6260 PRE_MEM_READ("putpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
6261 ctrlptr->len);
6262 }
6263 if (dataptr) {
6264 PRE_FIELD_READ("putpmsg(dataptr->len)", dataptr->len);
6265 PRE_FIELD_READ("putpmsg(dataptr->buf)", dataptr->buf);
6266 if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
6267 && dataptr->len > 0)
6268 PRE_MEM_READ("putpmsg(dataptr->buf)", (Addr)dataptr->buf,
6269 dataptr->len);
6270 }
6271
6272 /* Be strict. */
6273 if (!ML_(fd_allowed)(ARG1, "putpmsg", tid, False))
6274 SET_STATUS_Failure(VKI_EBADF);
6275 }
6276
6277 #if defined(SOLARIS_OLD_SYSCALLS)
6278 PRE(sys_rename)
6279 {
6280 /* int rename(const char *from, const char *to); */
6281
6282 *flags |= SfMayBlock;
6283 PRINT("sys_rename ( %#lx(%s), %#lx(%s) )",
6284 ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2);
6285 PRE_REG_READ2(long, "rename", const char *, from, const char *, to);
6286
6287 PRE_MEM_RASCIIZ("rename(from)", ARG1);
6288 PRE_MEM_RASCIIZ("rename(to)", ARG2);
6289 }
6290 #endif /* SOLARIS_OLD_SYSCALLS */
6291
6292 PRE(sys_uname)
6293 {
6294 /* int uname(struct utsname *name); */
6295 PRINT("sys_uname ( %#lx )", ARG1);
6296 PRE_REG_READ1(long, "uname", struct vki_utsname *, name);
6297 PRE_MEM_WRITE("uname(name)", ARG1, sizeof(struct vki_utsname));
6298 }
6299
6300 POST(sys_uname)
6301 {
6302 struct vki_utsname *name = (struct vki_utsname *) ARG1;
6303 POST_MEM_WRITE((Addr) name->sysname, VG_(strlen)(name->sysname) + 1);
6304 POST_MEM_WRITE((Addr) name->nodename, VG_(strlen)(name->nodename) + 1);
6305 POST_MEM_WRITE((Addr) name->release, VG_(strlen)(name->release) + 1);
6306 POST_MEM_WRITE((Addr) name->version, VG_(strlen)(name->version) + 1);
6307 POST_MEM_WRITE((Addr) name->machine, VG_(strlen)(name->machine) + 1);
6308 }
6309
6310 PRE(sys_setegid)
6311 {
6312 /* int setegid(gid_t egid); */
6313 PRINT("sys_setegid ( %ld )", SARG1);
6314 PRE_REG_READ1(long, "setegid", vki_gid_t, egid);
6315 }
6316
6317 PRE(sys_sysconfig)
6318 {
6319 /* long sysconf(int name); */
6320 PRINT("sys_sysconfig ( %ld )", SARG1);
6321 PRE_REG_READ1(long, "sysconf", int, name);
6322
6323 if (ARG1 == VKI_CONFIG_OPEN_FILES)
6324 SET_STATUS_Success(VG_(fd_soft_limit));
6325 }
6326
6327 PRE(sys_systeminfo)
6328 {
6329 /* int sysinfo(int command, char *buf, long count); */
6330 PRINT("sys_systeminfo ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
6331 PRE_REG_READ3(long, "sysinfo", int, command, char *, buf, long, count);
6332
6333 switch (ARG1 /*command*/) {
6334 case VKI_SI_SYSNAME:
6335 case VKI_SI_HOSTNAME:
6336 case VKI_SI_RELEASE:
6337 case VKI_SI_VERSION:
6338 case VKI_SI_MACHINE:
6339 case VKI_SI_ARCHITECTURE:
6340 case VKI_SI_HW_SERIAL:
6341 case VKI_SI_HW_PROVIDER:
6342 case VKI_SI_SRPC_DOMAIN:
6343 case VKI_SI_PLATFORM:
6344 case VKI_SI_ISALIST:
6345 case VKI_SI_DHCP_CACHE:
6346 case VKI_SI_ARCHITECTURE_32:
6347 case VKI_SI_ARCHITECTURE_64:
6348 case VKI_SI_ARCHITECTURE_K:
6349 case VKI_SI_ARCHITECTURE_NATIVE:
6350 PRE_MEM_WRITE("sysinfo(buf)", ARG2, ARG3);
6351 break;
6352
6353 case VKI_SI_SET_HOSTNAME:
6354 case VKI_SI_SET_SRCP_DOMAIN:
6355 PRE_MEM_RASCIIZ("sysinfo(buf)", ARG2);
6356 break;
6357
6358 default:
6359 VG_(unimplemented)("Syswrap of the sysinfo call with command %ld.", SARG1);
6360 /*NOTREACHED*/
6361 break;
6362 }
6363 }
6364
6365 POST(sys_systeminfo)
6366 {
6367 if (ARG1 != VKI_SI_SET_HOSTNAME && ARG1 != VKI_SI_SET_SRCP_DOMAIN)
6368 POST_MEM_WRITE(ARG2, MIN(RES, ARG3));
6369 }
6370
6371 PRE(sys_seteuid)
6372 {
6373 /* int seteuid(uid_t euid); */
6374 PRINT("sys_seteuid ( %ld )", SARG1);
6375 PRE_REG_READ1(long, "seteuid", vki_uid_t, euid);
6376 }
6377
6378 PRE(sys_forksys)
6379 {
6380 /* int64_t forksys(int subcode, int flags); */
6381 Int fds[2];
6382 Int res;
6383 PRINT("sys_forksys ( %ld, %ld )", SARG1, SARG2);
6384 PRE_REG_READ2(long, "forksys", int, subcode, int, flags);
6385
6386 if (ARG1 == 1) {
6387 /* Support for forkall() requires changes to the big lock processing
6388 which are not yet implemented. */
6389 VG_(unimplemented)("Support for forkall().");
6390 /*NOTREACHED*/
6391 return;
6392 }
6393
6394 if (ARG1 != 0 && ARG1 != 2) {
6395 VG_(unimplemented)("Syswrap of the forksys call where subcode=%ld.",
6396 SARG1);
6397 /*NOTREACHED*/
6398 }
6399
6400 if (ARG1 == 2) {
6401 /* vfork() is requested. Translate it to a normal fork() but work around
6402 a problem with posix_spawn() which relies on the real vfork()
6403 behaviour. See a description in vg_preloaded.c for details. */
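/* In outline: one end of the pipe is handed to the client via
   VG_(vfork_fildes_addr) and the parent then blocks reading the other end
   (Solaris pipes are bidirectional, so either end can be read). A single
   byte received is treated as an error code for the "vfork", presumably
   written by the child when its exec fails; end-of-file simply releases the
   parent. The child-side details live in vg_preloaded.c. */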
6404 res = VG_(pipe)(fds);
6405 vg_assert(res == 0);
6406
6407 vg_assert(fds[0] != fds[1]);
6408
6409 /* Move to Valgrind fds and set the close-on-exec flag on both of them (done
6410 by VG_(safe_fd)). */
6411 fds[0] = VG_(safe_fd)(fds[0]);
6412 fds[1] = VG_(safe_fd)(fds[1]);
6413 vg_assert(fds[0] != fds[1]);
6414
6415 vg_assert(VG_(vfork_fildes_addr) != NULL);
6416 vg_assert(*VG_(vfork_fildes_addr) == -1);
6417 *VG_(vfork_fildes_addr) = fds[0];
6418 }
6419
6420 VG_(do_atfork_pre)(tid);
6421 SET_STATUS_from_SysRes(VG_(do_syscall2)(__NR_forksys, 0, ARG2));
6422
6423 if (!SUCCESS) {
6424 /* vfork */
6425 if (ARG1 == 2) {
6426 VG_(close)(fds[0]);
6427 VG_(close)(fds[1]);
6428 }
6429
6430 return;
6431 }
6432
6433 if (RESHI) {
6434 VG_(do_atfork_child)(tid);
6435
6436 /* If --child-silent-after-fork=yes was specified, set the output file
6437 descriptors to 'impossible' values. This is noticed by
6438 send_bytes_to_logging_sink() in m_libcprint.c, which duly stops
6439 writing any further output. */
6440 if (VG_(clo_child_silent_after_fork)) {
6441 if (!VG_(log_output_sink).is_socket)
6442 VG_(log_output_sink).fd = -1;
6443 if (!VG_(xml_output_sink).is_socket)
6444 VG_(xml_output_sink).fd = -1;
6445 }
6446
6447 /* vfork */
6448 if (ARG1 == 2)
6449 VG_(close)(fds[1]);
6450 }
6451 else {
6452 VG_(do_atfork_parent)(tid);
6453
6454 /* Print information about the fork. */
6455 PRINT(" fork: process %d created child %d\n", VG_(getpid)(),
6456 (Int)RES);
6457
6458 /* vfork */
6459 if (ARG1 == 2) {
6460 /* Wait for the child to finish (exec or exit). */
6461 UChar w;
6462
6463 VG_(close)(fds[0]);
6464
6465 res = VG_(read)(fds[1], &w, 1);
6466 if (res == 1)
6467 SET_STATUS_Failure(w);
6468 VG_(close)(fds[1]);
6469
6470 *VG_(vfork_fildes_addr) = -1;
6471 }
6472 }
6473 }
6474
6475 PRE(sys_sigtimedwait)
6476 {
6477 /* int sigtimedwait(const sigset_t *set, siginfo_t *info,
6478 const timespec_t *timeout); */
6479 *flags |= SfMayBlock;
6480 PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
6481 PRE_REG_READ3(long, "sigtimedwait", vki_sigset_t *, set,
6482 vki_siginfo_t *, info, vki_timespec_t *, timeout);
6483 PRE_MEM_READ("sigtimewait(set)", ARG1, sizeof(vki_sigset_t));
6484 if (ARG2)
6485 PRE_MEM_WRITE("sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t));
6486 if (ARG3)
6487 PRE_MEM_READ("sigtimedwait(timeout)", ARG3, sizeof(vki_timespec_t));
6488 }
6489
6490 POST(sys_sigtimedwait)
6491 {
6492 if (ARG2)
6493 POST_MEM_WRITE(ARG2, sizeof(vki_siginfo_t));
6494 }
6495
6496 PRE(sys_yield)
6497 {
6498 /* void yield(void); */
6499 *flags |= SfMayBlock;
6500 PRINT("sys_yield ( )");
6501 PRE_REG_READ0(long, "yield");
6502 }
6503
6504 PRE(sys_lwp_sema_post)
6505 {
6506 /* int lwp_sema_post(lwp_sema_t *sema); */
6507 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6508 *flags |= SfMayBlock;
6509 PRINT("sys_lwp_sema_post ( %#lx )", ARG1);
6510 PRE_REG_READ1(long, "lwp_sema_post", lwp_sema_t *, sema);
6511
6512 PRE_FIELD_READ("lwp_sema_post(sema->type)", sema->vki_sema_type);
6513 PRE_FIELD_READ("lwp_sema_post(sema->count)", sema->vki_sema_count);
6514 /*PRE_FIELD_WRITE("lwp_sema_post(sema->count)", sema->vki_sema_count);*/
6515 PRE_FIELD_READ("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);
6516 /*PRE_FIELD_WRITE("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);*/
6517 }
6518
6519 POST(sys_lwp_sema_post)
6520 {
6521 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6522 POST_FIELD_WRITE(sema->vki_sema_count);
6523 POST_FIELD_WRITE(sema->vki_sema_waiters);
6524 }
6525
6526 PRE(sys_lwp_sema_trywait)
6527 {
6528 /* int lwp_sema_trywait(lwp_sema_t *sema); */
6529 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6530 PRINT("sys_lwp_sema_trywait ( %#lx )", ARG1);
6531 PRE_REG_READ1(long, "lwp_sema_trywait", lwp_sema_t *, sema);
6532
6533 PRE_FIELD_READ("lwp_sema_trywait(sema->type)", sema->vki_sema_type);
6534 PRE_FIELD_READ("lwp_sema_trywait(sema->count)", sema->vki_sema_count);
6535 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->count)", sema->vki_sema_count);*/
6536 PRE_FIELD_READ("lwp_sema_trywait(sema->waiters)", sema->vki_sema_waiters);
6537 /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->waiters)",
6538 sema->vki_sema_waiters);*/
6539 }
6540
6541 POST(sys_lwp_sema_trywait)
6542 {
6543 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
6544 POST_FIELD_WRITE(sema->vki_sema_count);
6545 POST_FIELD_WRITE(sema->vki_sema_waiters);
6546 }
6547
6548 PRE(sys_lwp_detach)
6549 {
6550 /* int lwp_detach(id_t lwpid); */
6551 PRINT("sys_lwp_detach ( %ld )", SARG1);
6552 PRE_REG_READ1(long, "lwp_detach", vki_id_t, lwpid);
6553 }
6554
6555 PRE(sys_fchroot)
6556 {
6557 /* int fchroot(int fd); */
6558 PRINT("sys_fchroot ( %ld )", SARG1);
6559 PRE_REG_READ1(long, "fchroot", int, fd);
6560
6561 /* Be strict. */
6562 if (!ML_(fd_allowed)(ARG1, "fchroot", tid, False))
6563 SET_STATUS_Failure(VKI_EBADF);
6564 }
6565
6566 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
6567 PRE(sys_system_stats)
6568 {
6569 /* void system_stats(int flag); */
6570 PRINT("sys_system_stats ( %ld )", SARG1);
6571 PRE_REG_READ1(void, "system_stats", int, flag);
6572 }
6573 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
6574
6575 PRE(sys_gettimeofday)
6576 {
6577 /* Kernel: int gettimeofday(struct timeval *tp); */
6578 PRINT("sys_gettimeofday ( %#lx )", ARG1);
6579 PRE_REG_READ1(long, "gettimeofday", struct timeval *, tp);
6580 if (ARG1)
6581 PRE_timeval_WRITE("gettimeofday(tp)", ARG1);
6582 }
6583
6584 POST(sys_gettimeofday)
6585 {
6586 if (ARG1)
6587 POST_timeval_WRITE(ARG1);
6588 }
6589
6590 PRE(sys_lwp_create)
6591 {
6592 /* int lwp_create(ucontext_t *ucp, int flags, id_t *new_lwp) */
6593
6594 ThreadId ctid;
6595 ThreadState *ptst;
6596 ThreadState *ctst;
6597 Addr stack;
6598 SysRes res;
6599 vki_ucontext_t uc;
6600 Bool tool_informed = False;
6601
6602 PRINT("sys_lwp_create ( %#lx, %ld, %#lx )", ARG1, ARG2, ARG3);
6603 PRE_REG_READ3(long, "lwp_create", ucontext_t *, ucp, int, flags,
6604 id_t *, new_lwp);
6605
6606 if (ARG3 != 0)
6607 PRE_MEM_WRITE("lwp_create(new_lwp)", ARG3, sizeof(vki_id_t));
6608
6609 /* If we can't deref ucontext_t then we can't do anything. */
6610 if (!ML_(safe_to_deref)((void*)ARG1, sizeof(vki_ucontext_t))) {
6611 SET_STATUS_Failure(VKI_EINVAL);
6612 return;
6613 }
6614
6615 ctid = VG_(alloc_ThreadState)();
6616 ptst = VG_(get_ThreadState)(tid);
6617 ctst = VG_(get_ThreadState)(ctid);
6618
6619 /* Stay sane. */
6620 vg_assert(VG_(is_running_thread)(tid));
6621 vg_assert(VG_(is_valid_tid)(ctid));
6622
6623 stack = ML_(allocstack)(ctid);
6624 if (!stack) {
6625 res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
6626 goto out;
6627 }
6628
6629 /* First inherit parent's guest state */
6630 ctst->arch.vex = ptst->arch.vex;
6631 ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
6632 ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
6633
6634 /* Set up some values. */
6635 ctst->os_state.parent = tid;
6636 ctst->os_state.threadgroup = ptst->os_state.threadgroup;
6637 ctst->sig_mask = ptst->sig_mask;
6638 ctst->tmp_sig_mask = ptst->sig_mask;
6639
6640 /* No stack definition should be currently present. The stack will be set
6641 later by libc by a setustack() call (the getsetcontext syscall). */
6642 ctst->client_stack_highest_byte = 0;
6643 ctst->client_stack_szB = 0;
6644 vg_assert(ctst->os_state.stk_id == (UWord)(-1));
6645
6646 /* Inform a tool that a new thread is created. This has to be done before
6647 any other core->tool event is sent. */
6648 vg_assert(VG_(owns_BigLock_LL)(tid));
6649 VG_TRACK(pre_thread_ll_create, tid, ctid);
6650 tool_informed = True;
6651
6652 #if defined(VGP_x86_solaris)
   /* Set up GDT (this has to be done before calling
      VG_(restore_context)()). */
6655 ML_(setup_gdt)(&ctst->arch.vex);
6656 #elif defined(VGP_amd64_solaris)
6657 /* Nothing to do. */
6658 #else
6659 # error "Unknown platform"
6660 #endif
6661
6662 /* Now set up the new thread according to ucontext_t. */
6663 VG_(restore_context)(ctid, (vki_ucontext_t*)ARG1, Vg_CoreSysCall,
6664 True/*esp_is_thrptr*/);
6665
6666 /* Set up V thread (this also tells the kernel to block all signals in the
6667 thread). */
6668 ML_(setup_start_thread_context)(ctid, &uc);
6669
6670 /* Actually create the new thread. */
6671 res = VG_(do_syscall3)(__NR_lwp_create, (UWord)&uc, ARG2, ARG3);
6672
6673 if (!sr_isError(res)) {
6674 if (ARG3 != 0)
6675 POST_MEM_WRITE(ARG3, sizeof(vki_id_t));
6676 if (ARG2 & VKI_LWP_DAEMON)
6677 ctst->os_state.daemon_thread = True;
6678 }
6679
6680 out:
6681 if (sr_isError(res)) {
6682 if (tool_informed) {
6683 /* Tell a tool the thread exited in a hurry. */
6684 VG_TRACK(pre_thread_ll_exit, ctid);
6685 }
6686
6687 /* lwp_create failed. */
6688 VG_(cleanup_thread)(&ctst->arch);
6689 ctst->status = VgTs_Empty;
6690 }
6691
6692 SET_STATUS_from_SysRes(res);
6693 }
6694
6695 PRE(sys_lwp_exit)
6696 {
6697 /* void syslwp_exit(); */
6698 ThreadState *tst = VG_(get_ThreadState)(tid);
6699 PRINT("sys_lwp_exit ( )");
6700 PRE_REG_READ0(long, "lwp_exit");
6701
6702 /* Set the thread's status to be exiting, then claim that the syscall
6703 succeeded. */
6704 tst->exitreason = VgSrc_ExitThread;
6705 tst->os_state.exitcode = 0;
6706 SET_STATUS_Success(0);
6707 }
6708
6709 PRE(sys_lwp_suspend)
6710 {
6711 /* int lwp_suspend(id_t lwpid); */
6712 ThreadState *tst = VG_(get_ThreadState)(tid);
6713 PRINT("sys_lwp_suspend ( %ld )", SARG1);
6714 PRE_REG_READ1(long, "lwp_suspend", vki_id_t, lwpid);
6715
6716 if (ARG1 == tst->os_state.lwpid) {
      /* Set the SfMayBlock flag only if the thread to be suspended is the
         currently running one.  If this flag were also set when suspending
         other threads, a thread holding the_BigLock could get suspended and
         Valgrind would hang. */
6721 *flags |= SfMayBlock;
6722 }
6723 }
6724
6725 PRE(sys_lwp_continue)
6726 {
6727 /* int lwp_continue(id_t target_lwp); */
6728 PRINT("sys_lwp_continue ( %ld )", SARG1);
6729 PRE_REG_READ1(long, "lwp_continue", vki_id_t, target_lwp);
6730 }
6731
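/* Common handling for the signal-sending LWP syscalls.  This helper is
   shared by the lwp_sigqueue and lwp_kill wrappers below: it validates the
   signal number, short-circuits SIGKILL aimed at one of our own threads via
   ML_(do_sigkill), and requests the slow (SfMayBlock) syscall route so that
   an immediately delivered signal finds the thread in VgTs_WaitSys. */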
6732 static void
6733 do_lwp_sigqueue(const HChar *syscall_name, UWord target_lwp, UWord signo,
6734 SyscallStatus *status, UWord *flags)
6735 {
6736 if (!ML_(client_signal_OK)(signo)) {
6737 SET_STATUS_Failure(VKI_EINVAL);
6738 return;
6739 }
6740
6741 /* Check to see if this gave us a pending signal. */
6742 *flags |= SfPollAfter;
6743
6744 if (VG_(clo_trace_signals))
6745 VG_(message)(Vg_DebugMsg, "%s: sending signal %lu to thread %lu\n",
6746 syscall_name, signo, target_lwp);
6747
6748 /* If we're sending SIGKILL, check to see if the target is one of our
6749 threads and handle it specially. */
6750 if (signo == VKI_SIGKILL && ML_(do_sigkill)(target_lwp, -1)) {
6751 SET_STATUS_Success(0);
6752 return;
6753 }
6754
6755 /* Ask to handle this syscall via the slow route, since that's the only one
6756 that sets tst->status to VgTs_WaitSys. If the result of doing the
6757 syscall is an immediate run of async_signalhandler() in m_signals.c,
6758 then we need the thread to be properly tidied away. */
6759 *flags |= SfMayBlock;
6760 }
6761
6762 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
6763 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
6764 PRE(sys_lwp_sigqueue)
6765 {
6766 /* int lwp_sigqueue(pid_t target_pid, id_t target_lwp, int signal,
6767 void *value, int si_code, timespec_t *timeout);
6768 */
6769 PRINT("sys_lwp_sigqueue ( %ld, %ld, %ld, %#lx, %ld, %#lx )",
6770 SARG1, SARG2, SARG3, ARG4, SARG5, ARG6);
6771 PRE_REG_READ6(long, "lwp_sigqueue", vki_pid_t, target_pid,
6772 vki_id_t, target_lwp, int, signal, void *, value, int, si_code,
6773 vki_timespec_t *, timeout);
6774
6775 if (ARG6)
6776 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG6, sizeof(vki_timespec_t));
6777
6778 if ((ARG1 == 0) || (ARG1 == VG_(getpid)())) {
6779 do_lwp_sigqueue("lwp_sigqueue", ARG2, ARG3, status, flags);
6780 } else {
6781 /* Signal is sent to a different process. */
6782 if (VG_(clo_trace_signals))
6783 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sending signal %ld to "
6784 "process %ld, thread %ld\n", SARG3, SARG1, SARG2);
6785 *flags |= SfMayBlock;
6786 }
6787 }
6788
6789 POST(sys_lwp_sigqueue)
6790 {
6791 if (VG_(clo_trace_signals))
6792 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to process %ld, "
6793 "thread %ld\n", SARG3, SARG1, SARG2);
6794 }
6795
6796 #else
6797
6798 PRE(sys_lwp_sigqueue)
6799 {
6800 /* int lwp_sigqueue(id_t target_lwp, int signal, void *value,
6801 int si_code, timespec_t *timeout);
6802 */
6803 PRINT("sys_lwp_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
6804 SARG1, SARG2, ARG3, SARG4, ARG5);
6805 PRE_REG_READ5(long, "lwp_sigqueue", vki_id_t, target_lwp, int, signal,
6806 void *, value, int, si_code, vki_timespec_t *, timeout);
6807
6808 if (ARG5)
6809 PRE_MEM_READ("lwp_sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
6810
6811 do_lwp_sigqueue("lwp_sigqueue", ARG1, ARG2, status, flags);
6812 }
6813
6814 POST(sys_lwp_sigqueue)
6815 {
6816 if (VG_(clo_trace_signals))
6817 VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %lu to thread %lu\n",
6818 ARG2, ARG1);
6819 }
6820
6822 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID */
6823
6824 #else
6825
6826 PRE(sys_lwp_kill)
6827 {
6828 /* int lwp_kill(id_t target_lwp, int signal); */
6829 PRINT("sys_lwp_kill ( %ld, %ld )", SARG1, SARG2);
6830 PRE_REG_READ2(long, "lwp_kill", vki_id_t, target_lwp, int, signal);
6831
6832 do_lwp_sigqueue("lwp_kill", ARG1, ARG2, status, flags);
6833 }
6834
6835 POST(sys_lwp_kill)
6836 {
6837 if (VG_(clo_trace_signals))
6838 VG_(message)(Vg_DebugMsg, "lwp_kill: sent signal %lu to thread %lu\n",
6839 ARG2, ARG1);
6840 }
6841 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
6842
6843 PRE(sys_lwp_self)
6844 {
6845 /* id_t lwp_self(void); */
6846 PRINT("sys_lwp_self ( )");
6847 PRE_REG_READ0(long, "lwp_self");
6848 }
6849
6850 PRE(sys_lwp_sigmask)
6851 {
6852 /* int64_t lwp_sigmask(int how, uint_t bits0, uint_t bits1, uint_t bits2,
6853 uint_t bits3); */
6854 vki_sigset_t sigset;
6855 PRINT("sys_lwp_sigmask ( %ld, %#lx, %#lx, %#lx, %#lx )", SARG1, ARG2, ARG3,
6856 ARG4, ARG5);
6857 PRE_REG_READ5(long, "lwp_sigmask", int, how, vki_uint_t, bits0,
6858 vki_uint_t, bits1, vki_uint_t, bits2, vki_uint_t, bits3);
6859
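   /* The four uint arguments are simply the four 32-bit words of the signal
      set; repack them into a vki_sigset_t so that the generic
      VG_(do_sys_sigprocmask) handling can be reused. */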
6860 sigset.__sigbits[0] = ARG2;
6861 sigset.__sigbits[1] = ARG3;
6862 sigset.__sigbits[2] = ARG4;
6863 sigset.__sigbits[3] = ARG5;
6864
6865 SET_STATUS_from_SysRes(
6866 VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, &sigset, NULL)
6867 );
6868
6869 if (SUCCESS)
6870 *flags |= SfPollAfter;
6871 }
6872
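/* lwp_private is used by libc during thread start-up to install (or query)
   the thread-pointer segment base for the calling LWP.  The wrapper below
   emulates the call instead of forwarding it to the kernel: the value lives
   in the guest state (os_state.thrptr on x86, guest_FS_CONST on amd64) and
   the syscall status is set directly. */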
6873 PRE(sys_lwp_private)
6874 {
6875 /* int lwp_private(int cmd, int which, uintptr_t base); */
6876 ThreadState *tst = VG_(get_ThreadState)(tid);
6877 Int supported_base, supported_sel;
6878 PRINT("sys_lwp_private ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
6879 PRE_REG_READ3(long, "lwp_private", int, cmd, int, which,
6880 uintptr_t, base);
6881
6882 /* Note: Only the %gs base is currently supported on x86 and the %fs base
6883 on amd64. Support for the %fs base on x86 and for the %gs base on amd64
6884 should be added. Anything else is probably a client program error. */
6885 #if defined(VGP_x86_solaris)
6886 supported_base = VKI_LWP_GSBASE;
6887 supported_sel = VKI_LWPGS_SEL;
6888 #elif defined(VGP_amd64_solaris)
6889 supported_base = VKI_LWP_FSBASE;
6890 supported_sel = 0;
6891 #else
6892 #error "Unknown platform"
6893 #endif
6894 if (ARG2 != supported_base) {
6895 VG_(unimplemented)("Syswrap of the lwp_private call where which=%ld.",
6896 SARG2);
6897 /*NOTREACHED*/
6898 }
6899
6900 switch (ARG1 /*cmd*/) {
6901 case VKI_LWP_SETPRIVATE:
6902 #if defined(VGP_x86_solaris)
6903 tst->os_state.thrptr = ARG3;
6904 ML_(update_gdt_lwpgs)(tid);
6905 #elif defined(VGP_amd64_solaris)
6906 tst->arch.vex.guest_FS_CONST = ARG3;
6907 #else
6908 #error "Unknown platform"
6909 #endif
6910 SET_STATUS_Success(supported_sel);
6911 break;
6912 case VKI_LWP_GETPRIVATE:
6913 {
         Addr thrptr;
6915 #if defined(VGP_x86_solaris)
6916 thrptr = tst->os_state.thrptr;
6917 #elif defined(VGP_amd64_solaris)
6918 thrptr = tst->arch.vex.guest_FS_CONST;
6919 #else
6920 #error "Unknown platform"
6921 #endif
6922
6923 if (thrptr == 0) {
6924 SET_STATUS_Failure(VKI_EINVAL);
6925 return;
6926 }
6927
6928 #if defined(VGP_x86_solaris)
6929 if (tst->arch.vex.guest_GS != supported_sel) {
6930 SET_STATUS_Failure(VKI_EINVAL);
6931 return;
6932 }
6933 #elif defined(VGP_amd64_solaris)
      /* Valgrind on amd64 does not allow changing the %gs register, so a
         check that guest_GS is equal to supported_sel is not needed here. */
6937 #else
6938 #error "Unknown platform"
6939 #endif
6940
6941 PRE_MEM_WRITE("lwp_private(base)", ARG3, sizeof(Addr));
6942 if (!ML_(safe_to_deref((void*)ARG3, sizeof(Addr)))) {
6943 SET_STATUS_Failure(VKI_EFAULT);
6944 return;
6945 }
6946 *(Addr*)ARG3 = thrptr;
6947 POST_MEM_WRITE((Addr)ARG3, sizeof(Addr));
6948 SET_STATUS_Success(0);
6949 break;
6950 }
6951 default:
6952 VG_(unimplemented)("Syswrap of the lwp_private call where cmd=%ld.",
6953 SARG1);
6954 /*NOTREACHED*/
6955 break;
6956 }
6957 }
6958
6959 PRE(sys_lwp_wait)
6960 {
6961 /* int lwp_wait(id_t lwpid, id_t *departed); */
6962 *flags |= SfMayBlock;
6963 PRINT("sys_lwp_wait ( %ld, %#lx )", SARG1, ARG2);
6964 PRE_REG_READ2(long, "lwp_wait", vki_id_t, lwpid, vki_id_t *, departed);
6965 if (ARG2)
6966 PRE_MEM_WRITE("lwp_wait(departed)", ARG2, sizeof(vki_id_t));
6967 }
6968
6969 POST(sys_lwp_wait)
6970 {
   if (ARG2)
      POST_MEM_WRITE(ARG2, sizeof(vki_id_t));
6972 }
6973
6974 PRE(sys_lwp_mutex_wakeup)
6975 {
6976 /* int lwp_mutex_wakeup(lwp_mutex_t *lp, int release_all); */
6977 *flags |= SfMayBlock;
6978 PRINT("sys_lwp_mutex_wakeup ( %#lx, %ld )", ARG1, SARG2);
6979 PRE_REG_READ2(long, "lwp_mutex_wakeup", vki_lwp_mutex_t *, lp,
6980 int, release_all);
6981 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
6982 PRE_FIELD_READ("lwp_mutex_wakeup(lp->mutex_type)", lp->vki_mutex_type);
6983 PRE_FIELD_WRITE("lwp_mutex_wakeup(lp->mutex_waiters)",
6984 lp->vki_mutex_waiters);
6985 }
6986
6987 POST(sys_lwp_mutex_wakeup)
6988 {
6989 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *) ARG1;
6990 POST_FIELD_WRITE(lp->vki_mutex_waiters);
6991 }
6992
6993 PRE(sys_lwp_cond_wait)
6994 {
6995 /* int lwp_cond_wait(lwp_cond_t *cvp, lwp_mutex_t *mp, timespec_t *tsp,
6996 int check_park); */
6997 *flags |= SfMayBlock;
6998 PRINT("sys_lwp_cond_wait( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, SARG4);
6999 PRE_REG_READ4(long, "lwp_cond_wait", vki_lwp_cond_t *, cvp,
                 vki_lwp_mutex_t *, mp, vki_timespec_t *, tsp, int, check_park);
7001
7002 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7003 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7004 PRE_FIELD_READ("lwp_cond_wait(cvp->type)", cvp->vki_cond_type);
7005 PRE_FIELD_READ("lwp_cond_wait(cvp->waiters_kernel)",
7006 cvp->vki_cond_waiters_kernel);
7007 PRE_FIELD_READ("lwp_cond_wait(mp->mutex_type)", mp->vki_mutex_type);
7008 PRE_FIELD_WRITE("lwp_cond_wait(mp->mutex_waiters)", mp->vki_mutex_waiters);
7009 if (ARG3 != 0)
7010 PRE_MEM_READ("lwp_cond_wait(tsp)", ARG3, sizeof(vki_timespec_t));
7011 }
7012
7013 POST(sys_lwp_cond_wait)
7014 {
7015 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7016 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t *) ARG2;
7017 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7018 POST_FIELD_WRITE(mp->vki_mutex_waiters);
7019 if (ARG3 != 0)
7020 POST_MEM_WRITE(ARG3, sizeof(vki_timespec_t));
7021 }
7022
7023 PRE(sys_lwp_cond_broadcast)
7024 {
7025 /* int lwp_cond_broadcast(lwp_cond_t *cvp); */
7026 *flags |= SfMayBlock;
7027 PRINT("sys_lwp_cond_broadcast ( %#lx )", ARG1);
7028 PRE_REG_READ1(long, "lwp_cond_broadcast", vki_lwp_cond_t *, cvp);
7029
7030 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7031 PRE_FIELD_READ("lwp_cond_broadcast(cvp->type)", cvp->vki_cond_type);
7032 PRE_FIELD_READ("lwp_cond_broadcast(cvp->waiters_kernel)",
7033 cvp->vki_cond_waiters_kernel);
7034 /*PRE_FIELD_WRITE("lwp_cond_broadcast(cvp->waiters_kernel)",
7035 cvp->vki_cond_waiters_kernel);*/
7036 }
7037
7038 POST(sys_lwp_cond_broadcast)
7039 {
7040 vki_lwp_cond_t *cvp = (vki_lwp_cond_t *) ARG1;
7041 POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
7042 }
7043
7044 PRE(sys_pread)
7045 {
7046 /* ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset); */
7047 *flags |= SfMayBlock;
7048 PRINT("sys_pread ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7049 PRE_REG_READ4(long, "pread", int, fildes, void *, buf,
7050 vki_size_t, nbyte, vki_off_t, offset);
7051 PRE_MEM_WRITE("pread(buf)", ARG2, ARG3);
7052
7053 /* Be strict. */
7054 if (!ML_(fd_allowed)(ARG1, "pread", tid, False))
7055 SET_STATUS_Failure(VKI_EBADF);
7056 }
7057
7058 POST(sys_pread)
7059 {
7060 POST_MEM_WRITE(ARG2, RES);
7061 }
7062
7063 PRE(sys_pwrite)
7064 {
7065 /* ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
7066 off_t offset); */
7067 *flags |= SfMayBlock;
7068 PRINT("sys_pwrite ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
7069 PRE_REG_READ4(long, "pwrite", int, fildes, const void *, buf,
7070 vki_size_t, nbyte, vki_off_t, offset);
7071 PRE_MEM_READ("pwrite(buf)", ARG2, ARG3);
7072
7073 /* Be strict. */
7074 if (!ML_(fd_allowed)(ARG1, "pwrite", tid, False))
7075 SET_STATUS_Failure(VKI_EBADF);
7076 }
7077
7078 PRE(sys_getpagesizes)
7079 {
7080 /* int getpagesizes(int legacy, size_t *buf, int nelem); */
7081 PRINT("sys_getpagesizes ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7082 PRE_REG_READ3(long, "getpagesizes", int, legacy, size_t *, buf,
7083 int, nelem);
7084 if (ARG2)
7085 PRE_MEM_WRITE("getpagesizes(buf)", ARG2, ARG3 * sizeof(vki_size_t));
7086 }
7087
7088 POST(sys_getpagesizes)
7089 {
7090 if (ARG2)
7091 POST_MEM_WRITE(ARG2, RES * sizeof(vki_size_t));
7092 }
7093
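/* A minimal client-side sketch of the getvmusage() path handled below
   (illustrative only; flags are as documented in getvmusage(2)):

      size_t nres = 32;
      vmusage_t buf[32];
      if (getvmusage(flags, 60, buf, &nres) == 0) {
         ... on return, nres holds the number of entries written to buf ...
      }

   The kernel both reads and updates *nres, which is why the wrapper sizes
   the buf check from the value found at ARG5 before the call and marks both
   buf and nres as written afterwards. */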
7094 PRE(sys_rusagesys)
7095 {
7096 /* Kernel: int rusagesys(int code, void *arg1, void *arg2,
7097 void *arg3, void *arg4); */
7098 switch (ARG1 /*code*/) {
7099 case VKI__RUSAGESYS_GETRUSAGE:
7100 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7101 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7102 /* Libc: int getrusage(int who, struct rusage *r_usage); */
7103 PRINT("sys_rusagesys ( %ld, %#lx )", SARG1, ARG2);
7104 PRE_REG_READ2(long, SC2("rusagesys", "getrusage"), int, code,
7105 struct vki_rusage *, r_usage);
7106 PRE_MEM_WRITE("rusagesys(r_usage)", ARG2, sizeof(struct vki_rusage));
7107 break;
7108
7109 case VKI__RUSAGESYS_GETVMUSAGE:
7110 /* Libc: int getvmusage(uint_t flags, time_t age,
7111 vmusage_t *buf, size_t *nres); */
7112 PRINT("sys_rusagesys ( %ld, %lu, %ld, %#lx, %#lx )",
7113 SARG1, ARG2, SARG3, ARG4, ARG5);
7114 PRE_REG_READ5(long, SC2("rusagesys", "getvmusage"), int, code,
7115 vki_uint_t, flags, vki_time_t, age,
7116 vki_vmusage_t *, buf, vki_size_t *, nres);
7117 PRE_MEM_READ("rusagesys(nres)", ARG5, sizeof(vki_size_t));
7118 /* PRE_MEM_WRITE("rusagesys(nres)", ARG5, sizeof(vki_size_t)); */
7119
7120 if (ML_(safe_to_deref)((void *) ARG5, sizeof(vki_size_t))) {
7121 vki_size_t *nres = (vki_size_t *) ARG5;
7122 PRE_MEM_WRITE("rusagesys(buf)", ARG4,
7123 *nres * sizeof(vki_vmusage_t));
7124 }
7125 *flags |= SfMayBlock;
7126 break;
7127
7128 default:
7129 VG_(unimplemented)("Syswrap of the rusagesys call with code %ld.", SARG1);
7130 /*NOTREACHED*/
7131 break;
7132 }
7133 }
7134
7135 POST(sys_rusagesys)
7136 {
7137 switch (ARG1 /*code*/) {
7138 case VKI__RUSAGESYS_GETRUSAGE:
7139 case VKI__RUSAGESYS_GETRUSAGE_CHLD:
7140 case VKI__RUSAGESYS_GETRUSAGE_LWP:
7141 POST_MEM_WRITE(ARG2, sizeof(struct vki_rusage));
7142 break;
7143 case VKI__RUSAGESYS_GETVMUSAGE:
7144 {
7145 vki_size_t *nres = (vki_size_t *) ARG5;
7146 POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
7147 POST_MEM_WRITE(ARG4, *nres * sizeof(vki_vmusage_t));
7148 }
7149 break;
7150 default:
7151 vg_assert(0);
7152 break;
7153 }
7154
7155 }
7156
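/* The libc event-port functions (port_create, port_associate, port_get,
   port_send, ...) all funnel into this single portfs syscall.  Only the low
   bits of the first argument select the operation, hence the masking with
   VKI_PORT_CODE_MASK below.  Except for PORT_CREATE and PORT_SENDN, the
   second argument is the event port file descriptor, which is why it is
   checked with ML_(fd_allowed) at the end of the PRE wrapper. */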
7157 PRE(sys_port)
7158 {
7159 /* Kernel: int64_t portfs(int opcode, uintptr_t a0, uintptr_t a1,
7160 uintptr_t a2, uintptr_t a3, uintptr_t a4); */
7161 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7162 *flags |= SfMayBlock;
7163 switch (opcode) {
7164 case VKI_PORT_CREATE:
7165 PRINT("sys_port ( %ld )", SARG1);
7166 PRE_REG_READ1(long, SC2("port", "create"), int, opcode);
7167 break;
7168 case VKI_PORT_ASSOCIATE:
7169 case VKI_PORT_DISSOCIATE:
7170 PRINT("sys_port ( %ld, %ld, %ld, %#lx, %ld, %#lx )", SARG1, SARG2, SARG3,
7171 ARG4, SARG5, ARG6);
7172 if (opcode == VKI_PORT_ASSOCIATE) {
7173 PRE_REG_READ6(long, SC2("port", "associate"), int, opcode, int, a0,
7174 int, a1, uintptr_t, a2, int, a3, void *, a4);
7175 }
7176 else {
7177 PRE_REG_READ6(long, SC2("port", "dissociate"), int, opcode, int, a0,
7178 int, a1, uintptr_t, a2, int, a3, void *, a4);
7179 }
7180
7181 switch (ARG3 /*source*/) {
7182 case VKI_PORT_SOURCE_FD:
7183 if (!ML_(fd_allowed)(ARG4, "port", tid, False)) {
7184 SET_STATUS_Failure(VKI_EBADF);
7185 }
7186 break;
7187 case VKI_PORT_SOURCE_FILE:
7188 {
7189 struct vki_file_obj *fo = (struct vki_file_obj *)ARG4;
7190 PRE_MEM_READ("port(file_obj)", ARG4, sizeof(struct vki_file_obj));
7191 if (ML_(safe_to_deref)(&fo->fo_name, sizeof(fo->fo_name)))
7192 PRE_MEM_RASCIIZ("port(file_obj->fo_name)", (Addr)fo->fo_name);
7193 }
7194 break;
7195 default:
7196 VG_(unimplemented)("Syswrap of the port_associate/dissociate call "
7197 "type %ld.", SARG3);
7198 /*NOTREACHED*/
7199 break;
7200 }
7201 break;
7202 case VKI_PORT_SEND:
7203 PRINT("sys_port ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
7204 PRE_REG_READ4(long, SC2("port", "send"), int, opcode, int, a0, int, a1,
7205 void *, a2);
7206 break;
7207 case VKI_PORT_SENDN:
      PRINT("sys_port ( %ld, %#lx, %#lx, %lu, %lx, %#lx )", SARG1, ARG2, ARG3,
7209 ARG4, ARG5, ARG6);
7210 PRE_REG_READ6(long, SC2("port", "sendn"), int, opcode, int *, a0,
7211 int *, a1, vki_uint_t, a2, int, a3, void *, a4);
7212 PRE_MEM_READ("port(ports)", ARG2, ARG4 * sizeof(int));
7213 PRE_MEM_WRITE("port(errors)", ARG3, ARG4 * sizeof(int));
7214 break;
7215 case VKI_PORT_GET:
7216 PRINT("sys_port ( %ld, %ld, %#lx, %ld, %ld, %#lx )", SARG1, SARG2, ARG3,
7217 SARG4, SARG5, ARG6);
7218 PRE_REG_READ6(long, SC2("port", "get"), int, opcode, int, a0,
7219 port_event_t *, a1, vki_time_t, a2, long, a3,
7220 timespec_t *, a4);
7221 PRE_MEM_WRITE("port(uevp)", ARG3, sizeof(vki_port_event_t));
7222 break;
7223 case VKI_PORT_GETN:
7224 PRINT("sys_port ( %ld, %ld, %#lx, %lu, %lu, %#lx )", SARG1, SARG2, ARG3,
7225 ARG4, ARG5, ARG6);
7226 PRE_REG_READ6(long, SC2("port", "getn"), int, opcode, int, a0,
7227 port_event_t *, a1, vki_uint_t, a2, vki_uint_t, a3,
7228 timespec_t *, a4);
7229 if (ARG6)
7230 PRE_MEM_READ("port(timeout)", ARG6, sizeof(vki_timespec_t));
7231 PRE_MEM_WRITE("port(uevp)", ARG3, ARG4 * sizeof(vki_port_event_t));
7232 break;
7233 case VKI_PORT_ALERT:
7234 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, SARG4,
7235 ARG5);
7236 PRE_REG_READ5(long, SC2("port", "alert"), int, opcode, int, a0, int, a1,
7237 int, a2, void *, a3);
7238 break;
7239 case VKI_PORT_DISPATCH:
7240 // FIXME: check order: SARG2, SARG1 or SARG1, SARG2 ??
7241 PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx, %#lx )", SARG2, SARG1, SARG3,
7242 SARG4, ARG5, ARG6);
7243 PRE_REG_READ6(long, SC2("port", "dispatch"), int, opcode, int, a0,
7244 int, a1, int, a2, uintptr_t, a3, void *, a4);
7245 break;
7246 default:
7247 VG_(unimplemented)("Syswrap of the port call with opcode %ld.", SARG1);
7248 /*NOTREACHED*/
7249 break;
7250 }
7251
7252 /* Be strict. */
7253 if ((opcode != VKI_PORT_CREATE && opcode != VKI_PORT_SENDN) &&
7254 !ML_(fd_allowed)(ARG2, "port", tid, False))
7255 SET_STATUS_Failure(VKI_EBADF);
7256 }
7257
7258 POST(sys_port)
7259 {
7260 Int opcode = ARG1 & VKI_PORT_CODE_MASK;
7261 switch (opcode) {
7262 case VKI_PORT_CREATE:
7263 if (!ML_(fd_allowed)(RES, "port", tid, True)) {
7264 VG_(close)(RES);
7265 SET_STATUS_Failure(VKI_EMFILE);
7266 }
7267 else if (VG_(clo_track_fds))
7268 ML_(record_fd_open_named)(tid, RES);
7269 break;
7270 case VKI_PORT_ASSOCIATE:
7271 case VKI_PORT_DISSOCIATE:
7272 case VKI_PORT_SEND:
7273 break;
7274 case VKI_PORT_SENDN:
7275 if (RES != ARG4) {
7276 /* If there is any error then the whole errors area is written. */
7277 POST_MEM_WRITE(ARG3, ARG4 * sizeof(int));
7278 }
7279 break;
7280 case VKI_PORT_GET:
7281 POST_MEM_WRITE(ARG3, sizeof(vki_port_event_t));
7282 break;
7283 case VKI_PORT_GETN:
7284 POST_MEM_WRITE(ARG3, RES * sizeof(vki_port_event_t));
7285 break;
7286 case VKI_PORT_ALERT:
7287 case VKI_PORT_DISPATCH:
7288 break;
7289 default:
7290 VG_(unimplemented)("Syswrap of the port call with opcode %lu.", ARG1);
7291 /*NOTREACHED*/
7292 break;
7293 }
7294 }
7295
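/* pollsys is the kernel primitive behind poll(); the additional timespec
   and sigset arguments give it ppoll()-style semantics, i.e. the signal
   mask in 'set' is applied for the duration of the wait. */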
7296 PRE(sys_pollsys)
7297 {
7298 /* int pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeout,
7299 sigset_t *set); */
7300 UWord i;
7301 struct vki_pollfd *ufds = (struct vki_pollfd *)ARG1;
7302
7303 *flags |= SfMayBlock;
7304
7305 PRINT("sys_pollsys ( %#lx, %lu, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
7306 PRE_REG_READ4(long, "poll", pollfd_t *, fds, vki_nfds_t, nfds,
7307 timespec_t *, timeout, sigset_t *, set);
7308
7309 for (i = 0; i < ARG2; i++) {
7310 vki_pollfd_t *u = &ufds[i];
7311 PRE_FIELD_READ("poll(ufds.fd)", u->fd);
7312 /* XXX Check if it's valid? */
7313 PRE_FIELD_READ("poll(ufds.events)", u->events);
7314 PRE_FIELD_WRITE("poll(ufds.revents)", u->revents);
7315 }
7316
7317 if (ARG3)
7318 PRE_MEM_READ("poll(timeout)", ARG3, sizeof(vki_timespec_t));
7319 if (ARG4)
7320 PRE_MEM_READ("poll(set)", ARG4, sizeof(vki_sigset_t));
7321 }
7322
7323 POST(sys_pollsys)
7324 {
7325 if (RES >= 0) {
7326 UWord i;
7327 vki_pollfd_t *ufds = (vki_pollfd_t*)ARG1;
7328 for (i = 0; i < ARG2; i++)
7329 POST_FIELD_WRITE(ufds[i].revents);
7330 }
7331 }
7332
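/* labelsys() backs the Trusted Extensions label interfaces; ARG1 selects
   which libtsol/libtsnet entry point (is_system_labeled, tnrh, tnrhtp,
   tnmlp, getlabel, fgetlabel, ...) is being emulated, as noted case by
   case below. */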
7333 PRE(sys_labelsys)
7334 {
7335 /* Kernel: int labelsys(int op, void *a1, void *a2, void *a3,
7336 void *a4, void *a5); */
7337
7338 switch (ARG1 /*op*/) {
7339 case VKI_TSOL_SYSLABELING:
7340 /* Libc: int is_system_labeled(void); */
7341 PRINT("sys_labelsys ( %ld )", SARG1);
7342 PRE_REG_READ1(long, SC2("labelsys", "syslabeling"), int, op);
7343 break;
7344
7345 case VKI_TSOL_TNRH:
7346 /* Libtsnet: int tnrh(int cmd, tsol_rhent_t *buf); */
7347 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7348 PRE_REG_READ3(long, SC2("labelsys", "tnrh"), int, op, int, cmd,
7349 vki_tsol_rhent_t *, buf);
7350 if (ARG2 != VKI_TNDB_FLUSH)
7351 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_rhent_t));
7352 break;
7353
7354 case VKI_TSOL_TNRHTP:
7355 /* Libtsnet: int tnrhtp(int cmd, tsol_tpent_t *buf); */
7356 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7357 PRE_REG_READ3(long, SC2("labelsys", "tnrhtp"), int, op, int, cmd,
7358 vki_tsol_tpent_t *, buf);
7359 if (ARG2 != VKI_TNDB_FLUSH)
7360 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_tpent_t));
7361 break;
7362
7363 case VKI_TSOL_TNMLP:
7364 /* Libtsnet: int tnmlp(int cmd, tsol_mlpent_t *buf); */
7365 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7366 PRE_REG_READ3(long, SC2("labelsys", "tnmlp"), int, op, int, cmd,
7367 vki_tsol_mlpent_t *, buf);
7368 PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_mlpent_t));
7369 break;
7370
7371 case VKI_TSOL_GETLABEL:
7372 /* Libtsol: int getlabel(const char *path, bslabel_t *label); */
7373 PRINT("sys_labelsys ( %ld, %#lx(%s), %#lx )",
7374 SARG1, ARG2, (HChar *) ARG2, ARG3);
7375 PRE_REG_READ3(long, SC2("labelsys", "getlabel"), int, op,
7376 const char *, path, vki_bslabel_t *, label);
7377 PRE_MEM_RASCIIZ("labelsys(path)", ARG2);
7378 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7379 break;
7380
7381 case VKI_TSOL_FGETLABEL:
7382 /* Libtsol: int fgetlabel(int fd, bslabel_t *label); */
7383 PRINT("sys_labelsys ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
7384 PRE_REG_READ3(long, SC2("labelsys", "fgetlabel"), int, op,
7385 int, fd, vki_bslabel_t *, label);
7386 /* Be strict. */
7387 if (!ML_(fd_allowed)(ARG2, "labelsys(fgetlabel)", tid, False))
7388 SET_STATUS_Failure(VKI_EBADF);
7389 PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
7390 break;
7391
7392 #if defined(SOLARIS_TSOL_CLEARANCE)
7393 case VKI_TSOL_GETCLEARANCE:
7394 /* Libtsol: int getclearance(bslabel_t *clearance); */
7395 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7396 PRE_REG_READ2(long, SC2("labelsys", "getclearance"), int, op,
7397 vki_bslabel_t *, clearance);
7398 PRE_MEM_WRITE("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7399 break;
7400
7401 case VKI_TSOL_SETCLEARANCE:
7402 /* Libtsol: int setclearance(bslabel_t *clearance); */
7403 PRINT("sys_labelsys ( %ld, %#lx )", SARG1, ARG2);
7404 PRE_REG_READ2(long, SC2("labelsys", "setclearance"), int, op,
7405 vki_bslabel_t *, clearance);
7406 PRE_MEM_READ("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
7407 break;
7408 #endif /* SOLARIS_TSOL_CLEARANCE */
7409
7410 default:
7411 VG_(unimplemented)("Syswrap of the labelsys call with op %ld.", SARG1);
7412 /*NOTREACHED*/
7413 break;
7414 }
7415 }
7416
7417 POST(sys_labelsys)
7418 {
7419 switch (ARG1 /*op*/) {
7420 case VKI_TSOL_SYSLABELING:
7421 break;
7422
7423 case VKI_TSOL_TNRH:
7424 switch (ARG2 /*cmd*/) {
7425 case VKI_TNDB_LOAD:
7426 case VKI_TNDB_DELETE:
7427 case VKI_TNDB_FLUSH:
7428 break;
7429 #if defined(SOLARIS_TNDB_GET_TNIP)
      case VKI_TNDB_GET_TNIP:
7431 #endif /* SOLARIS_TNDB_GET_TNIP */
7432 case VKI_TNDB_GET:
7433 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_rhent_t));
7434 break;
7435 default:
7436 vg_assert(0);
7437 break;
7438 }
7439 break;
7440
7441 case VKI_TSOL_TNRHTP:
7442 switch (ARG2 /*cmd*/) {
7443 case VKI_TNDB_LOAD:
7444 case VKI_TNDB_DELETE:
7445 case VKI_TNDB_FLUSH:
7446 break;
7447 case VKI_TNDB_GET:
7448 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_tpent_t));
7449 break;
7450 default:
7451 vg_assert(0);
7452 break;
7453 }
7454 break;
7455
7456 case VKI_TSOL_TNMLP:
7457 switch (ARG2 /*cmd*/) {
7458 case VKI_TNDB_LOAD:
7459 case VKI_TNDB_DELETE:
7460 case VKI_TNDB_FLUSH:
7461 break;
7462 case VKI_TNDB_GET:
7463 POST_MEM_WRITE(ARG3, sizeof(vki_tsol_mlpent_t));
7464 break;
7465 default:
7466 vg_assert(0);
7467 break;
7468 }
7469 break;
7470
7471 case VKI_TSOL_GETLABEL:
7472 case VKI_TSOL_FGETLABEL:
7473 POST_MEM_WRITE(ARG3, sizeof(vki_bslabel_t));
7474 break;
7475
7476 #if defined(SOLARIS_TSOL_CLEARANCE)
7477 case VKI_TSOL_GETCLEARANCE:
7478 POST_MEM_WRITE(ARG2, sizeof(vki_bslabel_t));
7479 break;
7480
7481 case VKI_TSOL_SETCLEARANCE:
7482 break;
7483 #endif /* SOLARIS_TSOL_CLEARANCE */
7484
7485 default:
7486 vg_assert(0);
7487 break;
7488 }
7489 }
7490
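/* Typical client usage of acl()/facl() is a two-step pattern: query the
   number of entries first, then fetch that many.  A minimal sketch
   (illustrative only):

      int n = acl("/some/file", GETACLCNT, 0, NULL);
      aclent_t *buf = malloc(n * sizeof(aclent_t));
      (void) acl("/some/file", GETACL, n, buf);

   which is why the GETACL/ACE_GETACL checks below size aclbufp as
   nentries * sizeof(entry). */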
7491 PRE(sys_acl)
7492 {
7493 /* int acl(char *pathp, int cmd, int nentries, void *aclbufp); */
7494 PRINT("sys_acl ( %#lx(%s), %ld, %ld, %#lx )", ARG1, (HChar *) ARG1, SARG2,
7495 SARG3, ARG4);
7496
7497 PRE_REG_READ4(long, "acl", char *, pathp, int, cmd,
7498 int, nentries, void *, aclbufp);
7499 PRE_MEM_RASCIIZ("acl(pathp)", ARG1);
7500
7501 switch (ARG2 /*cmd*/) {
7502 case VKI_SETACL:
7503 if (ARG4)
7504 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
7505 break;
7506 case VKI_GETACL:
7507 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
7508 break;
7509 case VKI_GETACLCNT:
7510 break;
7511 case VKI_ACE_SETACL:
7512 if (ARG4)
7513 PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
7514 break;
7515 case VKI_ACE_GETACL:
7516 PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
7517 break;
7518 case VKI_ACE_GETACLCNT:
7519 break;
7520 default:
7521 VG_(unimplemented)("Syswrap of the acl call with cmd %ld.", SARG2);
7522 /*NOTREACHED*/
7523 break;
7524 }
7525 }
7526
7527 POST(sys_acl)
7528 {
7529 switch (ARG2 /*cmd*/) {
7530 case VKI_SETACL:
7531 break;
7532 case VKI_GETACL:
7533 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
7534 break;
7535 case VKI_GETACLCNT:
7536 break;
7537 case VKI_ACE_SETACL:
7538 break;
7539 case VKI_ACE_GETACL:
7540 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
7541 break;
7542 case VKI_ACE_GETACLCNT:
7543 break;
7544 default:
7545 vg_assert(0);
7546 break;
7547 }
7548 }
7549
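/* auditsys() multiplexes the whole BSM audit API: ARG1 selects the libbsm
   entry point (getauid, setaudit, auditon, ...) and, for BSM_AUDITCTL,
   ARG2 further selects the auditon(2) command, so the wrapper dissects
   both levels below. */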
7550 PRE(sys_auditsys)
7551 {
7552 /* Kernel: int auditsys(long code, long a1, long a2, long a3, long a4); */
7553 switch (ARG1 /*code*/) {
7554 case VKI_BSM_GETAUID:
7555 /* Libbsm: int getauid(au_id_t *auid); */
7556 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
7557 PRE_REG_READ2(long, SC2("auditsys", "getauid"), long, code,
7558 vki_au_id_t *, auid);
7559 PRE_MEM_WRITE("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
7560 break;
7561 case VKI_BSM_SETAUID:
7562 /* Libbsm: int setauid(au_id_t *auid); */
7563 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
7564 PRE_REG_READ2(long, SC2("auditsys", "setauid"), long, code,
7565 vki_au_id_t *, auid);
7566 PRE_MEM_READ("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
7567 break;
7568 case VKI_BSM_GETAUDIT:
7569 /* Libbsm: int getaudit(auditinfo_t *ai); */
7570 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
7571 PRE_REG_READ2(long, SC2("auditsys", "getaudit"), long, code,
7572 vki_auditinfo_t *, ai);
7573 PRE_MEM_WRITE("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
7574 break;
7575 case VKI_BSM_SETAUDIT:
7576 /* Libbsm: int setaudit(auditinfo_t *ai); */
7577 PRINT("sys_auditsys ( %ld, %#lx )", SARG1, ARG2);
7578 PRE_REG_READ2(long, SC2("auditsys", "setaudit"), long, code,
7579 vki_auditinfo_t *, ai);
7580 PRE_MEM_READ("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
7581 break;
7582 case VKI_BSM_AUDIT:
7583 /* Libbsm: int audit(void *record, int length); */
7584 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7585 PRE_REG_READ3(long, SC2("auditsys", "audit"), long, code,
7586 void *, record, int, length);
7587 PRE_MEM_READ("auditsys(record)", ARG2, ARG3);
7588 break;
7589 case VKI_BSM_AUDITCTL:
7590 /* Libbsm: int auditon(int cmd, caddr_t data, int length); */
7591 PRINT("sys_auditsys ( %ld, %ld, %#lx, %ld )",
7592 SARG1, SARG2, ARG3, SARG4);
7593
7594 switch (ARG2 /*cmd*/) {
7595 case VKI_A_GETPOLICY:
7596 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpolicy"),
7597 long, code, int, cmd, vki_uint32_t *, policy);
7598 PRE_MEM_WRITE("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
7599 break;
7600 case VKI_A_SETPOLICY:
7601 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpolicy"),
7602 long, code, int, cmd, vki_uint32_t *, policy);
7603 PRE_MEM_READ("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
7604 break;
7605 case VKI_A_GETKMASK:
7606 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getkmask"),
7607 long, code, int, cmd, vki_au_mask_t *, kmask);
7608 PRE_MEM_WRITE("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
7609 break;
7610 case VKI_A_SETKMASK:
7611 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setkmask"),
7612 long, code, int, cmd, vki_au_mask_t *, kmask);
7613 PRE_MEM_READ("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
7614 break;
7615 case VKI_A_GETQCTRL:
7616 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getqctrl"),
7617 long, code, int, cmd,
7618 struct vki_au_qctrl *, qctrl);
7619 PRE_MEM_WRITE("auditsys(qctrl)", ARG3,
7620 sizeof(struct vki_au_qctrl));
7621 break;
7622 case VKI_A_SETQCTRL:
7623 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setqctrl"),
7624 long, code, int, cmd,
7625 struct vki_au_qctrl *, qctrl);
7626 PRE_MEM_READ("auditsys(qctrl)", ARG3,
7627 sizeof(struct vki_au_qctrl));
7628 break;
7629 case VKI_A_GETCWD:
7630 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcwd"),
7631 long, code, int, cmd, char *, data, int, length);
7632 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
7633 break;
7634 case VKI_A_GETCAR:
7635 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcar"),
7636 long, code, int, cmd, char *, data, int, length);
7637 PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
7638 break;
7639 case VKI_A_GETSTAT:
7640 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getstat"),
7641 long, code, int, cmd, vki_au_stat_t *, stats);
7642 PRE_MEM_WRITE("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
7643 break;
7644 case VKI_A_SETSTAT:
7645 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setstat"),
7646 long, code, int, cmd, vki_au_stat_t *, stats);
7647 PRE_MEM_READ("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
7648 break;
7649 case VKI_A_SETUMASK:
7650 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setumask"),
7651 long, code, int, cmd, vki_auditinfo_t *, umask);
7652 PRE_MEM_READ("auditsys(umask)", ARG3, sizeof(vki_auditinfo_t));
7653 break;
7654 case VKI_A_SETSMASK:
7655 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setsmask"),
7656 long, code, int, cmd, vki_auditinfo_t *, smask);
7657 PRE_MEM_READ("auditsys(smask)", ARG3, sizeof(vki_auditinfo_t));
7658 break;
7659 case VKI_A_GETCOND:
7660 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getcond"),
7661 long, code, int, cmd, int *, cond);
7662 PRE_MEM_WRITE("auditsys(cond)", ARG3, sizeof(int));
7663 break;
7664 case VKI_A_SETCOND:
7665 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setcond"),
7666 long, code, int, cmd, int *, state);
7667 PRE_MEM_READ("auditsys(cond)", ARG3, sizeof(int));
7668 break;
7669 case VKI_A_GETCLASS:
7670 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getclass"),
7671 long, code, int, cmd,
7672 vki_au_evclass_map_t *, classmap);
7673
7674 if (ML_(safe_to_deref((void *) ARG3,
7675 sizeof(vki_au_evclass_map_t)))) {
7676 vki_au_evclass_map_t *classmap =
7677 (vki_au_evclass_map_t *) ARG3;
7678 PRE_FIELD_READ("auditsys(classmap.ec_number)",
7679 classmap->ec_number);
7680 PRE_MEM_WRITE("auditsys(classmap)", ARG3,
7681 sizeof(vki_au_evclass_map_t));
7682 }
7683 break;
7684 case VKI_A_SETCLASS:
7685 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setclass"),
7686 long, code, int, cmd,
7687 vki_au_evclass_map_t *, classmap);
7688
7689 if (ML_(safe_to_deref((void *) ARG3,
7690 sizeof(vki_au_evclass_map_t)))) {
7691 vki_au_evclass_map_t *classmap =
7692 (vki_au_evclass_map_t *) ARG3;
7693 PRE_FIELD_READ("auditsys(classmap.ec_number)",
7694 classmap->ec_number);
7695 PRE_FIELD_READ("auditsys(classmap.ec_class)",
7696 classmap->ec_class);
7697 }
7698 break;
7699 case VKI_A_GETPINFO:
7700 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpinfo"),
7701 long, code, int, cmd,
7702 struct vki_auditpinfo *, apinfo);
7703
7704 if (ML_(safe_to_deref((void *) ARG3,
7705 sizeof(struct vki_auditpinfo)))) {
7706 struct vki_auditpinfo *apinfo =
7707 (struct vki_auditpinfo *) ARG3;
7708 PRE_FIELD_READ("auditsys(apinfo.ap_pid)", apinfo->ap_pid);
7709 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
7710 sizeof(struct vki_auditpinfo));
7711 }
7712 break;
7713 case VKI_A_SETPMASK:
7714 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpmask"),
7715 long, code, int, cmd,
7716 struct vki_auditpinfo *, apinfo);
7717 PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
7718 sizeof(struct vki_auditpinfo));
7719 break;
7720 case VKI_A_GETPINFO_ADDR:
7721 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getpinfo_addr"),
7722 long, code, int, cmd,
7723 struct vki_auditpinfo_addr *, apinfo, int, length);
7724
7725 if (ML_(safe_to_deref((void *) ARG3,
7726 sizeof(struct vki_auditpinfo_addr)))) {
7727 struct vki_auditpinfo_addr *apinfo_addr =
7728 (struct vki_auditpinfo_addr *) ARG3;
7729 PRE_FIELD_READ("auditsys(apinfo_addr.ap_pid)",
7730 apinfo_addr->ap_pid);
7731 PRE_MEM_WRITE("auditsys(apinfo_addr)", ARG3, ARG4);
7732 }
7733 break;
7734 case VKI_A_GETKAUDIT:
7735 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getkaudit"),
7736 long, code, int, cmd,
7737 vki_auditinfo_addr_t *, kaudit, int, length);
7738 PRE_MEM_WRITE("auditsys(kaudit)", ARG3, ARG4);
7739 break;
7740 case VKI_A_SETKAUDIT:
7741 PRE_REG_READ4(long, SC3("auditsys", "auditctl", "setkaudit"),
7742 long, code, int, cmd,
7743 vki_auditinfo_addr_t *, kaudit, int, length);
7744 PRE_MEM_READ("auditsys(kaudit)", ARG3, ARG4);
7745 break;
7746 case VKI_A_GETAMASK:
7747 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getamask"),
7748 long, code, int, cmd, vki_au_mask_t *, amask);
7749 PRE_MEM_WRITE("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
7750 break;
7751 case VKI_A_SETAMASK:
7752 PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setamask"),
7753 long, code, int, cmd, vki_au_mask_t *, amask);
7754 PRE_MEM_READ("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
7755 break;
7756 default:
7757 VG_(unimplemented)("Syswrap of the auditsys(auditctl) call "
7758 "with cmd %lu.", ARG2);
7759 /*NOTREACHED*/
7760 break;
7761 }
7762 break;
7763 case VKI_BSM_GETAUDIT_ADDR:
7764 /* Libbsm: int getaudit_addr(auditinfo_addr_t *ai, int len); */
7765 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7766 PRE_REG_READ3(long, SC2("auditsys", "getaudit_addr"), long, code,
7767 vki_auditinfo_addr_t *, ai, int, len);
7768 PRE_MEM_WRITE("auditsys(ai)", ARG2, ARG3);
7769 break;
7770 case VKI_BSM_SETAUDIT_ADDR:
7771 /* Libbsm: int setaudit_addr(auditinfo_addr_t *ai, int len); */
7772 PRINT("sys_auditsys ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
7773 PRE_REG_READ3(long, SC2("auditsys", "setaudit_addr"), long, code,
7774 vki_auditinfo_addr_t *, ai, int, len);
7775 PRE_MEM_READ("auditsys(ai)", ARG2, ARG3);
7776 break;
7777 case VKI_BSM_AUDITDOOR:
7778 /* Libbsm: int auditdoor(int fd); */
7779 PRINT("sys_auditsys ( %ld, %ld )", SARG1, SARG2);
7780 PRE_REG_READ2(long, SC2("auditsys", "door"), long, code, int, fd);
7781
7782 /* Be strict. */
7783 if (!ML_(fd_allowed)(ARG2, SC2("auditsys", "door")"(fd)",
7784 tid, False))
7785 SET_STATUS_Failure(VKI_EBADF);
7786 break;
7787 default:
7788 VG_(unimplemented)("Syswrap of the auditsys call with code %lu.", ARG1);
7789 /*NOTREACHED*/
7790 break;
7791 }
7792 }
7793
7794 POST(sys_auditsys)
7795 {
7796 switch (ARG1 /*code*/) {
7797 case VKI_BSM_GETAUID:
7798 POST_MEM_WRITE(ARG2, sizeof(vki_au_id_t));
7799 break;
7800 case VKI_BSM_SETAUID:
7801 break;
7802 case VKI_BSM_GETAUDIT:
7803 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_t));
7804 break;
7805 case VKI_BSM_SETAUDIT:
7806 case VKI_BSM_AUDIT:
7807 break;
7808 case VKI_BSM_AUDITCTL:
7809 switch (ARG2 /*cmd*/) {
7810 case VKI_A_GETPOLICY:
7811 POST_MEM_WRITE(ARG3, sizeof(vki_uint32_t));
7812 break;
7813 case VKI_A_SETPOLICY:
7814 break;
7815 case VKI_A_GETKMASK:
7816 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
7817 break;
7818 case VKI_A_SETKMASK:
7819 break;
7820 case VKI_A_GETQCTRL:
7821 POST_MEM_WRITE(ARG3, sizeof(struct vki_au_qctrl));
7822 break;
7823 case VKI_A_SETQCTRL:
7824 break;
7825 case VKI_A_GETCWD:
7826 case VKI_A_GETCAR:
7827 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
7828 break;
7829 case VKI_A_GETSTAT:
7830 POST_MEM_WRITE(ARG3, sizeof(vki_au_stat_t));
7831 break;
7832 case VKI_A_SETSTAT:
7833 case VKI_A_SETUMASK:
7834 case VKI_A_SETSMASK:
7835 break;
7836 case VKI_A_GETCOND:
7837 POST_MEM_WRITE(ARG3, sizeof(int));
7838 break;
7839 case VKI_A_SETCOND:
7840 break;
7841 case VKI_A_GETCLASS:
7842 POST_MEM_WRITE(ARG3, sizeof(vki_au_evclass_map_t));
7843 break;
7844 case VKI_A_SETCLASS:
7845 break;
7846 case VKI_A_GETPINFO:
7847 POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo));
7848 break;
7849 case VKI_A_SETPMASK:
7850 break;
7851 case VKI_A_GETPINFO_ADDR:
         POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo_addr));
7853 break;
7854 case VKI_A_GETKAUDIT:
7855 POST_MEM_WRITE(ARG3, sizeof(vki_auditinfo_addr_t));
7856 break;
7857 case VKI_A_SETKAUDIT:
7858 break;
7859 case VKI_A_GETAMASK:
7860 POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
7861 break;
7862 case VKI_A_SETAMASK:
7863 break;
7864 }
7865 break;
7866 case VKI_BSM_GETAUDIT_ADDR:
7867 POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_addr_t));
7868 break;
7869 case VKI_BSM_SETAUDIT_ADDR:
7870 break;
7871 case VKI_BSM_AUDITDOOR:
7872 break;
7873 }
7874 }
7875
7876 PRE(sys_p_online)
7877 {
7878 /* int p_online(processorid_t processorid, int flag); */
7879 PRINT("sys_p_online ( %ld, %ld )", SARG1, SARG2);
7880 PRE_REG_READ2(long, "p_online", vki_processorid_t, processorid, int, flag);
7881 }
7882
7883 PRE(sys_sigqueue)
7884 {
7885 /* int sigqueue(pid_t pid, int signo, void *value,
7886 int si_code, timespec_t *timeout);
7887 */
7888 PRINT("sys_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
7889 SARG1, SARG2, ARG3, SARG4, ARG5);
7890 PRE_REG_READ5(long, "sigqueue", vki_pid_t, pid, int, signo,
7891 void *, value, int, si_code,
7892 vki_timespec_t *, timeout);
7893
7894 if (ARG5)
7895 PRE_MEM_READ("sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
7896
7897 if (!ML_(client_signal_OK)(ARG2)) {
7898 SET_STATUS_Failure(VKI_EINVAL);
7899 return;
7900 }
7901
7902 /* If we're sending SIGKILL, check to see if the target is one of
7903 our threads and handle it specially. */
7904 if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
7905 SET_STATUS_Success(0);
7906 } else {
7907 SysRes res = VG_(do_syscall5)(SYSNO, ARG1, ARG2, ARG3, ARG4,
7908 ARG5);
7909 SET_STATUS_from_SysRes(res);
7910 }
7911
7912 if (VG_(clo_trace_signals))
7913 VG_(message)(Vg_DebugMsg,
7914 "sigqueue: signal %lu queued for pid %lu\n",
7915 ARG2, ARG1);
7916
7917 /* Check to see if this gave us a pending signal. */
7918 *flags |= SfPollAfter;
7919 }
7920
7921 PRE(sys_clock_gettime)
7922 {
7923 /* int clock_gettime(clockid_t clock_id, struct timespec *tp); */
7924 PRINT("sys_clock_gettime ( %ld, %#lx )", SARG1, ARG2);
7925 PRE_REG_READ2(long, "clock_gettime", vki_clockid_t, clock_id,
7926 struct timespec *, tp);
7927 PRE_MEM_WRITE("clock_gettime(tp)", ARG2, sizeof(struct vki_timespec));
7928 }
7929
7930 POST(sys_clock_gettime)
7931 {
7932 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
7933 }
7934
7935 PRE(sys_clock_settime)
7936 {
7937 /* int clock_settime(clockid_t clock_id, const struct timespec *tp); */
7938 PRINT("sys_clock_settime ( %ld, %#lx )", SARG1, ARG2);
7939 PRE_REG_READ2(long, "clock_settime", vki_clockid_t, clock_id,
7940 const struct timespec *, tp);
7941 PRE_MEM_READ("clock_settime(tp)", ARG2, sizeof(struct vki_timespec));
7942 }
7943
7944 PRE(sys_clock_getres)
7945 {
7946 /* int clock_getres(clockid_t clock_id, struct timespec *res); */
7947 PRINT("sys_clock_getres ( %ld, %#lx )", SARG1, ARG2);
7948 PRE_REG_READ2(long, "clock_getres", vki_clockid_t, clock_id,
7949 struct timespec *, res);
7950
7951 if (ARG2)
7952 PRE_MEM_WRITE("clock_getres(res)", ARG2, sizeof(struct vki_timespec));
7953 }
7954
7955 POST(sys_clock_getres)
7956 {
7957 if (ARG2)
7958 POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
7959 }
7960
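/* With SIGEV_PORT notification (and SIGEV_THREAD, which the wrapper below
   treats the same way) sigev_value.sival_ptr points at a port_notify_t that
   names the event port to post to, so that structure is checked as well.
   A minimal client-side sketch (illustrative only; port_fd stands for an
   event port obtained from port_create()):

      timer_t timerid;
      port_notify_t pn = { .portnfy_port = port_fd, .portnfy_user = NULL };
      struct sigevent ev;
      ev.sigev_notify = SIGEV_PORT;
      ev.sigev_value.sival_ptr = &pn;
      (void) timer_create(CLOCK_REALTIME, &ev, &timerid); */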
7961 PRE(sys_timer_create)
7962 {
7963 /* int timer_create(clockid_t clock_id,
7964 struct sigevent *evp, timer_t *timerid);
7965 */
7966 PRINT("sys_timer_create ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
7967 PRE_REG_READ3(long, "timer_create", vki_clockid_t, clock_id,
7968 struct vki_sigevent *, evp, vki_timer_t *, timerid);
7969
7970 if (ARG2) {
7971 struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
7972 PRE_FIELD_READ("timer_create(evp.sigev_notify)", evp->sigev_notify);
7973 PRE_FIELD_READ("timer_create(evp.sigev_signo)", evp->sigev_signo);
7974 PRE_FIELD_READ("timer_create(evp.sigev_value.sival_int)",
7975 evp->sigev_value.sival_int);
7976
7977 /* Be safe. */
7978 if (ML_(safe_to_deref(evp, sizeof(struct vki_sigevent)))) {
7979 if ((evp->sigev_notify == VKI_SIGEV_PORT) ||
7980 (evp->sigev_notify == VKI_SIGEV_THREAD))
7981 PRE_MEM_READ("timer_create(evp.sigev_value.sival_ptr)",
7982 (Addr) evp->sigev_value.sival_ptr,
7983 sizeof(vki_port_notify_t));
7984 }
7985 }
7986
7987 PRE_MEM_WRITE("timer_create(timerid)", ARG3, sizeof(vki_timer_t));
7988 }
7989
7990 POST(sys_timer_create)
7991 {
7992 POST_MEM_WRITE(ARG3, sizeof(vki_timer_t));
7993 }
7994
7995 PRE(sys_timer_delete)
7996 {
7997 /* int timer_delete(timer_t timerid); */
7998 PRINT("sys_timer_delete ( %ld )", SARG1);
7999 PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
8000 }
8001
8002 PRE(sys_timer_settime)
8003 {
8004 /* int timer_settime(timer_t timerid, int flags,
8005 const struct itimerspec *value,
8006 struct itimerspec *ovalue);
8007 */
8008 PRINT("sys_timer_settime ( %ld, %ld, %#lx, %#lx )",
8009 SARG1, SARG2, ARG3, ARG4);
8010 PRE_REG_READ4(long, "timer_settime", vki_timer_t, timerid,
8011 int, flags, const struct vki_itimerspec *, value,
8012 struct vki_itimerspec *, ovalue);
8013 PRE_MEM_READ("timer_settime(value)",
8014 ARG3, sizeof(struct vki_itimerspec));
8015 if (ARG4)
8016 PRE_MEM_WRITE("timer_settime(ovalue)",
8017 ARG4, sizeof(struct vki_itimerspec));
8018 }
8019
8020 POST(sys_timer_settime)
8021 {
8022 if (ARG4)
8023 POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
8024 }
8025
8026 PRE(sys_timer_gettime)
8027 {
8028 /* int timer_gettime(timer_t timerid, struct itimerspec *value); */
8029 PRINT("sys_timer_gettime ( %ld, %#lx )", SARG1, ARG2);
8030 PRE_REG_READ2(long, "timer_gettime", vki_timer_t, timerid,
8031 struct vki_itimerspec *, value);
8032 PRE_MEM_WRITE("timer_gettime(value)",
8033 ARG2, sizeof(struct vki_itimerspec));
8034 }
8035
8036 POST(sys_timer_gettime)
8037 {
8038 POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
8039 }
8040
8041 PRE(sys_timer_getoverrun)
8042 {
8043 /* int timer_getoverrun(timer_t timerid); */
8044 PRINT("sys_timer_getoverrun ( %ld )", SARG1);
8045 PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
8046 }
8047
8048 PRE(sys_facl)
8049 {
8050 /* int facl(int fildes, int cmd, int nentries, void *aclbufp); */
8051 PRINT("sys_facl ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
8052
8053 PRE_REG_READ4(long, "facl", int, fildes, int, cmd,
8054 int, nentries, void *, aclbufp);
8055
8056 switch (ARG2 /*cmd*/) {
8057 case VKI_SETACL:
8058 if (ARG4)
         PRE_MEM_READ("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8060 break;
8061 case VKI_GETACL:
8062 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
8063 break;
8064 case VKI_GETACLCNT:
8065 break;
8066 case VKI_ACE_SETACL:
8067 if (ARG4)
         PRE_MEM_READ("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8069 break;
8070 case VKI_ACE_GETACL:
8071 PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
8072 break;
8073 case VKI_ACE_GETACLCNT:
8074 break;
8075 default:
8076 VG_(unimplemented)("Syswrap of the facl call with cmd %ld.", SARG2);
8077 /*NOTREACHED*/
8078 break;
8079 }
8080
8081 /* Be strict. */
8082 if (!ML_(fd_allowed)(ARG1, "facl", tid, False))
8083 SET_STATUS_Failure(VKI_EBADF);
8084 }
8085
8086 POST(sys_facl)
8087 {
8088 switch (ARG2 /*cmd*/) {
8089 case VKI_SETACL:
8090 break;
8091 case VKI_GETACL:
8092 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
8093 break;
8094 case VKI_GETACLCNT:
8095 break;
8096 case VKI_ACE_SETACL:
8097 break;
8098 case VKI_ACE_GETACL:
8099 POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
8100 break;
8101 case VKI_ACE_GETACLCNT:
8102 break;
8103 default:
8104 vg_assert(0);
8105 break;
8106 }
8107 }
8108
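/* Helpers shared by the door syscall wrappers.  A door call can carry file
   descriptors in both directions in door_desc_t arrays: descriptors sent
   with DOOR_RELEASE are closed on the caller's behalf, so they are
   validated (and, with --track-fds, recorded as closed) before the call by
   pre_check_and_close_fds(), while descriptors received from the server
   are recorded afterwards by post_record_fds(). */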
8109 static Int pre_check_and_close_fds(ThreadId tid, const HChar *name,
8110 vki_door_desc_t *desc_ptr,
8111 vki_uint_t desc_num)
8112 {
8113 vki_uint_t i;
8114
8115 /* Verify passed file descriptors. */
8116 for (i = 0; i < desc_num; i++) {
8117 vki_door_desc_t *desc = &desc_ptr[i];
8118 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8119 (desc->d_attributes & DOOR_RELEASE)) {
8120 Int fd = desc->d_data.d_desc.d_descriptor;
8121
8122 /* Detect and negate attempts by the client to close Valgrind's fds.
8123 Also if doing -d style logging (which is to fd = 2 = stderr),
8124 don't allow that to be closed either. */
8125 if (!ML_(fd_allowed)(fd, name, tid, False) ||
8126 (fd == 2 && VG_(debugLog_getLevel)() > 0))
8127 return VKI_EBADF;
8128 }
8129 }
8130
   /* All fds are allowed; record information about the ones being closed.

      Note: Recording information about any closed fds should generally happen
      in a post wrapper but that is not possible in this case because door
      calls are "very blocking"; if the information were recorded only after
      the syscall finishes then it would be out-of-date during the call, i.e.
      while the syscall is blocked in the kernel. Therefore, we record closed
      fds for this specific syscall in the PRE wrapper. Unfortunately, this
      creates a problem when the syscall fails; for example, door_call() can
      fail with EBADF or EFAULT and then no fds are actually released. If that
      happens the recorded information about open fds is incorrect. This
      should be very rare (I hope) and such a condition is also reported in
      the post wrapper. */
8143 if (VG_(clo_track_fds)) {
8144 for (i = 0; i < desc_num; i++) {
8145 vki_door_desc_t *desc = &desc_ptr[i];
8146 if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
8147 (desc->d_attributes & DOOR_RELEASE)) {
8148 Int fd = desc->d_data.d_desc.d_descriptor;
8149 ML_(record_fd_close)(fd);
8150 }
8151 }
8152 }
8153
8154 return 0;
8155 }
8156
8157 static void post_record_fds(ThreadId tid, const HChar *name,
8158 vki_door_desc_t *desc_ptr, vki_uint_t desc_num)
8159 {
8160 vki_uint_t i;
8161
8162 /* Record returned file descriptors. */
8163 for (i = 0; i < desc_num; i++) {
8164 vki_door_desc_t *desc = &desc_ptr[i];
8165 if (desc->d_attributes & DOOR_DESCRIPTOR) {
8166 Int fd = desc->d_data.d_desc.d_descriptor;
8167 if (!ML_(fd_allowed)(fd, name, tid, True)) {
8168 /* Unfortunately, we cannot recover at this point and have to fail
8169 hard. */
            VG_(message)(Vg_UserMsg, "The %s syscall returned an unallowed "
                         "file descriptor %d.\n", name, fd);
8172 VG_(exit)(101);
8173 }
8174 else if (VG_(clo_track_fds))
8175 ML_(record_fd_open_named)(tid, fd);
8176 }
8177 }
8178 }
8179
8180 /* Handles repository door protocol request over client door fd. */
8181 static void repository_door_pre_mem_door_call_hook(ThreadId tid, Int fd,
8182 void *data_ptr,
8183 SizeT data_size)
8184 {
8185 vki_rep_protocol_request_t *p = (vki_rep_protocol_request_t *) data_ptr;
8186 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8187 "request->rpr_request)", p->rpr_request);
8188
8189 if (ML_(safe_to_deref)(p, sizeof(vki_rep_protocol_request_t))) {
8190 switch (p->rpr_request) {
8191 case VKI_REP_PROTOCOL_CLOSE:
8192 break;
8193 case VKI_REP_PROTOCOL_ENTITY_SETUP:
8194 {
8195 struct vki_rep_protocol_entity_setup *r =
8196 (struct vki_rep_protocol_entity_setup *) p;
8197 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8198 "entity_setup->rpr_entityid)", r->rpr_entityid);
8199 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8200 "entity_setup->rpr_entitytype)", r->rpr_entitytype);
8201 }
8202 break;
8203 case VKI_REP_PROTOCOL_ENTITY_NAME:
8204 {
8205 struct vki_rep_protocol_entity_name *r =
8206 (struct vki_rep_protocol_entity_name *) p;
8207 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8208 "entity_name->rpr_entityid)", r->rpr_entityid);
8209 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8210 "entity_name->rpr_answertype)", r->rpr_answertype);
8211 }
8212 break;
8213 #if (SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25)
8214 case VKI_REP_PROTOCOL_ENTITY_GET_ROOT:
8215 {
8216 struct vki_rep_protocol_entity_root *r =
8217 (struct vki_rep_protocol_entity_root *) p;
8218 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8219 "entity_root->rpr_entityid)", r->rpr_entityid);
8220 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8221 "entity_root->rpr_outid)", r->rpr_outid);
8222 }
8223 break;
8224 #endif /* SOLARIS_REPCACHE_PROTOCOL_VERSION >= 25 */
8225 case VKI_REP_PROTOCOL_ENTITY_GET:
8226 {
8227 struct vki_rep_protocol_entity_get *r =
8228 (struct vki_rep_protocol_entity_get *) p;
8229 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8230 "entity_get->rpr_entityid)", r->rpr_entityid);
8231 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8232 "entity_get->rpr_object)", r->rpr_object);
8233 }
8234 break;
8235 case VKI_REP_PROTOCOL_ENTITY_GET_CHILD:
8236 {
8237 struct vki_rep_protocol_entity_get_child *r =
8238 (struct vki_rep_protocol_entity_get_child *) p;
8239 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8240 "entity_get_child->rpr_entityid)", r->rpr_entityid);
8241 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8242 "entity_get_child->rpr_childid)", r->rpr_childid);
8243 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8244 "entity_get_child->rpr_name)", (Addr) r->rpr_name);
8245 }
8246 break;
8247 case VKI_REP_PROTOCOL_ENTITY_GET_PARENT:
8248 {
8249 struct vki_rep_protocol_entity_parent *r =
8250 (struct vki_rep_protocol_entity_parent *) p;
8251 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8252 "entity_get_parent->rpr_entityid)", r->rpr_entityid);
8253 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8254 "entity_get_parent->rpr_outid)", r->rpr_outid);
8255 }
8256 break;
8257 case VKI_REP_PROTOCOL_ENTITY_RESET:
8258 {
8259 struct vki_rep_protocol_entity_reset *r =
8260 (struct vki_rep_protocol_entity_reset *) p;
8261 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8262 "entity_reset->rpr_entityid)", r->rpr_entityid);
8263 }
8264 break;
8265 case VKI_REP_PROTOCOL_ENTITY_TEARDOWN:
8266 {
8267 struct vki_rep_protocol_entity_teardown *r =
8268 (struct vki_rep_protocol_entity_teardown *) p;
8269 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8270 "entity_teardown->rpr_entityid)", r->rpr_entityid);
8271 }
8272 break;
8273 case VKI_REP_PROTOCOL_ITER_READ:
8274 {
8275 struct vki_rep_protocol_iter_read *r =
8276 (struct vki_rep_protocol_iter_read *) p;
8277 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8278 "iter_read->rpr_iterid)", r->rpr_iterid);
8279 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8280 "iter_read->rpr_sequence)", r->rpr_sequence);
8281 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8282 "iter_read->rpr_entityid)", r->rpr_entityid);
8283 }
8284 break;
8285 case VKI_REP_PROTOCOL_ITER_READ_VALUE:
8286 {
8287 struct vki_rep_protocol_iter_read_value *r =
8288 (struct vki_rep_protocol_iter_read_value *) p;
8289 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8290 "iter_read_value->rpr_iterid)", r->rpr_iterid);
8291 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8292 "iter_read_value->rpr_sequence)", r->rpr_sequence);
8293 }
8294 break;
8295 case VKI_REP_PROTOCOL_ITER_RESET:
8296 case VKI_REP_PROTOCOL_ITER_SETUP:
8297 case VKI_REP_PROTOCOL_ITER_TEARDOWN:
8298 {
8299 struct vki_rep_protocol_iter_request *r =
8300 (struct vki_rep_protocol_iter_request *) p;
8301 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8302 "iter_request->rpr_iterid)", r->rpr_iterid);
8303 }
8304 break;
8305 case VKI_REP_PROTOCOL_ITER_START:
8306 {
8307 struct vki_rep_protocol_iter_start *r =
8308 (struct vki_rep_protocol_iter_start *) p;
8309 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8310 "iter_start->rpr_iterid)", r->rpr_iterid);
8311 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8312 "iter_start->rpr_entity)", r->rpr_entity);
8313 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8314 "iter_start->rpr_itertype)", r->rpr_itertype);
8315 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8316 "iter_start->rpr_flags)", r->rpr_flags);
8317 PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8318 "iter_start->rpr_pattern)", (Addr) r->rpr_pattern);
8319 }
8320 break;
8321 case VKI_REP_PROTOCOL_PROPERTY_GET_TYPE:
8322 case VKI_REP_PROTOCOL_PROPERTY_GET_VALUE:
8323 {
8324 struct vki_rep_protocol_property_request *r =
8325 (struct vki_rep_protocol_property_request *) p;
8326 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8327 "property_request->rpr_entityid)", r->rpr_entityid);
8328 }
8329 break;
8330 default:
8331 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8332 " where rpr_request=%u.", p->rpr_request);
8333 /* NOTREACHED */
8334 break;
8335 }
8336 }
8337 }
8338
8339 /* Handles repository door protocol response over client door fd. */
8340 static void repository_door_post_mem_door_call_hook(ThreadId tid, Int fd,
8341 void *rbuf, SizeT rsize)
8342 {
   /* :TODO: Ideally we would match the response type with the preceding
      request, because the response itself does not contain any type
      identification.  For now simply mark the whole response buffer as
      defined. */
8347 POST_MEM_WRITE((Addr) rbuf, rsize);
8348 }
8349
8350 /* Pre-syscall checks for params->data_ptr contents of a door_call(). */
8351 static void door_call_pre_mem_params_data(ThreadId tid, Int fd,
8352 void *data_ptr, SizeT data_size)
8353 {
8354 const HChar *pathname;
8355
   /* Get the pathname of the door file descriptor, if not already recorded.
      It is needed below to identify which door service is being called. */
8358 if (!VG_(clo_track_fds) && !ML_(fd_recorded)(fd)) {
8359 ML_(record_fd_open_named)(tid, fd);
8360 }
8361 pathname = ML_(find_fd_recorded_by_fd)(fd);
8362
8363 /* Debug-only printing. */
8364 if (0) {
8365 VG_(printf)("PRE(door_call) with fd=%d and filename=%s\n",
8366 fd, pathname);
8367 }
8368
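   /* Dispatch on the door service pathname: the kcfd door, the nscd name
      service door and the SMF repository door get dedicated handling below;
      any other door falls through to its per-fd hook (if one was recorded)
      or to the generic/lax handling at the end of this function. */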
8369 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8370 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8371
8372 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8373 "kcf_door_arg_t->da_version)", p->da_version);
8374 PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8375 "kcf_door_arg_t->da_iskernel)", p->da_iskernel);
8376 PRE_MEM_RASCIIZ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
8377 "kcf_door_arg_t->da_u.filename)",
8378 (Addr) p->vki_da_u.filename);
8379 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8380 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8381
8382 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8383 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
8384 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8385 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8386 /* request from an application towards nscd */
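         /* The request buffer is a nss_pheader_t header followed by a data
            area; the dbd, key, data and ext sections are located by the
            (offset, length) pairs stored in the header. */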
8387 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8388 "nss_pheader->p_version)", p->p_version);
8389 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8390 "nss_pheader->dbd_off)", p->dbd_off);
8391 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8392 "nss_pheader->dbd_len)", p->dbd_len);
8393 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8394 "nss_pheader->key_off)", p->key_off);
8395 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8396 "nss_pheader->key_len)", p->key_len);
8397 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8398 "nss_pheader->data_off)", p->data_off);
8399 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8400 "nss_pheader->data_len)", p->data_len);
8401 /* Fields ext_off and ext_len are set only sporadically. */
8402 PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
8403 "nss_pheader->pbufsiz)", p->pbufsiz);
8404 PRE_MEM_WRITE("door_call(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
8405 (Addr) p, p->pbufsiz);
8406
8407 if (p->dbd_len > 0) {
8408 vki_nss_dbd_t *dbd
8409 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
8410
8411 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR
8412 "\", nss_dbd)", (Addr) dbd, sizeof(vki_nss_dbd_t));
8413 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
8414 if (dbd->o_name != 0)
8415 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8416 "\", nss_dbd->o_name)", (Addr) ((HChar *) p
8417 + p->dbd_off + dbd->o_name));
8418 if (dbd->o_config_name != 0)
8419 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8420 "\", nss_dbd->o_config_name)",
8421 (Addr) ((HChar *) p + p->dbd_off
8422 + dbd->o_config_name));
8423 if (dbd->o_default_config != 0)
8424 PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
8425 "\", nss_dbd->o_default_config)",
8426 (Addr) ((HChar *) p + p->dbd_off +
8427 dbd->o_default_config));
8428 }
8429 }
8430
8431 PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", nss->key)",
8432 (Addr) ((HChar *) p + p->key_off), p->key_len);
8433 } else {
8434 /* request from a child nscd towards parent nscd */
8435 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8436 }
8437 }
8438 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8439 vki_repository_door_request_t *p =
8440 (vki_repository_door_request_t *) data_ptr;
8441
8442 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8443 "request->rdr_version)", p->rdr_version);
8444 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8445 "request->rdr_request)", p->rdr_request);
8446 if (ML_(safe_to_deref)(p, sizeof(vki_repository_door_request_t))) {
8447 if (p->rdr_version == VKI_REPOSITORY_DOOR_VERSION) {
8448 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8449 "request->rdr_flags)", p->rdr_flags);
8450 PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
8451 "request->rdr_debug)", p->rdr_debug);
8452 } else {
8453 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
8454 " where version=%u.", p->rdr_version);
8455 }
8456 }
8457 } else {
8458 const OpenDoor *open_door = door_find_by_fd(fd);
8459 if ((open_door != NULL) && (open_door->pre_mem_hook != NULL)) {
8460 open_door->pre_mem_hook(tid, fd, data_ptr, data_size);
8461 } else {
8462 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
8463 /*
8464 * Be very lax about door syscall handling over unrecognized
8465           * door file descriptors. Do not require that the full buffer
8466 * is initialized when writing. Without this, programs using
8467 * libdoor(3LIB) functionality with completely proprietary
8468           * semantics may report a large number of false positives.
8469 */
8470 } else {
8471 static Int moans = 3;
8472
8473 /* generic default */
8474 if (moans > 0 && !VG_(clo_xml)) {
8475 moans--;
8476 VG_(umsg)(
8477 "Warning: noted and generically handled door call\n"
8478 " on file descriptor %d (filename: %s).\n"
8479 " This could cause spurious value errors to appear.\n"
8480 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
8481                " Alternatively, you may find the '--sim-hints=lax-doors' option useful.\n",
8482 fd, pathname);
8483 }
8484 PRE_MEM_READ("door_call(params->data_ptr)",
8485 (Addr) data_ptr, data_size);
8486 }
8487 }
8488 }
8489 }
8490
8491 /* Post-syscall checks for params->rbuf contents of a door_call(). */
8492 static void door_call_post_mem_params_rbuf(ThreadId tid, Int fd,
8493 void *rbuf, SizeT rsize,
8494 const vki_door_desc_t *desc_ptr,
8495 vki_uint_t desc_num)
8496 {
8497 const HChar *pathname = ML_(find_fd_recorded_by_fd)(fd);
8498
8499 /* Debug-only printing. */
8500 if (0) {
8501 VG_(printf)("POST(door_call) with fd=%d and filename=%s\n",
8502 fd, pathname);
8503 }
8504
8505 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8506 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) rbuf;
8507
8508 POST_FIELD_WRITE(p->da_version);
8509 POST_FIELD_WRITE(p->vki_da_u.result.status);
8510 POST_MEM_WRITE((Addr) p->vki_da_u.result.signature,
8511 p->vki_da_u.result.siglen);
8512 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8513 vki_nss_pheader_t *p = (vki_nss_pheader_t *) rbuf;
8514
8515 POST_FIELD_WRITE(p->nsc_callnumber);
8516 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8517 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8518 /* response from nscd to an application */
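         /* Only the sections that actually fit into the reply buffer (as
            bounded by pbufsiz, which itself must fit into rsize) are marked
            as defined below; all offsets are relative to the header start. */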
8519 POST_FIELD_WRITE(p->p_status);
8520 POST_FIELD_WRITE(p->p_errno);
8521 POST_FIELD_WRITE(p->p_herrno);
8522 POST_FIELD_WRITE(p->dbd_off);
8523 POST_FIELD_WRITE(p->dbd_len);
8524 POST_FIELD_WRITE(p->key_off);
8525 POST_FIELD_WRITE(p->key_len);
8526 POST_FIELD_WRITE(p->data_off);
8527 POST_FIELD_WRITE(p->data_len);
8528 POST_FIELD_WRITE(p->ext_off);
8529 POST_FIELD_WRITE(p->ext_len);
8530 POST_FIELD_WRITE(p->pbufsiz);
8531
8532 if (p->pbufsiz <= rsize) {
8533 if (p->dbd_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
8534 SizeT len = MIN(p->dbd_len, p->pbufsiz - p->dbd_off);
8535 POST_MEM_WRITE((Addr) ((HChar *) p + p->dbd_off), len);
8536 }
8537
8538 if (p->key_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
8539 SizeT len = MIN(p->key_len, p->pbufsiz - p->key_off);
8540 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), len);
8541 }
8542
8543 if (p->data_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
8544 SizeT len = MIN(p->data_len, p->pbufsiz - p->data_off);
8545 POST_MEM_WRITE((Addr) ((HChar *) p + p->data_off), len);
8546 }
8547
8548 if (p->ext_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
8549 SizeT len = MIN(p->ext_len, p->pbufsiz - p->ext_off);
8550 POST_MEM_WRITE((Addr) ((HChar *) p + p->ext_off), len);
8551 }
8552 }
8553 } else {
8554 /* response from parent nscd to a child nscd */
8555 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8556 }
8557 }
8558 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8559 POST_FIELD_WRITE(((vki_repository_door_response_t *) rbuf)->rdr_status);
8560 /* A new client door fd is passed over the global repository door. */
8561 if ((desc_ptr != NULL) && (desc_num > 0)) {
8562 if (desc_ptr[0].d_attributes & DOOR_DESCRIPTOR) {
8563 door_record_client(tid, desc_ptr[0].d_data.d_desc.d_descriptor,
8564 repository_door_pre_mem_door_call_hook,
8565 repository_door_post_mem_door_call_hook);
8566 }
8567 }
8568 } else {
8569 const OpenDoor *open_door = door_find_by_fd(fd);
8570 if ((open_door != NULL) && (open_door->post_mem_hook != NULL)) {
8571 open_door->post_mem_hook(tid, fd, rbuf, rsize);
8572 } else {
8573 /* generic default */
8574 POST_MEM_WRITE((Addr) rbuf, rsize);
8575 }
8576 }
8577 }
8578
8579 /* Pre-syscall checks for data_ptr contents in a door_return(). */
8580 static void door_return_pre_mem_data(ThreadId tid, Addr server_procedure,
8581 void *data_ptr, SizeT data_size)
8582 {
8583 if ((data_size == 0) || (server_procedure == 0)) {
8584       /* There is nothing to check. This usually happens during a thread's
8585          first call to door_return(). */
8586 return;
8587 }
8588
8589    /* Get the pathname of the door file descriptor based on the
8590       door server procedure (that's all we have). It is needed to
8591       identify the door service by its pathname below. */
8592 const OpenDoor *open_door = door_find_by_proc(server_procedure);
8593 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
8594 Int fd = (open_door != NULL) ? open_door->fd : -1;
8595
8596 /* Debug-only printing. */
8597 if (0) {
8598 VG_(printf)("PRE(door_return) with fd=%d and filename=%s "
8599 "(nr_doors_recorded=%u)\n",
8600 fd, pathname, nr_doors_recorded);
8601 }
8602
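   /* The data checked here is the server's reply that door_return() hands
      back to the client; dispatch on the pathname recorded for the server
      procedure, analogously to the door_call() handling above. */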
8603 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8604 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8605
8606 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
8607 "kcf_door_arg_t->da_version)", p->da_version);
8608 PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
8609 "kcf_door_arg_t->da_u.result.status)",
8610 p->vki_da_u.result.status);
8611 PRE_MEM_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
8612 "kcf_door_arg_t->da_u.result.signature)",
8613 (Addr) p->vki_da_u.result.signature,
8614 p->vki_da_u.result.siglen);
8615 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8616 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8617
8618 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8619 "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
8620 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8621 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8622 /* response from nscd to an application */
8623 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8624 "nss_pheader->p_status)", p->p_status);
8625 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8626 "nss_pheader->p_errno)", p->p_errno);
8627 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8628 "nss_pheader->p_herrno)", p->p_herrno);
8629 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8630 "nss_pheader->dbd_off)", p->dbd_off);
8631 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8632 "nss_pheader->dbd_len)", p->dbd_len);
8633 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8634 "nss_pheader->data_off)", p->data_off);
8635 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8636 "nss_pheader->data_len)", p->data_len);
8637 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8638 "nss_pheader->ext_off)", p->ext_off);
8639 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8640 "nss_pheader->ext_len)", p->ext_len);
8641 PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
8642 "nss_pheader->pbufsiz)", p->pbufsiz);
8643 PRE_MEM_WRITE("door_return(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
8644 (Addr) p, p->pbufsiz);
8645 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
8646 "\", nss->data)",
8647 (Addr) ((HChar *) p + p->data_off), p->data_len);
8648 PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
8649 "\", nss->ext)",
8650 (Addr) ((HChar *) p + p->ext_off), p->ext_len);
8651 } else {
8652 /* response from parent nscd to a child nscd */
8653 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8654 }
8655 }
8656 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8657 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
8658 } else {
8659 if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
8660 /*
8661 * Be very lax about door syscall handling over unrecognized
8662        * door file descriptors. Do not require that the full buffer
8663 * is initialized when writing. Without this, programs using
8664 * libdoor(3LIB) functionality with completely proprietary
8665        * semantics may report a large number of false positives.
8666 */
8667 } else {
8668 static Int moans = 3;
8669
8670 /* generic default */
8671 if (moans > 0 && !VG_(clo_xml)) {
8672 moans--;
8673 VG_(umsg)(
8674 "Warning: noted and generically handled door return\n"
8675 " on file descriptor %d (filename: %s).\n"
8676 " This could cause spurious value errors to appear.\n"
8677 " See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
8678              " Alternatively, you may find the '--sim-hints=lax-doors' option useful.\n",
8679 fd, pathname);
8680 }
8681 PRE_MEM_READ("door_return(data_ptr)",
8682 (Addr) data_ptr, data_size);
8683 }
8684 }
8685 }
8686
8687 /* Post-syscall checks for data_ptr contents in a door_return(). */
8688 static void door_return_post_mem_data(ThreadId tid, Addr server_procedure,
8689 void *data_ptr, SizeT data_size)
8690 {
8691 const OpenDoor *open_door = door_find_by_proc(server_procedure);
8692 const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
8693
8694 /* Debug-only printing. */
8695 if (0) {
8696 Int fd = (open_door != NULL) ? open_door->fd : -1;
8697 VG_(printf)("POST(door_return) with fd=%d and filename=%s "
8698 "(nr_doors_recorded=%u)\n",
8699 fd, pathname, nr_doors_recorded);
8700 }
8701
8702 if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
8703 vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
8704
8705 POST_FIELD_WRITE(p->da_version);
8706 POST_FIELD_WRITE(p->da_iskernel);
8707 POST_MEM_WRITE((Addr) p->vki_da_u.filename,
8708 VG_(strlen)(p->vki_da_u.filename) + 1);
8709 } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
8710 vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
8711
8712 POST_FIELD_WRITE(p->nsc_callnumber);
8713 if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
8714 if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
8715 /* request from an application towards nscd */
8716 POST_FIELD_WRITE(p->p_version);
8717 POST_FIELD_WRITE(p->dbd_off);
8718 POST_FIELD_WRITE(p->dbd_len);
8719 POST_FIELD_WRITE(p->key_off);
8720 POST_FIELD_WRITE(p->key_len);
8721 POST_FIELD_WRITE(p->data_off);
8722 POST_FIELD_WRITE(p->data_len);
8723 POST_FIELD_WRITE(p->ext_off);
8724 POST_FIELD_WRITE(p->ext_len);
8725 POST_FIELD_WRITE(p->pbufsiz);
8726
8727 if (p->dbd_len > 0) {
8728 vki_nss_dbd_t *dbd
8729 = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
8730
8731 POST_MEM_WRITE((Addr) dbd, sizeof(vki_nss_dbd_t));
8732 if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
8733 SizeT headers_size = sizeof(vki_nss_pheader_t)
8734 + sizeof(vki_nss_dbd_t);
8735
8736 if (dbd->o_name != 0) {
8737 HChar *name = (HChar *) p + p->dbd_off + dbd->o_name;
8738 SizeT name_len = VG_(strlen)(name) + 1;
8739 if (name_len <= data_size - headers_size)
8740 POST_MEM_WRITE((Addr) name, name_len);
8741 }
8742 if (dbd->o_config_name != 0) {
8743 HChar *name = (HChar *) p + p->dbd_off + dbd->o_config_name;
8744 SizeT name_len = VG_(strlen)(name) + 1;
8745 if (name_len <= data_size - headers_size)
8746 POST_MEM_WRITE((Addr) name, name_len);
8747 }
8748 if (dbd->o_default_config != 0) {
8749 HChar *name = (HChar *) p + p->dbd_off
8750 + dbd->o_default_config;
8751 SizeT name_len = VG_(strlen)(name) + 1;
8752 if (name_len <= data_size - headers_size)
8753 POST_MEM_WRITE((Addr) name, name_len);
8754 }
8755 }
8756 }
8757
8758 if (p->key_len <= data_size - p->key_off)
8759 POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), p->key_len);
8760 } else {
8761 /* request from a child nscd towards parent nscd */
8762 VG_(unimplemented)("Door wrapper of child/parent nscd.");
8763 }
8764 }
8765 } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
8766 VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
8767 } else {
8768 /* generic default */
8769 POST_MEM_WRITE((Addr) data_ptr, data_size);
8770 }
8771 }
8772
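/* For orientation only: a typical libdoor client invocation that reaches the
   DOOR_CALL branch below looks roughly like this (illustrative sketch, not
   part of the wrapper; 'fd', 'request' and 'reply_buf' are made-up names):

      door_arg_t a;
      a.data_ptr = (char *) &request;   a.data_size = sizeof(request);
      a.desc_ptr = NULL;                a.desc_num  = 0;
      a.rbuf     = reply_buf;           a.rsize     = sizeof(reply_buf);
      (void) door_call(fd, &a);

   On return, a.rbuf/a.rsize describe the results. If the supplied buffer was
   too small, the kernel may instead hand back a newly mmap'd buffer; the
   POST handler below detects such a new segment and informs the tool. */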
8773 PRE(sys_door)
8774 {
8775 /* int doorfs(long arg1, long arg2, long arg3, long arg4, long arg5,
8776 long subcode); */
8777 ThreadState *tst = VG_(get_ThreadState)(tid);
8778 *flags |= SfMayBlock | SfPostOnFail;
8779
8780 PRINT("sys_door ( %#lx, %#lx, %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3,
8781 ARG4, ARG5, SARG6);
8782
8783    /* Macro PRE_REG_READ6 cannot simply be used because not all ARGs are
8784       used by every door() variant. Note that ARG6 (subcode) is always used. */
8785 #define PRE_REG_READ_SIXTH_ONLY \
8786 if (VG_(tdict).track_pre_reg_read) { \
8787 PRA6("door", long, subcode); \
8788 }
8789
8790 switch (ARG6 /*subcode*/) {
8791 case VKI_DOOR_CREATE:
8792 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
8793 PRE_REG_READ_SIXTH_ONLY;
8794 /* Note: the first argument to DOOR_CREATE is a server procedure.
8795 This could lead to a problem if the kernel tries to force the
8796 execution of this procedure, similarly to how signal handlers are
8797 executed. Fortunately, the kernel never does that (for user-space
8798 server procedures). The procedure is always executed by the standard
8799 library. */
8800 break;
8801 case VKI_DOOR_REVOKE:
8802 PRE_REG_READ1(long, "door", long, arg1);
8803 PRE_REG_READ_SIXTH_ONLY;
8804 if (!ML_(fd_allowed)(ARG1, "door_revoke", tid, False))
8805 SET_STATUS_Failure(VKI_EBADF);
8806 break;
8807 case VKI_DOOR_INFO:
8808 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
8809 PRE_REG_READ_SIXTH_ONLY;
8810 PRE_MEM_WRITE("door_info(info)", ARG2, sizeof(vki_door_info_t));
8811 break;
8812 case VKI_DOOR_CALL:
8813 {
8814 PRE_REG_READ2(long, "door", long, arg1, long, arg2);
8815 PRE_REG_READ_SIXTH_ONLY;
8816
8817 Int rval = 0;
8818 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
8819
8820 if (!ML_(fd_allowed)(ARG1, "door_call", tid, False))
8821 rval = VKI_EBADF;
8822
8823 PRE_FIELD_READ("door_call(params->data_ptr)", params->data_ptr);
8824 PRE_FIELD_READ("door_call(params->data_size)", params->data_size);
8825 PRE_FIELD_READ("door_call(params->desc_ptr)", params->desc_ptr);
8826 PRE_FIELD_READ("door_call(params->desc_num)", params->desc_num);
8827 PRE_FIELD_READ("door_call(params->rbuf)", params->rbuf);
8828 PRE_FIELD_READ("door_call(params->rsize)", params->rsize);
8829
8830 if (ML_(safe_to_deref)(params, sizeof(*params))) {
8831 if (params->data_ptr)
8832 door_call_pre_mem_params_data(tid, ARG1, params->data_ptr,
8833 params->data_size);
8834
8835 if (params->desc_ptr) {
8836 SizeT desc_size = params->desc_num * sizeof(*params->desc_ptr);
8837 PRE_MEM_READ("door_call(params->desc_ptr)",
8838 (Addr)params->desc_ptr, desc_size);
8839
8840 /* Do not record information about closed fds if we are going
8841 to fail the syscall and so no fds will be closed. */
8842 if ((rval == 0) &&
8843 (ML_(safe_to_deref)(params->desc_ptr, desc_size))) {
8844 rval = pre_check_and_close_fds(tid, "door_call",
8845 params->desc_ptr,
8846 params->desc_num);
8847 }
8848 }
8849
8850 if (params->rbuf)
8851 PRE_MEM_WRITE("door_call(params->rbuf)", (Addr)params->rbuf,
8852 params->rsize);
8853 }
8854
8855 if (rval)
8856 SET_STATUS_Failure(rval);
8857 }
8858 break;
8859 case VKI_DOOR_BIND:
8860 PRE_REG_READ1(long, "door", long, arg1);
8861 PRE_REG_READ_SIXTH_ONLY;
8862 VG_(unimplemented)("DOOR_BIND");
8863 break;
8864 case VKI_DOOR_UNBIND:
8865 PRE_REG_READ0(long, "door");
8866 PRE_REG_READ_SIXTH_ONLY;
8867 VG_(unimplemented)("DOOR_UNBIND");
8868 break;
8869 case VKI_DOOR_UNREFSYS:
8870 PRE_REG_READ0(long, "door");
8871 PRE_REG_READ_SIXTH_ONLY;
8872 VG_(unimplemented)("DOOR_UNREFSYS");
8873 break;
8874 case VKI_DOOR_UCRED:
8875 PRE_REG_READ1(long, "door", long, arg1);
8876 PRE_REG_READ_SIXTH_ONLY;
8877 VG_(unimplemented)("DOOR_UCRED");
8878 break;
8879 case VKI_DOOR_RETURN:
8880 PRE_REG_READ6(long, "door", long, arg1, long, arg2, long, arg3,
8881 long, arg4, long, arg5, long, subcode);
8882
8883 /* Register %esp/%rsp is read and modified by the syscall. */
8884 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(sp)",
8885 VG_O_STACK_PTR, sizeof(UWord));
8886       /* Register %ebp/%rbp is not really read by the syscall; it is only
8887          written by it. However it is hard to determine when it is written,
8888          so we make sure it is always valid prior to making the syscall. */
8889 VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(bp)",
8890 VG_O_FRAME_PTR, sizeof(UWord));
8891
8892 door_return_pre_mem_data(tid, tst->os_state.door_return_procedure,
8893 (void *) ARG1, ARG2);
8894
8895       /* Do not tell the tool where the syscall is going to write the
8896          resulting data. This check has to be skipped because the data area
8897          starting at ARG4-ARG5 (of length ARG5) is usually on a client
8898          thread's stack below the stack pointer and can therefore be marked
8899          by a tool (for example, Memcheck) as inaccessible. Skipping the
8900          check is safe: if there is something wrong with the data area then
8901          the syscall will fail, or the error will be handled by
8902          POST_MEM_WRITE() in the post wrapper. */
8903 /*PRE_MEM_WRITE("door_return(sp)", ARG4 - ARG5, ARG5);*/
8904
8905 if (ARG3) {
8906 vki_door_return_desc_t *desc_env = (vki_door_return_desc_t*)ARG3;
8907
8908 PRE_MEM_READ("door_return(desc_env)", ARG3,
8909 sizeof(vki_door_return_desc_t));
8910
8911 if (ML_(safe_to_deref)(desc_env, sizeof(*desc_env)) &&
8912 desc_env->desc_ptr) {
8913 Int rval;
8914
8915 PRE_MEM_READ("door_return(desc_env->desc_ptr)",
8916 (Addr)desc_env->desc_ptr,
8917 desc_env->desc_num * sizeof(*desc_env->desc_ptr));
8918
8919 rval = pre_check_and_close_fds(tid, "door_return",
8920 desc_env->desc_ptr,
8921 desc_env->desc_num);
8922 if (rval)
8923 SET_STATUS_Failure(rval);
8924 }
8925 }
8926 tst->os_state.in_door_return = True;
8927 tst->os_state.door_return_procedure = 0;
8928 break;
8929 case VKI_DOOR_GETPARAM:
8930 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
8931 PRE_REG_READ_SIXTH_ONLY;
8932 VG_(unimplemented)("DOOR_GETPARAM");
8933 break;
8934 case VKI_DOOR_SETPARAM:
8935 PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
8936 PRE_REG_READ_SIXTH_ONLY;
8937 VG_(unimplemented)("DOOR_SETPARAM");
8938 break;
8939 default:
8940 VG_(unimplemented)("Syswrap of the door call with subcode %ld.", SARG6);
8941 /*NOTREACHED*/
8942 break;
8943 }
8944
8945 #undef PRE_REG_READ_SIXTH_ONLY
8946 }
8947
8948 POST(sys_door)
8949 {
8950 ThreadState *tst = VG_(get_ThreadState)(tid);
8951
8952 vg_assert(SUCCESS || FAILURE);
8953
8954 /* Alter the tst->os_state.in_door_return flag. */
8955 if (ARG6 == VKI_DOOR_RETURN) {
8956 vg_assert(tst->os_state.in_door_return == True);
8957 tst->os_state.in_door_return = False;
8958
8959 /* Inform the tool that %esp/%rsp and %ebp/%rbp were (potentially)
8960 modified. */
8961 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_STACK_PTR,
8962 sizeof(UWord));
8963 VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_FRAME_PTR,
8964 sizeof(UWord));
8965 }
8966 else
8967 vg_assert(tst->os_state.in_door_return == False);
8968
8969 if (FAILURE) {
8970 if (VG_(clo_track_fds)) {
8971 /* See the discussion in pre_check_and_close_fds() to understand this
8972 part. */
8973 Bool loss = False;
8974 switch (ARG6 /*subcode*/) {
8975 case VKI_DOOR_CALL:
8976 if (ERR == VKI_EFAULT || ERR == VKI_EBADF)
8977 loss = True;
8978 break;
8979 case VKI_DOOR_RETURN:
8980 if (ERR == VKI_EFAULT || ERR == VKI_EINVAL)
8981 loss = True;
8982 break;
8983 default:
8984 break;
8985 }
8986 if (loss)
8987 VG_(message)(Vg_UserMsg, "The door call failed with an "
8988 "unexpected error and information "
8989                                   "about open file descriptors may "
8990                                   "now be imprecise.\n");
8991 }
8992
8993 return;
8994 }
8995
8996 vg_assert(SUCCESS);
8997
8998 switch (ARG6 /*subcode*/) {
8999 case VKI_DOOR_CREATE:
9000 door_record_server(tid, ARG1, RES);
9001 break;
9002 case VKI_DOOR_REVOKE:
9003 door_revoke(tid, ARG1);
9004 if (VG_(clo_track_fds))
9005 ML_(record_fd_close)(ARG1);
9006 break;
9007 case VKI_DOOR_INFO:
9008 POST_MEM_WRITE(ARG2, sizeof(vki_door_info_t));
9009 break;
9010 case VKI_DOOR_CALL:
9011 {
9012 /* Note that all returned values are stored in the rbuf, i.e.
9013          data_ptr and desc_ptr point into this buffer. */
9014 vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
9015
9016 if (params->rbuf) {
9017 Addr addr = (Addr)params->rbuf;
9018 if (!VG_(am_find_anon_segment(addr))) {
9019 /* This segment is new and was mapped by the kernel. */
9020 UInt prot, flags;
9021 SizeT size;
9022
9023 prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
9024 flags = VKI_MAP_ANONYMOUS;
9025 size = VG_PGROUNDUP(params->rsize);
9026
9027 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_door), "
9028 "new segment: vaddr=%#lx, size=%#lx, "
9029 "prot=%#x, flags=%#x, fd=%ld, offset=%#llx\n",
9030 addr, size, prot, flags, (UWord)-1, (ULong)0);
9031
9032 ML_(notify_core_and_tool_of_mmap)(addr, size, prot, flags,
9033 -1, 0);
9034
9035 /* Note: We don't notify the debuginfo reader about this
9036                mapping because there is no debug information stored in
9037 this segment. */
9038 }
9039
9040 door_call_post_mem_params_rbuf(tid, ARG1, (void *) addr,
9041 params->rsize, params->desc_ptr,
9042 params->desc_num);
9043 }
9044
9045 if (params->desc_ptr) {
9046 POST_MEM_WRITE((Addr)params->desc_ptr,
9047 params->desc_num * sizeof(vki_door_desc_t));
9048 post_record_fds(tid, "door_call", params->desc_ptr,
9049 params->desc_num);
9050 }
9051 }
9052 break;
9053 case VKI_DOOR_BIND:
9054 break;
9055 case VKI_DOOR_UNBIND:
9056 break;
9057 case VKI_DOOR_UNREFSYS:
9058 break;
9059 case VKI_DOOR_UCRED:
9060 break;
9061 case VKI_DOOR_RETURN:
9062 {
9063 struct vki_door_results *results
9064 = (struct vki_door_results*)VG_(get_SP)(tid);
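         /* The kernel has pushed a door_results structure onto this server
            thread's stack; it describes the incoming invocation: the data
            area, any passed descriptors, the door_info pointer and the
            server procedure to invoke (results->pc). */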
9065
9066 tst->os_state.door_return_procedure = (Addr)results->pc;
9067
9068 POST_MEM_WRITE((Addr)results, sizeof(*results));
9069 if (results->data_ptr)
9070 door_return_post_mem_data(tid,
9071 tst->os_state.door_return_procedure,
9072 results->data_ptr,
9073 results->data_size);
9074 if (results->desc_ptr) {
9075 POST_MEM_WRITE((Addr)results->desc_ptr,
9076 results->desc_num * sizeof(vki_door_desc_t));
9077 post_record_fds(tid, "door_return", results->desc_ptr,
9078 results->desc_num);
9079 }
9080
9081 POST_MEM_WRITE((Addr)results->door_info,
9082 sizeof(*results->door_info));
9083 }
9084 break;
9085 case VKI_DOOR_GETPARAM:
9086 break;
9087 case VKI_DOOR_SETPARAM:
9088 break;
9089 default:
9090 vg_assert(0);
9091 break;
9092 }
9093 }
9094
9095 PRE(sys_schedctl)
9096 {
9097 /* caddr_t schedctl(void); */
9098 /* This syscall returns an address that points to struct sc_shared.
9099    This per-thread structure is used as an interface between libc and
9100 the kernel. */
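   /* For reference only (these names come from the schedctl(3C) interface
      and are not used here): libc typically obtains this block via
      schedctl_init() and uses it for preemption-control hints through
      schedctl_start() and schedctl_stop(). */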
9101 PRINT("sys_schedctl ( )");
9102 PRE_REG_READ0(long, "schedctl");
9103 }
9104
9105 POST(sys_schedctl)
9106 {
9107 Addr a = RES;
9108 ThreadState *tst = VG_(get_ThreadState)(tid);
9109
9110 /* Stay sane. */
9111 vg_assert((tst->os_state.schedctl_data == 0) ||
9112 (tst->os_state.schedctl_data == a));
9113 tst->os_state.schedctl_data = a;
9114
9115    /* The returned address points to a block in a mapped page. */
9116 if (!VG_(am_find_anon_segment(a))) {
9117 Addr page = VG_PGROUNDDN(a);
9118 UInt prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
9119 UInt flags = VKI_MAP_ANONYMOUS;
9120 /* The kernel always allocates one page for the sc_shared struct. */
9121 SizeT size = VKI_PAGE_SIZE;
9122
9123 VG_(debugLog)(1, "syswrap-solaris", "POST(sys_schedctl), new segment: "
9124 "vaddr=%#lx, size=%#lx, prot=%#x, flags=%#x, fd=-1, "
9125 "offset=0\n", page, size, prot, flags);
9126
9127       /* The kernel always places a redzone before and after the allocated page.
9128 Check this assertion now; the tool can later request to allocate
9129 a Valgrind segment and aspacemgr will place it adjacent. */
9130 const NSegment *seg = VG_(am_find_nsegment(page - 1));
9131 vg_assert(seg == NULL || seg->kind == SkResvn);
9132 seg = VG_(am_find_nsegment(page + VKI_PAGE_SIZE));
9133 vg_assert(seg == NULL || seg->kind == SkResvn);
9134
9135 /* The address space manager works with whole pages. */
9136 VG_(am_notify_client_mmap)(page, size, prot, flags, -1, 0);
9137
9138       /* Note: There is no need to notify debuginfo about the new mapping
9139          because it is only an anonymous mapping. */
9140 /* Note: schedctl data are cleaned in two places:
9141 - for the tool when the thread exits
9142 - for the core in child's post-fork handler clean_schedctl_data(). */
9143 }
9144
9145 /* The tool needs per-thread granularity, not whole pages. */
9146 VG_TRACK(new_mem_mmap, a, sizeof(struct vki_sc_shared), True, True, True, 0);
9147 POST_MEM_WRITE(a, sizeof(struct vki_sc_shared));
9148 }
9149
9150 PRE(sys_pset)
9151 {
9152 /* Kernel: int pset(int subcode, long arg1, long arg2, long arg3,
9153 long arg4); */
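   /* The pset syscall multiplexes all processor-set operations; the subcode
      in ARG1 selects the libc-level operation which each case below decodes
      and checks. */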
9154 switch (ARG1 /* subcode */) {
9155 case VKI_PSET_CREATE:
9156 /* Libc: int pset_create(psetid_t *newpset); */
9157 PRINT("sys_pset ( %ld, %#lx )", SARG1, ARG2);
9158 PRE_REG_READ2(long, SC2("pset", "create"), int, subcode,
9159 vki_psetid_t *, newpset);
9160 PRE_MEM_WRITE("pset(newpset)", ARG2, sizeof(vki_psetid_t));
9161 break;
9162 case VKI_PSET_DESTROY:
9163 /* Libc: int pset_destroy(psetid_t pset); */
9164 PRINT("sys_pset ( %ld, %ld )", SARG1, SARG2);
9165 PRE_REG_READ2(long, SC2("pset", "destroy"), int, subcode,
9166 vki_psetid_t, pset);
9167 break;
9168 case VKI_PSET_ASSIGN:
9169 /* Libc: int pset_assign(psetid_t pset, processorid_t cpu,
9170 psetid_t *opset); */
9171 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9172 PRE_REG_READ4(long, SC2("pset", "assign"), int, subcode,
9173 vki_psetid_t, pset, vki_processorid_t, cpu,
9174 vki_psetid_t *, opset);
9175 if (ARG4 != 0)
9176 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9177 break;
9178 case VKI_PSET_INFO:
9179 /* Libc: int pset_info(psetid_t pset, int *type, uint_t *numcpus,
9180 processorid_t *cpulist); */
9181 PRINT("sys_pset ( %ld, %ld, %#lx, %#lx, %#lx )", SARG1, SARG2, ARG3,
9182 ARG4, ARG5);
9183 PRE_REG_READ5(long, SC2("pset", "info"), int, subcode, vki_psetid_t, pset,
9184 int *, type, vki_uint_t *, numcpus,
9185 vki_processorid_t *, cpulist);
9186 if (ARG3 != 0)
9187 PRE_MEM_WRITE("pset(type)", ARG3, sizeof(int));
9188 if (ARG4 != 0)
9189 PRE_MEM_WRITE("pset(numcpus)", ARG4, sizeof(vki_uint_t));
9190 if ((ARG4 != 0) && (ARG5 != 0)) {
9191 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9192 if (ML_(safe_to_deref(numcpus, sizeof(vki_uint_t)))) {
9193 PRE_MEM_WRITE("pset(cpulist)", ARG5,
9194 *numcpus * sizeof(vki_processorid_t));
9195             /* If the cpulist buffer is not large enough, it will hold only as
9196                many entries as fit in the buffer. However, numcpus will contain
9197                the real number of cpus, which will be greater than the value
9198                originally passed in. Stash the original value in unused ARG6. */
9199 ARG6 = *numcpus;
9200 }
9201 }
9202 break;
9203 case VKI_PSET_BIND:
9204 /* Libc: int pset_bind(psetid_t pset, idtype_t idtype, id_t id,
9205 psetid_t *opset); */
9206 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9207 SARG4, ARG5);
9208 PRE_REG_READ5(long, SC2("pset", "bind"), int, subcode, vki_psetid_t, pset,
9209 vki_idtype_t, idtype, vki_id_t, id, vki_psetid_t *, opset);
9210 if (ARG5 != 0)
9211 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9212 break;
9213 case VKI_PSET_BIND_LWP:
9214 /* Libc: int pset_bind_lwp(psetid_t pset, id_t id, pid_t pid,
9215 psetid_t *opset); */
9216 PRINT("sys_pset ( %ld, %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3,
9217 SARG4, ARG5);
9218 PRE_REG_READ5(long, SC2("pset", "bind_lwp"), int, subcode,
9219 vki_psetid_t, pset, vki_id_t, id, vki_pid_t, pid,
9220 vki_psetid_t *, opset);
9221 if (ARG5 != 0)
9222 PRE_MEM_WRITE("pset(opset)", ARG5, sizeof(vki_psetid_t));
9223 break;
9224 case VKI_PSET_GETLOADAVG:
9225 /* Libc: int pset_getloadavg(psetid_t pset, double loadavg[],
9226 int nelem); */
9227 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9228 PRE_REG_READ4(long, SC2("pset", "getloadavg"), int, subcode,
9229 vki_psetid_t, pset, double, loadavg[], int, nelem);
9230 if (ARG3 != 0)
9231 PRE_MEM_WRITE("pset(loadavg)", ARG3, SARG4 * sizeof(double));
9232 break;
9233 case VKI_PSET_LIST:
9234 /* Libc: int pset_list(psetid_t *psetlist, uint_t *numpsets); */
9235 PRINT("sys_pset ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9236 PRE_REG_READ3(long, SC2("pset", "list"), int, subcode,
9237 vki_psetid_t *, psetlist, vki_uint_t *, numpsets);
9238 if (ARG3 != 0)
9239 PRE_MEM_WRITE("pset(numpsets)", ARG3, sizeof(vki_uint_t));
9240 if ((ARG2 != 0) && (ARG3 != 0)) {
9241 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9242 if (ML_(safe_to_deref(numpsets, sizeof(vki_uint_t)))) {
9243 PRE_MEM_WRITE("pset(psetlist)", ARG2,
9244 *numpsets * sizeof(vki_psetid_t));
9245             /* If the psetlist buffer is not large enough, it will hold only as
9246                many entries as fit in the buffer. However, numpsets will contain
9247                the real number of processor sets, which will be greater than the
9248                value originally passed in. Stash the original value in unused ARG6. */
9249 ARG6 = *numpsets;
9250 }
9251 }
9252 break;
9253 # if defined(SOLARIS_PSET_GET_NAME)
9254 case VKI_PSET_GET_NAME:
9255 /* Libc: int pset_get_name(psetid_t psetid, char *buf, uint_t len); */
9256 PRINT("sys_pset ( %ld, %ld, %#lx, %ld )", SARG1, SARG2, ARG3, SARG4);
9257 PRE_REG_READ4(long, SC2("pset", "get_name"), int, subcode,
9258 vki_psetid_t, pset, char *, buf, vki_uint_t, len);
9259 PRE_MEM_WRITE("pset(buf)", ARG3, ARG4);
9260 break;
9261 # endif /* SOLARIS_PSET_GET_NAME */
9262 case VKI_PSET_SETATTR:
9263 /* Libc: int pset_setattr(psetid_t pset, uint_t attr); */
9264       PRINT("sys_pset ( %ld, %ld, %lu )", SARG1, SARG2, ARG3);
9265 PRE_REG_READ3(long, SC2("pset", "setattr"), int, subcode,
9266 vki_psetid_t, pset, vki_uint_t, attr);
9267 break;
9268 case VKI_PSET_GETATTR:
9269 /* Libc: int pset_getattr(psetid_t pset, uint_t *attr); */
9270 PRINT("sys_pset ( %ld, %ld, %#lx )", SARG1, SARG2, ARG3);
9271 PRE_REG_READ3(long, SC2("pset", "getattr"), int, subcode,
9272 vki_psetid_t, pset, vki_uint_t *, attr);
9273 PRE_MEM_WRITE("pset(attr)", ARG3, sizeof(vki_uint_t));
9274 break;
9275 case VKI_PSET_ASSIGN_FORCED:
9276 /* Libc: int pset_assign_forced(psetid_t pset, processorid_t cpu,
9277 psetid_t *opset); */
9278 PRINT("sys_pset ( %ld, %ld, %ld, %#lx )", SARG1, SARG2, SARG3, ARG4);
9279 PRE_REG_READ4(long, SC2("pset", "assign_forced"), int, subcode,
9280 vki_psetid_t, pset, vki_processorid_t, cpu,
9281 vki_psetid_t *, opset);
9282 if (ARG4 != 0)
9283 PRE_MEM_WRITE("pset(opset)", ARG4, sizeof(vki_psetid_t));
9284 break;
9285 default:
9286 VG_(unimplemented)("Syswrap of pset syscall with subcode %ld.", SARG1);
9287 /*NOTREACHED*/
9288 break;
9289 }
9290 }
9291
9292 POST(sys_pset)
9293 {
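   /* Note: for the PSET_INFO and PSET_LIST subcodes, ARG6 carries the buffer
      capacity stashed by the PRE handler; the writes below are clamped to it
      so that only what the kernel could actually have filled in is marked as
      defined. */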
9294 switch (ARG1 /*subcode*/) {
9295 case VKI_PSET_CREATE:
9296 POST_MEM_WRITE(ARG2, sizeof(vki_psetid_t));
9297 break;
9298 case VKI_PSET_DESTROY:
9299 break;
9300 case VKI_PSET_ASSIGN:
9301 if (ARG4 != 0)
9302 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9303 break;
9304 case VKI_PSET_INFO:
9305 if (ARG3 != 0)
9306 POST_MEM_WRITE(ARG3, sizeof(int));
9307 if (ARG4 != 0)
9308 POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
9309 if ((ARG4 != 0) && (ARG5 != 0)) {
9310 vki_uint_t *numcpus = (vki_uint_t *) ARG4;
9311 POST_MEM_WRITE(ARG5, MIN(*numcpus, ARG6) * sizeof(vki_processorid_t));
9312 }
9313 break;
9314 case VKI_PSET_BIND:
9315 if (ARG5 != 0)
9316 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9317 break;
9318 case VKI_PSET_BIND_LWP:
9319 if (ARG5 != 0)
9320 POST_MEM_WRITE(ARG5, sizeof(vki_psetid_t));
9321 break;
9322 case VKI_PSET_GETLOADAVG:
9323 if (ARG3 != 0)
9324 POST_MEM_WRITE(ARG3, MIN(SARG4, VKI_LOADAVG_NSTATS) * sizeof(double));
9325 break;
9326 case VKI_PSET_LIST:
9327 if (ARG3 != 0)
9328 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9329 if ((ARG2 != 0) && (ARG3 != 0)) {
9330 vki_uint_t *numpsets = (vki_uint_t *) ARG3;
9331 POST_MEM_WRITE(ARG2, MIN(*numpsets, ARG6) * sizeof(vki_psetid_t));
9332 }
9333 break;
9334 # if defined(SOLARIS_PSET_GET_NAME)
9335 case VKI_PSET_GET_NAME:
9336 POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
9337 break;
9338 # endif /* SOLARIS_PSET_GET_NAME */
9339 case VKI_PSET_SETATTR:
9340 break;
9341 case VKI_PSET_GETATTR:
9342 POST_MEM_WRITE(ARG3, sizeof(vki_uint_t));
9343 break;
9344 case VKI_PSET_ASSIGN_FORCED:
9345 if (ARG4 != 0)
9346 POST_MEM_WRITE(ARG4, sizeof(vki_psetid_t));
9347 break;
9348 default:
9349 vg_assert(0);
9350 break;
9351 }
9352 }
9353
9354 PRE(sys_resolvepath)
9355 {
9356 /* int resolvepath(const char *path, char *buf, size_t bufsiz); */
9357 PRINT("sys_resolvepath ( %#lx(%s), %#lx, %lu )", ARG1, (HChar *) ARG1, ARG2,
9358 ARG3);
9359 PRE_REG_READ3(long, "resolvepath", const char *, path, char *, buf,
9360 vki_size_t, bufsiz);
9361
9362 PRE_MEM_RASCIIZ("resolvepath(path)", ARG1);
9363 PRE_MEM_WRITE("resolvepath(buf)", ARG2, ARG3);
9364 }
9365
9366 POST(sys_resolvepath)
9367 {
9368 POST_MEM_WRITE(ARG2, RES);
9369 }
9370
9371 PRE(sys_lwp_mutex_timedlock)
9372 {
9373 /* int lwp_mutex_timedlock(lwp_mutex_t *lp, timespec_t *tsp,
9374 uintptr_t owner); */
9375 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9376 *flags |= SfMayBlock;
9377 PRINT("lwp_mutex_timedlock ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
9378 PRE_REG_READ3(long, "lwp_mutex_timedlock", lwp_mutex_t *, lp,
9379 timespec_t *, tsp, uintptr_t, owner);
9380
9381 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_flag)", lp->vki_mutex_flag);
9382 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_type)", lp->vki_mutex_type);
9383 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_owner)",
9384 lp->vki_mutex_owner);
9385 PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_ownerpid)",
9386 lp->vki_mutex_ownerpid);
9387 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_lockw)", lp->vki_mutex_lockw);
9388 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_lockw)",
9389 lp->vki_mutex_lockw);*/
9390 PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_waiters)",
9391 lp->vki_mutex_waiters);
9392 /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_waiters)",
9393 lp->vki_mutex_waiters);*/
9394 if (ARG2) {
9395 PRE_MEM_READ("lwp_mutex_timedlock(tsp)", ARG2, sizeof(vki_timespec_t));
9396 /*PRE_MEM_WRITE("lwp_mutex_timedlock(tsp)", ARG2,
9397 sizeof(vki_timespec_t));*/
9398 }
9399 }
9400
9401 POST(sys_lwp_mutex_timedlock)
9402 {
9403 vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
9404 POST_FIELD_WRITE(lp->vki_mutex_owner);
9405 POST_FIELD_WRITE(lp->vki_mutex_ownerpid);
9406 POST_FIELD_WRITE(lp->vki_mutex_lockw);
9407 POST_FIELD_WRITE(lp->vki_mutex_waiters);
9408 if (ARG2)
9409 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
9410 }
9411
9412 PRE(sys_lwp_rwlock_sys)
9413 {
9414 /* int lwp_rwlock_sys(int subcode, lwp_rwlock_t *rwlp, timespec_t *tsp); */
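   /* Subcodes (as decoded below): 0 = rdlock, 1 = wrlock, 2 = tryrdlock,
      3 = trywrlock, 4 = unlock. */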
9415 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
9416 switch (ARG1 /*subcode*/) {
9417 case 0:
9418 case 1:
9419 case 2:
9420 case 3:
9421 *flags |= SfMayBlock;
9422 switch (ARG1 /*subcode*/) {
9423 case 0:
9424 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9425 PRE_REG_READ3(long, SC2("lwp_rwlock", "rdlock"), int, subcode,
9426 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9427 break;
9428 case 1:
9429 PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9430 PRE_REG_READ3(long, SC2("lwp_rwlock", "wrlock"), int, subcode,
9431 lwp_rwlock_t *, rwlp, timespec_t *, tsp);
9432 break;
9433 case 2:
9434 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9435 PRE_REG_READ2(long, SC2("lwp_rwlock", "tryrdlock"), int, subcode,
9436 lwp_rwlock_t *, rwlp);
9437 break;
9438 case 3:
9439 PRINT("sys_lwp_rwlock ( %ld, %#lx )", SARG1, ARG2);
9440 PRE_REG_READ2(long, SC2("lwp_rwlock", "trywrlock"), int, subcode,
9441 lwp_rwlock_t *, rwlp);
9442 break;
9443 default:
9444 vg_assert(0);
9445 break;
9446 }
9447
9448 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_type)", rwlp->vki_rwlock_type);
9449 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
9450 rwlp->vki_rwlock_readers);
9451 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
9452 rwlp->vki_rwlock_readers);*/
9453
9454 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
9455 rwlp->mutex.vki_mutex_type);
9456 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_owner)",
9457 rwlp->mutex.vki_mutex_owner);
9458 PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_ownerpid)",
9459 rwlp->mutex.vki_mutex_ownerpid);
9460 /* The mutex_lockw member is not really read by the kernel for this
9461 syscall but it seems better to mark it that way because when locking
9462 an rwlock the associated mutex has to be locked. */
9463 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_lockw)",
9464 rwlp->mutex.vki_mutex_lockw);
9465 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_lockw)",
9466 rwlp->mutex.vki_mutex_lockw);*/
9467 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_waiters)",
9468 rwlp->mutex.vki_mutex_waiters);
9469 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_waiters)",
9470 rwlp->mutex.vki_mutex_waiters);*/
9471
9472 if ((ARG1 == 0 || ARG1 == 1) && ARG3)
9473 PRE_MEM_READ("lwp_rwlock(tsp)", ARG3, sizeof(vki_timespec_t));
9474 break;
9475 case 4:
9476 PRINT("sys_lwp_rwlock( %ld, %#lx )", SARG1, ARG2);
9477 PRE_REG_READ2(long, SC2("lwp_rwlock", "unlock"), int, subcode,
9478 lwp_rwlock_t *, rwlp);
9479 PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
9480 rwlp->mutex.vki_mutex_type);
9481 PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
9482 rwlp->vki_rwlock_readers);
9483 /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
9484 rwlp->vki_rwlock_readers);*/
9485 break;
9486 default:
9487 VG_(unimplemented)("Syswrap of the lwp_rwlock_sys call with subcode %ld.",
9488 SARG1);
9489 /*NOTREACHED*/
9490 break;
9491 }
9492 }
9493
9494 POST(sys_lwp_rwlock_sys)
9495 {
9496 vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
9497 switch (ARG1 /*subcode*/) {
9498 case 0:
9499 case 1:
9500 case 2:
9501 case 3:
9502 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
9503 POST_FIELD_WRITE(rwlp->vki_rwlock_owner);
9504 POST_FIELD_WRITE(rwlp->vki_rwlock_ownerpid);
9505 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_lockw);
9506 POST_FIELD_WRITE(rwlp->mutex.vki_mutex_waiters);
9507 break;
9508 case 4:
9509 POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
9510 break;
9511 default:
9512 vg_assert(0);
9513 break;
9514 }
9515 }
9516
9517 PRE(sys_lwp_sema_timedwait)
9518 {
9519 /* int lwp_sema_timedwait(lwp_sema_t *sema, timespec_t *timeout,
9520 int check_park); */
9521 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
9522 *flags |= SfMayBlock;
9523    PRINT("sys_lwp_sema_timedwait ( %#lx, %#lx, %ld )", ARG1, ARG2, SARG3);
9524 PRE_REG_READ3(long, "lwp_sema_timedwait", lwp_sema_t *, sema,
9525 timespec_t *, timeout, int, check_park);
9526
9527 PRE_FIELD_READ("lwp_sema_timedwait(sema->type)", sema->vki_sema_type);
9528 PRE_FIELD_READ("lwp_sema_timedwait(sema->count)", sema->vki_sema_count);
9529 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->count)",
9530 sema->vki_sema_count);*/
9531 PRE_FIELD_READ("lwp_sema_timedwait(sema->waiters)", sema->vki_sema_waiters);
9532 /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->waiters)",
9533 sema->vki_sema_waiters);*/
9534 if (ARG2) {
9535 PRE_MEM_READ("lwp_sema_timedwait(timeout)", ARG2,
9536 sizeof(vki_timespec_t));
9537 /*PRE_MEM_WRITE("lwp_sema_timedwait(timeout)", ARG2,
9538 sizeof(vki_timespec_t));*/
9539 }
9540 }
9541
9542 POST(sys_lwp_sema_timedwait)
9543 {
9544 vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
9545 POST_FIELD_WRITE(sema->vki_sema_count);
9546 POST_FIELD_WRITE(sema->vki_sema_waiters);
9547 if (ARG2)
9548 POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
9549 }
9550
9551 PRE(sys_zone)
9552 {
9553 /* Kernel: long zone(int cmd, void *arg1, void *arg2, void *arg3,
9554 void *arg4);
9555 */
9556 switch (ARG1 /*cmd*/) {
9557 case VKI_ZONE_CREATE:
9558 /* Libc: zoneid_t zone_create(const char *name, const char *root,
9559 const struct priv_set *privs,
9560 const char *rctls, size_t rctlsz,
9561 const char *zfs, size_t zfssz,
9562 int *extended_error, int match,
9563 int doi, const bslabel_t *label,
9564 int flags);
9565 Kernel: zoneid_t zone_create(zone_def *zd);
9566 */
9567 PRINT("sys_zone ( %ld, %#lx )", SARG1, ARG2);
9568 PRE_REG_READ2(long, SC2("zone", "create"), int, cmd,
9569 vki_zone_def *, zd);
9570
9571 vki_zone_def *zd = (vki_zone_def *) ARG2;
9572 PRE_FIELD_READ("zone(zd.zone_name)", zd->zone_name);
9573 PRE_FIELD_READ("zone(zd.zone_root)", zd->zone_root);
9574 PRE_FIELD_READ("zone(zd.zone_privs)", zd->zone_privs);
9575 PRE_FIELD_READ("zone(zd.zone_privssz)", zd->zone_privssz);
9576 PRE_FIELD_READ("zone(zd.rctlbuf)", zd->rctlbuf);
9577 PRE_FIELD_READ("zone(zd.rctlbufsz)", zd->rctlbufsz);
9578 PRE_FIELD_READ("zone(zd.zfsbuf)", zd->zfsbuf);
9579 PRE_FIELD_READ("zone(zd.zfsbufsz)", zd->zfsbufsz);
9580 PRE_FIELD_READ("zone(zd.extended_error)", zd->extended_error);
9581 PRE_FIELD_READ("zone(zd.match)", zd->match);
9582 PRE_FIELD_READ("zone(zd.doi)", zd->doi);
9583 PRE_FIELD_READ("zone(zd.label)", zd->label);
9584 PRE_FIELD_READ("zone(zd.flags)", zd->flags);
9585
9586 if (ML_(safe_to_deref((void *)ARG2, sizeof(vki_zone_def)))) {
9587 if (zd->zone_name)
9588 PRE_MEM_RASCIIZ("zone(zd.zone_name)", (Addr) zd->zone_name);
9589 if (zd->zone_root)
9590 PRE_MEM_RASCIIZ("zone(zd.zone_root)", (Addr) zd->zone_root);
9591 PRE_MEM_READ("zone(zd.zone_privs)", (Addr) zd->zone_privs,
9592 zd->zone_privssz);
9593 PRE_MEM_READ("zone(zd.rctlbuf)", (Addr) zd->rctlbuf,
9594 zd->rctlbufsz);
9595 PRE_MEM_READ("zone(zd.zfsbuf)",
9596 (Addr) zd->zfsbuf, zd->zfsbufsz);
9597 if (zd->label)
9598 PRE_MEM_READ("zone(zd.label)", (Addr) zd->label,
9599 sizeof(vki_bslabel_t));
9600 }
9601 break;
9602 case VKI_ZONE_DESTROY:
9603 /* Libc: int zone_destroy(zoneid_t zoneid); */
9604 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
9605 PRE_REG_READ2(long, SC2("zone", "destroy"), int, cmd,
9606 vki_zoneid_t, zoneid);
9607 break;
9608 case VKI_ZONE_GETATTR:
9609 /* Libc: ssize_t zone_getattr(zoneid_t zoneid, int attr,
9610 void *valp, size_t size);
9611 */
9612 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
9613 SARG1, SARG2, SARG3, ARG4, SARG5);
9614 PRE_REG_READ5(long, SC2("zone", "getattr"), int, cmd,
9615 vki_zoneid_t, zoneid, int, attr, void *, valp,
9616 vki_size_t, size);
9617 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
9618 break;
9619 case VKI_ZONE_ENTER:
9620 /* Libc: int zone_enter(zoneid_t zoneid); */
9621 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
9622 PRE_REG_READ2(long, SC2("zone", "enter"), int, cmd,
9623 vki_zoneid_t, zoneid);
9624 break;
9625 case VKI_ZONE_LIST:
9626 /* Libc: int zone_list(zoneid_t *zonelist, uint_t *numzones); */
9627 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9628 PRE_REG_READ3(long, SC2("zone", "list"), int, cmd,
9629 vki_zoneid_t *, zonelist, vki_uint_t *, numzones);
9630
9631 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
9632
9633 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
9634 if (ARG2)
9635 PRE_MEM_WRITE("zone(zonelist)", ARG2,
9636 *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
9637 }
9638 break;
9639 case VKI_ZONE_SHUTDOWN:
9640 /* Libc: int zone_shutdown(zoneid_t zoneid); */
9641 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
9642 PRE_REG_READ2(long, SC2("zone", "shutdown"), int, cmd,
9643 vki_zoneid_t, zoneid);
9644 break;
9645 case VKI_ZONE_LOOKUP:
9646 /* Libc: zoneid_t zone_lookup(const char *name); */
9647 PRINT("sys_zone ( %ld, %#lx(%s) )", SARG1, ARG2, (HChar *) ARG2);
9648 PRE_REG_READ2(long, SC2("zone", "lookup"), int, cmd,
9649 const char *, name);
9650 if (ARG2)
9651 PRE_MEM_RASCIIZ("zone(name)", ARG2);
9652 break;
9653 case VKI_ZONE_BOOT:
9654 /* Libc: int zone_boot(zoneid_t zoneid); */
9655 PRINT("sys_zone ( %ld, %ld )", SARG1, SARG2);
9656 PRE_REG_READ2(long, SC2("zone", "boot"), int, cmd,
9657 vki_zoneid_t, zoneid);
9658 break;
9659 case VKI_ZONE_SETATTR:
9660 /* Libc: int zone_setattr(zoneid_t zoneid, int attr, void *valp,
9661 size_t size);
9662 */
9663 PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %lu )",
9664 SARG1, SARG2, SARG3, ARG4, ARG5);
9665 PRE_REG_READ5(long, SC2("zone", "setattr"), int, cmd,
9666 vki_zoneid_t, zoneid, int, attr, void *, valp,
9667 vki_size_t, size);
9668 PRE_MEM_READ("zone(valp)", ARG4, ARG5);
9669 break;
9670 case VKI_ZONE_ADD_DATALINK:
9671 /* Libc: int zone_add_datalink(zoneid_t zoneid,
9672 datalink_id_t linkid);
9673 */
9674 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
9675 PRE_REG_READ3(long, SC2("zone", "add_datalink"), int, cmd,
9676 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
9677 break;
9678 case VKI_ZONE_DEL_DATALINK:
9679 /* Libc: int zone_remove_datalink(zoneid_t zoneid,
9680 datalink_id_t linkid);
9681 */
9682 PRINT("sys_zone ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
9683 PRE_REG_READ3(long, SC2("zone", "del_datalink"), int, cmd,
9684 vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
9685 break;
9686 case VKI_ZONE_CHECK_DATALINK:
9687 /* Libc: int zone_check_datalink(zoneid_t *zoneidp,
9688 datalink_id_t linkid);
9689 */
9690 PRINT("sys_zone ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
9691 PRE_REG_READ3(long, SC2("zone", "check_datalink"), int, cmd,
9692 vki_zoneid_t *, zoneidp, vki_datalink_id_t, linkid);
9693 PRE_MEM_WRITE("zone(zoneidp)", ARG2, sizeof(vki_zoneid_t));
9694 break;
9695 case VKI_ZONE_LIST_DATALINK:
9696 /* Libc: int zone_list_datalink(zoneid_t zoneid, int *dlnump,
9697 datalink_id_t *linkids);
9698 */
9699 PRINT("sys_zone ( %ld, %ld, %#lx, %#lx )", SARG1, SARG2, ARG3, ARG4);
9700 PRE_REG_READ4(long, SC2("zone", "list_datalink"), int, cmd,
9701 vki_zoneid_t, zoneid, int *, dlnump,
9702 vki_datalink_id_t *, linkids);
9703
9704 PRE_MEM_WRITE("zone(dlnump)", ARG3, sizeof(int));
9705 if (ML_(safe_to_deref((void *) ARG3, sizeof(int)))) {
9706 if (ARG4)
9707 PRE_MEM_WRITE("zone(linkids)", ARG4,
9708 *(int *) ARG3 * sizeof(vki_datalink_id_t));
9709 }
9710 break;
9711 #if defined(SOLARIS_ZONE_DEFUNCT)
9712 case VKI_ZONE_LIST_DEFUNCT:
9713 /* Libc: int zone_list_defunct(uint64_t *uniqidlist,
9714 uint_t *numzones);
9715 */
9716 PRINT("sys_zone ( %ld, %#lx, %#lx )", SARG1, ARG2, ARG3);
9717 PRE_REG_READ3(long, SC2("zone", "list_defunct"), int, cmd,
9718 vki_uint64_t *, uniqidlist, vki_uint_t *, numzones);
9719
9720 PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
9721
9722 if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
9723 if (ARG2)
9724 PRE_MEM_WRITE("zone(uniqidlist)", ARG2,
9725 *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
9726 }
9727 break;
9728 case VKI_ZONE_GETATTR_DEFUNCT:
9729 /* Libc: ssize_t zone_getattr_defunct(uint64_t uniqid, int attr,
9730 void *valp, size_t size);
9731 Kernel: ssize_t zone_getattr_defunct(uint64_t *uniqid, int attr,
9732 void *valp, size_t size);
9733 */
9734 PRINT("sys_zone ( %ld, %#lx, %ld, %#lx, %lu )",
9735 SARG1, ARG2, SARG3, ARG4, ARG5);
9736 PRE_REG_READ5(long, SC2("zone", "getattr_defunct"), int, cmd,
9737 vki_uint64_t *, uniqid, int, attr,
9738 void *, valp, vki_size_t, size);
9739
9740 PRE_MEM_READ("zone(uniqid)", ARG2, sizeof(vki_uint64_t));
9741 PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
9742 break;
9743 #endif /* SOLARIS_ZONE_DEFUNCT */
9744 default:
9745 VG_(unimplemented)("Syswrap of the zone call with cmd %ld.", SARG1);
9746 /*NOTREACHED*/
9747 break;
9748 }
9749
9750 }
9751
9752 POST(sys_zone)
9753 {
9754 switch (ARG1 /*cmd*/) {
9755 case VKI_ZONE_CREATE:
9756 case VKI_ZONE_DESTROY:
9757 break;
9758 case VKI_ZONE_GETATTR:
9759 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
9760 break;
9761 case VKI_ZONE_ENTER:
9762 break;
9763 case VKI_ZONE_LIST:
9764 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
9765 break;
9766 case VKI_ZONE_SHUTDOWN:
9767 case VKI_ZONE_LOOKUP:
9768 case VKI_ZONE_BOOT:
9769 case VKI_ZONE_SETATTR:
9770 case VKI_ZONE_ADD_DATALINK:
9771 case VKI_ZONE_DEL_DATALINK:
9772 break;
9773 case VKI_ZONE_CHECK_DATALINK:
9774 POST_MEM_WRITE(ARG2, sizeof(vki_zoneid_t));
9775 break;
9776 case VKI_ZONE_LIST_DATALINK:
9777 POST_MEM_WRITE(ARG4, *(int *) ARG3 * sizeof(vki_datalink_id_t));
9778 break;
9779 #if defined(SOLARIS_ZONE_DEFUNCT)
9780 case VKI_ZONE_LIST_DEFUNCT:
9781 POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
9782 break;
9783 case VKI_ZONE_GETATTR_DEFUNCT:
9784 POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
9785 break;
9786 #endif /* SOLARIS_ZONE_DEFUNCT */
9787 default:
9788 vg_assert(0);
9789 break;
9790 }
9791 }
9792
9793 PRE(sys_getcwd)
9794 {
9795 /* int getcwd(char *buf, size_t size); */
9796 /* Note: Generic getcwd() syswrap can't be used because it expects
9797 a different return value. */
9798 PRINT("sys_getcwd ( %#lx, %lu )", ARG1, ARG2);
9799 PRE_REG_READ2(long, "getcwd", char *, buf, vki_size_t, size);
9800 PRE_MEM_WRITE("getcwd(buf)", ARG1, ARG2);
9801 }
9802
9803 POST(sys_getcwd)
9804 {
9805 POST_MEM_WRITE(ARG1, VG_(strlen)((HChar*)ARG1) + 1);
9806 }
9807
9808 PRE(sys_so_socket)
9809 {
9810 /* int so_socket(int family, int type, int protocol, char *devpath,
9811 int version); */
9812    PRINT("sys_so_socket ( %ld, %ld, %ld, %#lx(%s), %ld )", SARG1, SARG2, SARG3,
9813 ARG4, (HChar *) ARG4, SARG5);
9814 PRE_REG_READ5(long, "socket", int, family, int, type, int, protocol,
9815 char *, devpath, int, version);
9816 if (ARG4)
9817 PRE_MEM_RASCIIZ("socket(devpath)", ARG4);
9818 }
9819
9820 POST(sys_so_socket)
9821 {
9822 SysRes r;
9823 r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
9824 SET_STATUS_from_SysRes(r);
9825 }
9826
9827 PRE(sys_so_socketpair)
9828 {
9829 /* int so_socketpair(int sv[2]); */
9830 /* This syscall is used to connect two already created sockets together. */
9831 PRINT("sys_so_socketpair ( %#lx )", ARG1);
9832 PRE_REG_READ1(long, "socketpair", int *, sv);
9833 PRE_MEM_READ("socketpair(sv)", ARG1, 2 * sizeof(int));
9834 /*PRE_MEM_WRITE("socketpair(sv)", ARG1, 2 * sizeof(int));*/
9835 if (ML_(safe_to_deref)((void*)ARG1, 2 * sizeof(int))) {
9836 int *fds = (int*)ARG1;
9837 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, False))
9838 SET_STATUS_Failure(VKI_EBADF);
9839 else if (!ML_(fd_allowed)(fds[1], "socketpair", tid, False))
9840 SET_STATUS_Failure(VKI_EBADF);
9841 }
9842 }
9843
9844 POST(sys_so_socketpair)
9845 {
9846    /* The kernel can return new file descriptors; in such a case we have to
9847 validate them. */
9848 int *fds = (int*)ARG1;
9849 POST_MEM_WRITE(ARG1, 2 * sizeof(int));
9850 if (!ML_(fd_allowed)(fds[0], "socketpair", tid, True))
9851 SET_STATUS_Failure(VKI_EMFILE);
9852 if (!ML_(fd_allowed)(fds[1], "socketpair", tid, True))
9853 SET_STATUS_Failure(VKI_EMFILE);
9854 if (FAILURE) {
9855       /* One or both of the file descriptors weren't allowed; close the newly
9856 created file descriptors but don't close the already recorded
9857 ones. */
9858 if (!ML_(fd_recorded)(fds[0]))
9859 VG_(close)(fds[0]);
9860 if (!ML_(fd_recorded)(fds[1]))
9861 VG_(close)(fds[1]);
9862 }
9863 else if (VG_(clo_track_fds)) {
9864       /* Everything went better than expected; record the newly created file
9865 descriptors. Note: If the kernel actually returns the original file
9866 descriptors, then ML_(record_fd_open_nameless) notices that these
9867          file descriptors have already been recorded. */
9868 ML_(record_fd_open_nameless)(tid, fds[0]);
9869 ML_(record_fd_open_nameless)(tid, fds[1]);
9870 }
9871 }
9872
9873 PRE(sys_bind)
9874 {
9875 /* int bind(int s, struct sockaddr *name, socklen_t namelen,
9876 int version); */
9877 PRINT("sys_bind ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
9878 PRE_REG_READ4(long, "bind", int, s, struct sockaddr *, name,
9879 vki_socklen_t, namelen, int, version);
9880 ML_(generic_PRE_sys_bind)(tid, ARG1, ARG2, ARG3);
9881 }
9882
9883 PRE(sys_listen)
9884 {
9885 /* int listen(int s, int backlog, int version); */
9886 PRINT("sys_listen ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
9887 PRE_REG_READ3(long, "listen", int, s, int, backlog, int, version);
9888 }
9889
9890 PRE(sys_accept)
9891 {
9892 #if defined(SOLARIS_NEW_ACCEPT_SYSCALL)
9893 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
9894 int version, int flags); */
9895 *flags |= SfMayBlock;
9896 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld, %ld )", SARG1, ARG2, ARG3, SARG4,
9897 SARG5);
9898 PRE_REG_READ5(long, "accept", int, s, struct sockaddr *, addr,
9899 socklen_t *, addrlen, int, version, int, flags);
9900 #else
9901 /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
9902 int version); */
9903 *flags |= SfMayBlock;
9904 PRINT("sys_accept ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
9905 PRE_REG_READ4(long, "accept", int, s, struct sockaddr *, addr,
9906 socklen_t *, addrlen, int, version);
9907 #endif /* SOLARIS_NEW_ACCEPT_SYSCALL */
9908 ML_(generic_PRE_sys_accept)(tid, ARG1, ARG2, ARG3);
9909 }
9910
9911 POST(sys_accept)
9912 {
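   /* The generic accept() post-handler validates the new file descriptor and
      marks the returned address buffer as written. */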
9913 SysRes r;
9914 r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
9915 ARG1, ARG2, ARG3);
9916 SET_STATUS_from_SysRes(r);
9917 }
9918
9919 PRE(sys_connect)
9920 {
9921 /* int connect(int s, struct sockaddr *name, socklen_t namelen,
9922 int version); */
9923 *flags |= SfMayBlock;
9924 PRINT("sys_connect ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
9925 PRE_REG_READ4(long, "connect", int, s, struct sockaddr *, name,
9926 vki_socklen_t, namelen, int, version);
9927 ML_(generic_PRE_sys_connect)(tid, ARG1, ARG2, ARG3);
9928 }
9929
9930 PRE(sys_shutdown)
9931 {
9932 /* Kernel: int shutdown(int sock, int how, int version);
9933 Libc: int shutdown(int sock, int how);
9934 */
9935 *flags |= SfMayBlock;
9936 PRINT("sys_shutdown ( %ld, %ld, %ld )", SARG1, SARG2, SARG3);
9937 PRE_REG_READ3(int, "shutdown", int, sock, int, how, int, version);
9938
9939 /* Be strict. */
9940 if (!ML_(fd_allowed)(ARG1, "shutdown", tid, False))
9941 SET_STATUS_Failure(VKI_EBADF);
9942 }
9943
9944 PRE(sys_recv)
9945 {
9946 /* ssize_t recv(int s, void *buf, size_t len, int flags); */
9947 *flags |= SfMayBlock;
9948 PRINT("sys_recv ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
9949 PRE_REG_READ4(long, "recv", int, s, void *, buf, vki_size_t, len,
9950 int, flags);
9951 ML_(generic_PRE_sys_recv)(tid, ARG1, ARG2, ARG3);
9952 }
9953
9954 POST(sys_recv)
9955 {
9956 ML_(generic_POST_sys_recv)(tid, RES, ARG1, ARG2, ARG3);
9957 }
9958
9959 PRE(sys_recvfrom)
9960 {
9961 /* ssize_t recvfrom(int s, void *buf, size_t len, int flags,
9962 struct sockaddr *from, socklen_t *fromlen); */
9963 *flags |= SfMayBlock;
9964 PRINT("sys_recvfrom ( %ld, %#lx, %lu, %ld, %#lx, %#lx )", SARG1, ARG2, ARG3,
9965 SARG4, ARG5, ARG6);
9966 PRE_REG_READ6(long, "recvfrom", int, s, void *, buf, vki_size_t, len,
9967 int, flags, struct sockaddr *, from, socklen_t *, fromlen);
9968 ML_(generic_PRE_sys_recvfrom)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
9969 }
9970
9971 POST(sys_recvfrom)
9972 {
9973 ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
9974 ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
9975 }
9976
9977 PRE(sys_recvmsg)
9978 {
9979 /* ssize_t recvmsg(int s, struct msghdr *msg, int flags); */
9980 *flags |= SfMayBlock;
9981 PRINT("sys_recvmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
9982 PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
9983 ML_(generic_PRE_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
9984 }
9985
9986 POST(sys_recvmsg)
9987 {
9988 ML_(generic_POST_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2, RES);
9989 }
9990
9991 PRE(sys_send)
9992 {
9993 /* ssize_t send(int s, const void *msg, size_t len, int flags); */
9994 *flags |= SfMayBlock;
9995 PRINT("sys_send ( %ld, %#lx, %lu, %ld )", SARG1, ARG2, ARG3, SARG4);
9996 PRE_REG_READ4(long, "send", int, s, const void *, msg, vki_size_t, len,
9997 int, flags);
9998 ML_(generic_PRE_sys_send)(tid, ARG1, ARG2, ARG3);
9999 }
10000
10001 PRE(sys_sendmsg)
10002 {
10003 /* ssize_t sendmsg(int s, const struct msghdr *msg, int flags); */
10004 *flags |= SfMayBlock;
10005 PRINT("sys_sendmsg ( %ld, %#lx, %ld )", SARG1, ARG2, SARG3);
10006 PRE_REG_READ3(long, "sendmsg", int, s, const struct msghdr *, msg,
10007 int, flags);
10008 ML_(generic_PRE_sys_sendmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
10009 }
10010
10011 PRE(sys_sendto)
10012 {
10013 /* ssize_t sendto(int s, const void *msg, size_t len, int flags,
10014 const struct sockaddr *to, int tolen); */
10015 *flags |= SfMayBlock;
10016 PRINT("sys_sendto ( %ld, %#lx, %lu, %ld, %#lx, %ld )", SARG1, ARG2, ARG3,
10017 SARG4, ARG5, SARG6);
10018 PRE_REG_READ6(long, "sendto", int, s, const void *, msg, vki_size_t, len,
10019 int, flags, const struct sockaddr *, to, int, tolen);
10020 ML_(generic_PRE_sys_sendto)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
10021 }
10022
10023 PRE(sys_getpeername)
10024 {
10025 /* Kernel: int getpeername(int s, struct sockaddr *name,
10026 socklen_t *namelen, int version);
10027 Libc: int getpeername(int s, struct sockaddr *name,
10028 socklen_t *namelen);
10029 */
10030 *flags |= SfMayBlock;
10031 PRINT("sys_getpeername ( %ld, %#lx, %#lx, %ld )",
10032 SARG1, ARG2, ARG3, SARG4);
10033 PRE_REG_READ4(long, "getpeername", int, s, struct vki_sockaddr *, name,
10034 vki_socklen_t *, namelen, int, version);
10035 ML_(buf_and_len_pre_check)(tid, ARG2, ARG3, "getpeername(name)",
10036 "getpeername(namelen)");
10037
10038 /* Be strict. */
10039 if (!ML_(fd_allowed)(ARG1, "getpeername", tid, False))
10040 SET_STATUS_Failure(VKI_EBADF);
10041 }
10042
10043 POST(sys_getpeername)
10044 {
10045 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES),
10046 ARG2, ARG3, "getpeername(namelen)");
10047 }
10048
10049 PRE(sys_getsockname)
10050 {
10051 /* int getsockname(int s, struct sockaddr *name, socklen_t *namelen,
10052 int version); */
10053 PRINT("sys_getsockname ( %ld, %#lx, %#lx, %ld )", SARG1, ARG2, ARG3, SARG4);
10054 PRE_REG_READ4(long, "getsockname", int, s, struct sockaddr *, name,
10055 socklen_t *, namelen, int, version);
10056 ML_(generic_PRE_sys_getsockname)(tid, ARG1, ARG2, ARG3);
10057 }
10058
10059 POST(sys_getsockname)
10060 {
10061 ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
10062 ARG1, ARG2, ARG3);
10063 }
10064
10065 PRE(sys_getsockopt)
10066 {
10067 /* int getsockopt(int s, int level, int optname, void *optval,
10068 socklen_t *optlen, int version); */
10069 PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx, %ld )", SARG1, SARG2,
10070 SARG3, ARG4, ARG5, SARG6);
10071 PRE_REG_READ6(long, "getsockopt", int, s, int, level, int, optname,
10072 void *, optval, socklen_t *, optlen, int, version);
10073 if (ARG4)
10074 ML_(buf_and_len_pre_check)(tid, ARG4, ARG5, "getsockopt(optval)",
10075 "getsockopt(optlen)");
10076 }
10077
10078 POST(sys_getsockopt)
10079 {
10080 if (ARG4)
10081 ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES), ARG4,
10082 ARG5, "getsockopt(optlen_out)");
10083 }
10084
10085 PRE(sys_setsockopt)
10086 {
10087 /* int setsockopt(int s, int level, int optname, const void *optval,
10088 socklen_t optlen, int version); */
10089 PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %lu, %ld )", SARG1, SARG2,
10090 SARG3, ARG4, ARG5, SARG6);
10091 PRE_REG_READ6(long, "setsockopt", int, s, int, level, int, optname,
10092 const void *, optval, vki_socklen_t, optlen, int, version);
10093 ML_(generic_PRE_sys_setsockopt)(tid, ARG1, ARG2, ARG3, ARG4, ARG5);
10094 }
10095
10096 PRE(sys_lwp_mutex_register)
10097 {
10098 /* int lwp_mutex_register(lwp_mutex_t *mp, caddr_t uaddr); */
10099 vki_lwp_mutex_t *mp = (vki_lwp_mutex_t*)ARG1;
10100 PRINT("sys_lwp_mutex_register ( %#lx, %#lx )", ARG1, ARG2);
10101 PRE_REG_READ2(long, "lwp_mutex_register", lwp_mutex_t *, mp,
10102 void *, uaddr);
10103 PRE_FIELD_READ("lwp_mutex_register(mp->mutex_type)", mp->vki_mutex_type);
10104 }
10105
10106 PRE(sys_uucopy)
10107 {
10108 /* int uucopy(const void *s1, void *s2, size_t n); */
10109 PRINT("sys_uucopy ( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
10110 PRE_REG_READ3(long, "uucopy", const void *, s1, void *, s2, vki_size_t, n);
10111
10112 /* Stay away from V segments. */
10113 if (!ML_(valid_client_addr)(ARG1, ARG3, tid, "uucopy(s1)")) {
10114 SET_STATUS_Failure(VKI_EFAULT);
10115 }
10116 if (!ML_(valid_client_addr)(ARG2, ARG3, tid, "uucopy(s2)")) {
10117 SET_STATUS_Failure(VKI_EFAULT);
10118 }
10119
10120 if (FAILURE)
10121 return;
10122
10123 /* XXX This is actually incorrect; we should be able to copy undefined
10124 values through to their new destination. */
10125 PRE_MEM_READ("uucopy(s1)", ARG1, ARG3);
10126 PRE_MEM_WRITE("uucopy(s2)", ARG2, ARG3);
10127 }
10128
10129 POST(sys_uucopy)
10130 {
10131 POST_MEM_WRITE(ARG2, ARG3);
10132 }
10133
10134 PRE(sys_umount2)
10135 {
10136 /* int umount2(const char *file, int mflag); */
10137 *flags |= SfMayBlock;
10138 PRINT("sys_umount2 ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, SARG2);
10139 PRE_REG_READ2(long, "umount2", const char *, file, int, mflag);
10140 PRE_MEM_RASCIIZ("umount2(file)", ARG1);
10141 }
10142
10143 PRE(fast_gethrtime)
10144 {
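   /* Used by gethrtime(3C). */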
10145 PRINT("fast_gethrtime ( )");
10146 PRE_REG_READ0(long, "gethrtime");
10147 }
10148
10149 PRE(fast_gethrvtime)
10150 {
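   /* Used by gethrvtime(3C). */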
10151 PRINT("fast_gethrvtime ( )");
10152 PRE_REG_READ0(long, "gethrvtime");
10153 }
10154
10155 PRE(fast_gethrestime)
10156 {
10157 /* Used by gettimeofday(3C). */
10158 PRINT("fast_gethrestime ( )");
10159 PRE_REG_READ0(long, "gethrestime");
10160 }
10161
10162 #if defined(SOLARIS_GETHRT_FASTTRAP)
10163 PRE(fast_gethrt)
10164 {
10165 /* Used by gethrtime(3C) when tsp & tscp HWCAPs are present. */
10166 PRINT("fast_gethrt ( )");
10167 PRE_REG_READ0(long, "gethrt");
10168 }
10169
10170 POST(fast_gethrt)
10171 {
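   /* A non-zero result refers to a kernel-provided mapping; update the
      address space manager's ownership record for it. */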
10172 if (RES == 0)
10173 return;
10174
10175 VG_(change_mapping_ownership)(RES, False);
10176 }
10177 #endif /* SOLARIS_GETHRT_FASTTRAP */
10178
10179 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
10180 PRE(fast_getzoneoffset)
10181 {
10182 /* Returns the kernel's time zone offset data. */
10183 PRINT("fast_getzoneoffset ( )");
10184 PRE_REG_READ0(long, "get_zone_offset");
10185 }
10186
10187 POST(fast_getzoneoffset)
10188 {
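   /* Same handling as for fast_gethrt above. */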
10189 if (RES == 0)
10190 return;
10191
10192 VG_(change_mapping_ownership)(RES, False);
10193 }
10194 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
10195
10196 #undef PRE
10197 #undef POST
10198
10199 /* ---------------------------------------------------------------------
10200 The Solaris syscall table
10201 ------------------------------------------------------------------ */
10202
10203 /* Add a Solaris-specific, arch-independent wrapper to a syscall table. */
10204 #define SOLX_(sysno, name) \
10205 WRAPPER_ENTRY_X_(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10206 #define SOLXY(sysno, name) \
10207 WRAPPER_ENTRY_XY(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10208
10209 #if defined(VGP_x86_solaris)
10210 /* Add an x86-solaris specific wrapper to a syscall table. */
10211 #define PLAX_(sysno, name) \
10212 WRAPPER_ENTRY_X_(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10213 #define PLAXY(sysno, name) \
10214 WRAPPER_ENTRY_XY(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10215
10216 #elif defined(VGP_amd64_solaris)
10217 /* Add an amd64-solaris specific wrapper to a syscall table. */
10218 #define PLAX_(sysno, name) \
10219 WRAPPER_ENTRY_X_(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10220 #define PLAXY(sysno, name) \
10221 WRAPPER_ENTRY_XY(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
10222
10223 #else
10224 # error "Unknown platform"
10225 #endif
10226
10227 /*
10228 GEN : handlers are in syswrap-generic.c
10229 SOL : handlers are in this file
10230 X_ : PRE handler only
10231 XY : PRE and POST handlers
10232 */
10233
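/* The table is indexed by VG_SOLARIS_SYSNO_INDEX(sysno) (see the lookup in
   ML_(get_solaris_syscall_entry) below); the trailing comments give the raw
   Solaris syscall numbers. */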
10234 static SyscallTableEntry syscall_table[] = {
10235 SOLX_(__NR_exit, sys_exit), /* 1 */
10236 #if defined(SOLARIS_SPAWN_SYSCALL)
10237 SOLX_(__NR_spawn, sys_spawn), /* 2 */
10238 #endif /* SOLARIS_SPAWN_SYSCALL */
10239 GENXY(__NR_read, sys_read), /* 3 */
10240 GENX_(__NR_write, sys_write), /* 4 */
10241 #if defined(SOLARIS_OLD_SYSCALLS)
10242 SOLXY(__NR_open, sys_open), /* 5 */
10243 #endif /* SOLARIS_OLD_SYSCALLS */
10244 SOLXY(__NR_close, sys_close), /* 6 */
10245 SOLX_(__NR_linkat, sys_linkat), /* 7 */
10246 #if defined(SOLARIS_OLD_SYSCALLS)
10247 GENX_(__NR_link, sys_link), /* 9 */
10248 GENX_(__NR_unlink, sys_unlink), /* 10 */
10249 #endif /* SOLARIS_OLD_SYSCALLS */
10250 SOLX_(__NR_symlinkat, sys_symlinkat), /* 11 */
10251 GENX_(__NR_chdir, sys_chdir), /* 12 */
10252 SOLX_(__NR_time, sys_time), /* 13 */
10253 #if defined(SOLARIS_OLD_SYSCALLS)
10254 GENX_(__NR_chmod, sys_chmod), /* 15 */
10255 GENX_(__NR_chown, sys_chown), /* 16 */
10256 #endif /* SOLARIS_OLD_SYSCALLS */
10257 SOLX_(__NR_brk, sys_brk), /* 17 */
10258 #if defined(SOLARIS_OLD_SYSCALLS)
10259 SOLXY(__NR_stat, sys_stat), /* 18 */
10260 #endif /* SOLARIS_OLD_SYSCALLS */
10261 SOLX_(__NR_lseek, sys_lseek), /* 19 */
10262 GENX_(__NR_getpid, sys_getpid), /* 20 */
10263 SOLXY(__NR_mount, sys_mount), /* 21 */
10264 SOLXY(__NR_readlinkat, sys_readlinkat), /* 22 */
10265 GENX_(__NR_setuid, sys_setuid), /* 23 */
10266 GENX_(__NR_getuid, sys_getuid), /* 24 */
10267 SOLX_(__NR_stime, sys_stime), /* 25 */
10268 GENX_(__NR_alarm, sys_alarm), /* 27 */
10269 #if defined(SOLARIS_OLD_SYSCALLS)
10270 SOLXY(__NR_fstat, sys_fstat), /* 28 */
10271 #endif /* SOLARIS_OLD_SYSCALLS */
10272 GENX_(__NR_pause, sys_pause), /* 29 */
10273 #if defined(SOLARIS_FREALPATHAT_SYSCALL)
10274 SOLXY(__NR_frealpathat, sys_frealpathat), /* 30 */
10275 #endif /* SOLARIS_FREALPATHAT_SYSCALL */
10276 SOLX_(__NR_stty, sys_stty), /* 31 */
10277 SOLXY(__NR_gtty, sys_gtty), /* 32 */
10278 #if defined(SOLARIS_OLD_SYSCALLS)
10279 GENX_(__NR_access, sys_access), /* 33 */
10280 #endif /* SOLARIS_OLD_SYSCALLS */
10281 GENX_(__NR_kill, sys_kill), /* 37 */
10282 SOLX_(__NR_pgrpsys, sys_pgrpsys), /* 39 */
10283 SOLXY(__NR_pipe, sys_pipe), /* 42 */
10284 GENXY(__NR_times, sys_times), /* 43 */
10285 SOLX_(__NR_faccessat, sys_faccessat), /* 45 */
10286 GENX_(__NR_setgid, sys_setgid), /* 46 */
10287 GENX_(__NR_getgid, sys_getgid), /* 47 */
10288 SOLXY(__NR_mknodat, sys_mknodat), /* 48 */
10289 SOLXY(__NR_sysi86, sys_sysi86), /* 50 */
10290 SOLXY(__NR_shmsys, sys_shmsys), /* 52 */
10291 SOLXY(__NR_semsys, sys_semsys), /* 53 */
10292 SOLXY(__NR_ioctl, sys_ioctl), /* 54 */
10293 SOLX_(__NR_fchownat, sys_fchownat), /* 56 */
10294 SOLX_(__NR_fdsync, sys_fdsync), /* 58 */
10295 SOLX_(__NR_execve, sys_execve), /* 59 */
10296 GENX_(__NR_umask, sys_umask), /* 60 */
10297 GENX_(__NR_chroot, sys_chroot), /* 61 */
10298 SOLXY(__NR_fcntl, sys_fcntl), /* 62 */
10299 SOLX_(__NR_renameat, sys_renameat), /* 64 */
10300 SOLX_(__NR_unlinkat, sys_unlinkat), /* 65 */
10301 SOLXY(__NR_fstatat, sys_fstatat), /* 66 */
10302 #if defined(VGP_x86_solaris)
10303 PLAXY(__NR_fstatat64, sys_fstatat64), /* 67 */
10304 #endif /* VGP_x86_solaris */
10305 SOLXY(__NR_openat, sys_openat), /* 68 */
10306 #if defined(VGP_x86_solaris)
10307 PLAXY(__NR_openat64, sys_openat64), /* 69 */
10308 #endif /* VGP_x86_solaris */
10309 SOLXY(__NR_tasksys, sys_tasksys), /* 70 */
10310 SOLXY(__NR_getpagesizes, sys_getpagesizes), /* 73 */
10311 SOLXY(__NR_lwp_park, sys_lwp_park), /* 77 */
10312 SOLXY(__NR_sendfilev, sys_sendfilev), /* 78 */
10313 #if defined(SOLARIS_LWP_NAME_SYSCALL)
10314 SOLXY(__NR_lwp_name, sys_lwp_name), /* 79 */
10315 #endif /* SOLARIS_LWP_NAME_SYSCALL */
10316 #if defined(SOLARIS_OLD_SYSCALLS)
10317 GENX_(__NR_rmdir, sys_rmdir), /* 79 */
10318 GENX_(__NR_mkdir, sys_mkdir), /* 80 */
10319 #endif /* SOLARIS_OLD_SYSCALLS */
10320 GENXY(__NR_getdents, sys_getdents), /* 81 */
10321 SOLXY(__NR_privsys, sys_privsys), /* 82 */
10322 SOLXY(__NR_ucredsys, sys_ucredsys), /* 83 */
10323 SOLXY(__NR_getmsg, sys_getmsg), /* 85 */
10324 SOLX_(__NR_putmsg, sys_putmsg), /* 86 */
10325 #if defined(SOLARIS_OLD_SYSCALLS)
10326 SOLXY(__NR_lstat, sys_lstat), /* 88 */
10327 GENX_(__NR_symlink, sys_symlink), /* 89 */
10328 GENX_(__NR_readlink, sys_readlink), /* 90 */
10329 #endif /* SOLARIS_OLD_SYSCALLS */
10330 GENX_(__NR_setgroups, sys_setgroups), /* 91 */
10331 GENXY(__NR_getgroups, sys_getgroups), /* 92 */
10332 #if defined(SOLARIS_OLD_SYSCALLS)
10333 GENX_(__NR_fchmod, sys_fchmod), /* 93 */
10334 GENX_(__NR_fchown, sys_fchown), /* 94 */
10335 #endif /* SOLARIS_OLD_SYSCALLS */
10336 SOLXY(__NR_sigprocmask, sys_sigprocmask), /* 95 */
10337 GENXY(__NR_sigaltstack, sys_sigaltstack), /* 97 */
10338 SOLXY(__NR_sigaction, sys_sigaction), /* 98 */
10339 SOLXY(__NR_sigpending, sys_sigpending), /* 99 */
10340 SOLX_(__NR_context, sys_getsetcontext), /* 100 */
10341 SOLX_(__NR_fchmodat, sys_fchmodat), /* 101 */
10342 SOLX_(__NR_mkdirat, sys_mkdirat), /* 102 */
10343 SOLXY(__NR_statvfs, sys_statvfs), /* 103 */
10344 SOLXY(__NR_fstatvfs, sys_fstatvfs), /* 104 */
10345 SOLXY(__NR_nfssys, sys_nfssys), /* 106 */
10346 SOLXY(__NR_waitid, sys_waitid), /* 107 */
10347 #if defined(SOLARIS_UTIMESYS_SYSCALL)
10348 SOLX_(__NR_utimesys, sys_utimesys), /* 110 */
10349 #endif /* SOLARIS_UTIMESYS_SYSCALL */
10350 #if defined(SOLARIS_UTIMENSAT_SYSCALL)
10351 SOLX_(__NR_utimensat, sys_utimensat), /* 110 */
10352 #endif /* SOLARIS_UTIMENSAT_SYSCALL */
10353 SOLXY(__NR_sigresend, sys_sigresend), /* 111 */
10354 SOLXY(__NR_priocntlsys, sys_priocntlsys), /* 112 */
10355 SOLX_(__NR_pathconf, sys_pathconf), /* 113 */
10356 SOLX_(__NR_mmap, sys_mmap), /* 115 */
10357 GENXY(__NR_mprotect, sys_mprotect), /* 116 */
10358 GENXY(__NR_munmap, sys_munmap), /* 117 */
10359 GENX_(__NR_fchdir, sys_fchdir), /* 120 */
10360 GENXY(__NR_readv, sys_readv), /* 121 */
10361 GENX_(__NR_writev, sys_writev), /* 122 */
10362 #if defined(SOLARIS_UUIDSYS_SYSCALL)
10363 SOLXY(__NR_uuidsys, sys_uuidsys), /* 124 */
10364 #endif /* SOLARIS_UUIDSYS_SYSCALL */
10365 SOLX_(__NR_mmapobj, sys_mmapobj), /* 127 */
10366 GENX_(__NR_setrlimit, sys_setrlimit), /* 128 */
10367 GENXY(__NR_getrlimit, sys_getrlimit), /* 129 */
10368 #if defined(SOLARIS_OLD_SYSCALLS)
10369 GENX_(__NR_lchown, sys_lchown), /* 130 */
10370 #endif /* SOLARIS_OLD_SYSCALLS */
10371 SOLX_(__NR_memcntl, sys_memcntl), /* 131 */
10372 SOLXY(__NR_getpmsg, sys_getpmsg), /* 132 */
10373 SOLX_(__NR_putpmsg, sys_putpmsg), /* 133 */
10374 #if defined(SOLARIS_OLD_SYSCALLS)
10375 SOLX_(__NR_rename, sys_rename), /* 134 */
10376 #endif /* SOLARIS_OLD_SYSCALLS */
10377 SOLXY(__NR_uname, sys_uname), /* 135 */
10378 SOLX_(__NR_setegid, sys_setegid), /* 136 */
10379 SOLX_(__NR_sysconfig, sys_sysconfig), /* 137 */
10380 SOLXY(__NR_systeminfo, sys_systeminfo), /* 139 */
10381 SOLX_(__NR_seteuid, sys_seteuid), /* 141 */
10382 SOLX_(__NR_forksys, sys_forksys), /* 142 */
10383 SOLXY(__NR_sigtimedwait, sys_sigtimedwait), /* 144 */
10384 SOLX_(__NR_yield, sys_yield), /* 146 */
10385 SOLXY(__NR_lwp_sema_post, sys_lwp_sema_post), /* 148 */
10386 SOLXY(__NR_lwp_sema_trywait, sys_lwp_sema_trywait), /* 149 */
10387 SOLX_(__NR_lwp_detach, sys_lwp_detach), /* 150 */
10388 SOLX_(__NR_fchroot, sys_fchroot), /* 153 */
10389 #if defined(SOLARIS_SYSTEM_STATS_SYSCALL)
10390 SOLX_(__NR_system_stats, sys_system_stats), /* 154 */
10391 #endif /* SOLARIS_SYSTEM_STATS_SYSCALL */
10392 SOLXY(__NR_gettimeofday, sys_gettimeofday), /* 156 */
10393 GENXY(__NR_getitimer, sys_getitimer), /* 157 */
10394 GENXY(__NR_setitimer, sys_setitimer), /* 158 */
10395 SOLX_(__NR_lwp_create, sys_lwp_create), /* 159 */
10396 SOLX_(__NR_lwp_exit, sys_lwp_exit), /* 160 */
10397 SOLX_(__NR_lwp_suspend, sys_lwp_suspend), /* 161 */
10398 SOLX_(__NR_lwp_continue, sys_lwp_continue), /* 162 */
10399 #if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
10400 SOLXY(__NR_lwp_sigqueue, sys_lwp_sigqueue), /* 163 */
10401 #else
10402 SOLXY(__NR_lwp_kill, sys_lwp_kill), /* 163 */
10403 #endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
10404 SOLX_(__NR_lwp_self, sys_lwp_self), /* 164 */
10405 SOLX_(__NR_lwp_sigmask, sys_lwp_sigmask), /* 165 */
10406 SOLX_(__NR_lwp_private, sys_lwp_private), /* 166 */
10407 SOLXY(__NR_lwp_wait, sys_lwp_wait), /* 167 */
10408 SOLXY(__NR_lwp_mutex_wakeup, sys_lwp_mutex_wakeup), /* 168 */
10409 SOLXY(__NR_lwp_cond_wait, sys_lwp_cond_wait), /* 170 */
10410 SOLX_(__NR_lwp_cond_broadcast, sys_lwp_cond_broadcast), /* 172 */
10411 SOLXY(__NR_pread, sys_pread), /* 173 */
10412 SOLX_(__NR_pwrite, sys_pwrite), /* 174 */
10413 #if defined(VGP_x86_solaris)
10414 PLAX_(__NR_llseek, sys_llseek32), /* 175 */
10415 #endif /* VGP_x86_solaris */
10416 SOLXY(__NR_rusagesys, sys_rusagesys), /* 181 */
10417 SOLXY(__NR_port, sys_port), /* 182 */
10418 SOLXY(__NR_pollsys, sys_pollsys), /* 183 */
10419 SOLXY(__NR_labelsys, sys_labelsys), /* 184 */
10420 SOLXY(__NR_acl, sys_acl), /* 185 */
10421 SOLXY(__NR_auditsys, sys_auditsys), /* 186 */
10422 SOLX_(__NR_p_online, sys_p_online), /* 189 */
10423 SOLX_(__NR_sigqueue, sys_sigqueue), /* 190 */
10424 SOLX_(__NR_clock_gettime, sys_clock_gettime), /* 191 */
10425 SOLX_(__NR_clock_settime, sys_clock_settime), /* 192 */
10426 SOLXY(__NR_clock_getres, sys_clock_getres), /* 193 */
10427 SOLXY(__NR_timer_create, sys_timer_create), /* 194 */
10428 SOLX_(__NR_timer_delete, sys_timer_delete), /* 195 */
10429 SOLXY(__NR_timer_settime, sys_timer_settime), /* 196 */
10430 SOLXY(__NR_timer_gettime, sys_timer_gettime), /* 197 */
10431 SOLX_(__NR_timer_getoverrun, sys_timer_getoverrun), /* 198 */
10432 GENXY(__NR_nanosleep, sys_nanosleep), /* 199 */
10433 SOLXY(__NR_facl, sys_facl), /* 200 */
10434 SOLXY(__NR_door, sys_door), /* 201 */
10435 GENX_(__NR_setreuid, sys_setreuid), /* 202 */
10436 GENX_(__NR_setregid, sys_setregid), /* 203 */
10437 SOLXY(__NR_schedctl, sys_schedctl), /* 206 */
10438 SOLXY(__NR_pset, sys_pset), /* 207 */
10439 SOLXY(__NR_resolvepath, sys_resolvepath), /* 209 */
10440 SOLXY(__NR_lwp_mutex_timedlock, sys_lwp_mutex_timedlock), /* 210 */
10441 SOLXY(__NR_lwp_sema_timedwait, sys_lwp_sema_timedwait), /* 211 */
10442 SOLXY(__NR_lwp_rwlock_sys, sys_lwp_rwlock_sys), /* 212 */
10443 #if defined(VGP_x86_solaris)
10444 GENXY(__NR_getdents64, sys_getdents64), /* 213 */
10445 PLAX_(__NR_mmap64, sys_mmap64), /* 214 */
10446 #if defined(SOLARIS_OLD_SYSCALLS)
10447 PLAXY(__NR_stat64, sys_stat64), /* 215 */
10448 PLAXY(__NR_lstat64, sys_lstat64), /* 216 */
10449 PLAXY(__NR_fstat64, sys_fstat64), /* 217 */
10450 #endif /* SOLARIS_OLD_SYSCALLS */
10451 PLAXY(__NR_statvfs64, sys_statvfs64), /* 218 */
10452 PLAXY(__NR_fstatvfs64, sys_fstatvfs64), /* 219 */
10453 #endif /* VGP_x86_solaris */
10454 #if defined(VGP_x86_solaris)
10455 PLAX_(__NR_setrlimit64, sys_setrlimit64), /* 220 */
10456 PLAXY(__NR_getrlimit64, sys_getrlimit64), /* 221 */
10457 PLAXY(__NR_pread64, sys_pread64), /* 222 */
10458 PLAX_(__NR_pwrite64, sys_pwrite64), /* 223 */
10459 #if defined(SOLARIS_OLD_SYSCALLS)
10460 PLAXY(__NR_open64, sys_open64), /* 225 */
10461 #endif /* SOLARIS_OLD_SYSCALLS */
10462 #endif /* VGP_x86_solaris */
10463 SOLXY(__NR_zone, sys_zone), /* 227 */
10464 SOLXY(__NR_getcwd, sys_getcwd), /* 229 */
10465 SOLXY(__NR_so_socket, sys_so_socket), /* 230 */
10466 SOLXY(__NR_so_socketpair, sys_so_socketpair), /* 231 */
10467 SOLX_(__NR_bind, sys_bind), /* 232 */
10468 SOLX_(__NR_listen, sys_listen), /* 233 */
10469 SOLXY(__NR_accept, sys_accept), /* 234 */
10470 SOLX_(__NR_connect, sys_connect), /* 235 */
10471 SOLX_(__NR_shutdown, sys_shutdown), /* 236 */
10472 SOLXY(__NR_recv, sys_recv), /* 237 */
10473 SOLXY(__NR_recvfrom, sys_recvfrom), /* 238 */
10474 SOLXY(__NR_recvmsg, sys_recvmsg), /* 239 */
10475 SOLX_(__NR_send, sys_send), /* 240 */
10476 SOLX_(__NR_sendmsg, sys_sendmsg), /* 241 */
10477 SOLX_(__NR_sendto, sys_sendto), /* 242 */
10478 SOLXY(__NR_getpeername, sys_getpeername), /* 243 */
10479 SOLXY(__NR_getsockname, sys_getsockname), /* 244 */
10480 SOLXY(__NR_getsockopt, sys_getsockopt), /* 245 */
10481 SOLX_(__NR_setsockopt, sys_setsockopt), /* 246 */
10482 SOLX_(__NR_lwp_mutex_register, sys_lwp_mutex_register), /* 252 */
10483 SOLXY(__NR_uucopy, sys_uucopy), /* 254 */
10484 SOLX_(__NR_umount2, sys_umount2) /* 255 */
10485 };
10486
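/* Wrappers for the fasttrap syscall class (VG_SOLARIS_SYSCALL_CLASS_FASTTRAP),
   indexed in the same way as syscall_table. */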
10487 static SyscallTableEntry fasttrap_table[] = {
10488 SOLX_(__NR_gethrtime, fast_gethrtime), /* 3 */
10489 SOLX_(__NR_gethrvtime, fast_gethrvtime), /* 4 */
10490 SOLX_(__NR_gethrestime, fast_gethrestime) /* 5 */
10491 #if defined(SOLARIS_GETHRT_FASTTRAP)
10492 ,
10493 SOLXY(__NR_gethrt, fast_gethrt) /* 7 */
10494 #endif /* SOLARIS_GETHRT_FASTTRAP */
10495 #if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
10496 ,
10497 SOLXY(__NR_getzoneoffset, fast_getzoneoffset) /* 8 */
10498 #endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
10499
10500 };
10501
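/* Return the wrapper table entry for sysno, or NULL if there is no wrapper. */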
10502 SyscallTableEntry *ML_(get_solaris_syscall_entry)(UInt sysno)
10503 {
10504 const UInt syscall_table_size
10505 = sizeof(syscall_table) / sizeof(syscall_table[0]);
10506 const UInt fasttrap_table_size
10507 = sizeof(fasttrap_table) / sizeof(fasttrap_table[0]);
10508
10509 SyscallTableEntry *table;
10510 Int size;
10511
10512 switch (VG_SOLARIS_SYSNO_CLASS(sysno)) {
10513 case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
10514 table = syscall_table;
10515 size = syscall_table_size;
10516 break;
10517 case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
10518 table = fasttrap_table;
10519 size = fasttrap_table_size;
10520 break;
10521 default:
10522 vg_assert(0);
10523 break;
10524 }
10525 sysno = VG_SOLARIS_SYSNO_INDEX(sysno);
10526 if (sysno < size) {
10527 SyscallTableEntry *sys = &table[sysno];
10528 if (!sys->before)
10529 return NULL; /* no entry */
10530 return sys;
10531 }
10532
10533 /* Can't find a wrapper. */
10534 return NULL;
10535 }
10536
10537 #endif // defined(VGO_solaris)
10538
10539 /*--------------------------------------------------------------------*/
10540 /*--- end ---*/
10541 /*--------------------------------------------------------------------*/
10542