/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_barrier.h"
#include "drd_clientreq.h"
#include "drd_cond.h"
#include "drd_error.h"
#include "drd_hb.h"
#include "drd_load_store.h"
#include "drd_malloc_wrappers.h"
#include "drd_mutex.h"
#include "drd_rwlock.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"      // drd_start_suppression()
#include "drd_thread.h"
#include "pub_tool_basics.h"      // Bool
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcprint.h"   // VG_(message)()
#include "pub_tool_machine.h"     // VG_(get_SP)()
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"   // VG_(needs_...)()


/* Global variables. */

Bool DRD_(g_free_is_write);


/* Local function declarations. */

static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret);


/* Function definitions. */

/**
 * Tell the Valgrind core the address of the DRD function that processes
 * client requests. Must be called before any client code is run.
 */
void DRD_(clientreq_init)(void)
{
   VG_(needs_client_requests)(handle_client_request);
}
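
/*
 * Note: client programs normally do not issue these requests directly. The
 * wrapper macros in drd.h and valgrind.h (e.g. ANNOTATE_HAPPENS_BEFORE(),
 * DRD_IGNORE_VAR(), VALGRIND_MALLOCLIKE_BLOCK()) expand to
 * VALGRIND_DO_CLIENT_REQUEST_*() invocations, and the Valgrind core forwards
 * the resulting requests to handle_client_request() below.
 */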

/**
 * DRD's handler for Valgrind client requests. The code below handles both
 * DRD's public and tool-internal client requests.
 */
#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux) || \
    defined(VGP_tilegx_linux)
/* There is a CSE-related issue in GCC for MIPS: the optimization level has
   to be lowered so that CSE-related optimizations are not applied. */
__attribute__((optimize("O1")))
#endif
static Bool handle_client_request(ThreadId vg_tid, UWord* arg, UWord* ret)
{
   UWord result = 0;
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();

   tl_assert(vg_tid == VG_(get_running_tid()));
   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_tid) == drd_tid
             || (VG_USERREQ__GDB_MONITOR_COMMAND == arg[0]
                 && vg_tid == VG_INVALID_THREADID));
   /* Check the consistency of vg_tid and drd_tid, unless vgdb has forced
      the invocation of a gdb monitor command while no thread was running
      (i.e. all threads were blocked in a syscall). In such a case, vg_tid
      is invalid and its conversion to a DRD thread id also yields an
      invalid DRD thread id, but drd_tid is not invalid (it probably equals
      the last running DRD thread). */

   switch (arg[0])
   {
   case VG_USERREQ__MALLOCLIKE_BLOCK:
      if (DRD_(g_free_is_write)) {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "--free-is-write=yes is incompatible with"
                                 " custom memory allocator client requests",
                                 &GEI);
      }
      if (arg[1])
         DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[2]/*size*/);
      break;

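   /*
    * An in-place resize is modeled by discarding the bookkeeping of the old
    * block and re-registering the block at the same address with its new
    * size.
    */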
   case VG_USERREQ__RESIZEINPLACE_BLOCK:
      if (!DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
      {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "Invalid VG_USERREQ__RESIZEINPLACE_BLOCK request",
                                 &GEI);
      }
      DRD_(malloclike_block)(vg_tid, arg[1]/*addr*/, arg[3]/*newSize*/);
      break;

   case VG_USERREQ__FREELIKE_BLOCK:
      if (arg[1] && ! DRD_(freelike_block)(vg_tid, arg[1]/*addr*/, False))
      {
         GenericErrInfo GEI = {
            .tid = DRD_(thread_get_running_tid)(),
            .addr = 0,
         };
         VG_(maybe_record_error)(vg_tid,
                                 GenericErr,
                                 VG_(get_IP)(vg_tid),
                                 "Invalid VG_USERREQ__FREELIKE_BLOCK request",
                                 &GEI);
      }
      break;

   case VG_USERREQ__DRD_GET_VALGRIND_THREAD_ID:
      result = vg_tid;
      break;

   case VG_USERREQ__DRD_GET_DRD_THREAD_ID:
      result = drd_tid;
      break;

   case VG_USERREQ__DRD_SET_THREAD_NAME:
      DRD_(thread_set_name)(drd_tid, (const HChar*)arg[1]);
      break;

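   /*
    * The two suppression requests below mark an address range in which DRD
    * ignores data races. A minimal client-side sketch (assuming the
    * DRD_IGNORE_VAR() and DRD_STOP_IGNORING_VAR() macros from drd.h, which
    * expand to these requests; s_hit_count is an illustrative placeholder):
    *
    *   static int s_hit_count;              // intentionally unsynchronized
    *   ...
    *   DRD_IGNORE_VAR(s_hit_count);         // start suppressing race reports
    *   s_hit_count++;
    *   DRD_STOP_IGNORING_VAR(s_hit_count);  // resume race detection
    */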
   case VG_USERREQ__DRD_START_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 39:
      DRD_(start_suppression)(arg[1], arg[1] + arg[2], "client");
      break;

   case VG_USERREQ__DRD_FINISH_SUPPRESSION:
      /*_VG_USERREQ__HG_ARANGE_MAKE_TRACKED*/
   case VG_USERREQ_TOOL_BASE('H','G') + 256 + 40:
      DRD_(finish_suppression)(arg[1], arg[1] + arg[2]);
      break;

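   /*
    * The happens-before/after annotations let a client describe a custom
    * synchronization protocol to DRD. A minimal client-side sketch (assuming
    * the ANNOTATE_HAPPENS_BEFORE()/ANNOTATE_HAPPENS_AFTER() macros from
    * drd.h; g_ready and g_payload are illustrative placeholders):
    *
    *   // Producer thread:
    *   g_payload = 42;
    *   ANNOTATE_HAPPENS_BEFORE(&g_ready);
    *   __atomic_store_n(&g_ready, 1, __ATOMIC_RELEASE);
    *
    *   // Consumer thread:
    *   while (!__atomic_load_n(&g_ready, __ATOMIC_ACQUIRE))
    *      ;
    *   ANNOTATE_HAPPENS_AFTER(&g_ready);
    *   (void)g_payload;   // not reported as a race with the producer's store
    */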
   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_BEFORE:
      DRD_(hb_happens_before)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_HAPPENS_AFTER:
      DRD_(hb_happens_after)(drd_tid, arg[1]);
      break;

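   /*
    * For the four rwlock annotations below, an address that DRD already
    * tracks as a spinlock is skipped: DRD models spinlocks as mutexes, so
    * handling the annotation as well would register the same object both as
    * a mutex and as a user-defined rwlock.
    */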
   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_CREATE:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_pre_init)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_DESTROY:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      DRD_(rwlock_post_destroy)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_ACQUIRED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      tl_assert(arg[2] == !! arg[2]);
      if (arg[2])
      {
         DRD_(rwlock_pre_wrlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_wrlock)(arg[1], user_rwlock, True);
      }
      else
      {
         DRD_(rwlock_pre_rdlock)(arg[1], user_rwlock);
         DRD_(rwlock_post_rdlock)(arg[1], user_rwlock, True);
      }
      break;

   case VG_USERREQ__DRD_ANNOTATE_RWLOCK_RELEASED:
      if (arg[1])
      {
         struct mutex_info* const mutex_p = DRD_(mutex_get)(arg[1]);
         if (mutex_p && mutex_p->mutex_type == mutex_type_spinlock)
            break;
      }
      tl_assert(arg[2] == !! arg[2]);
      DRD_(rwlock_pre_unlock)(arg[1], user_rwlock);
      break;

   case VG_USERREQ__DRD_ANNOTATE_SEM_INIT_PRE:
      DRD_(semaphore_init)(arg[1], 0, arg[2]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_SEM_DESTROY_POST:
      DRD_(semaphore_destroy)(arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_PRE:
      DRD_(semaphore_pre_wait)(arg[1]);
      break;

   case VG_USERREQ__DRD_ANNOTATE_SEM_WAIT_POST:
      DRD_(semaphore_post_wait)(drd_tid, arg[1], True /* waited */);
      break;

   case VG_USERREQ__DRD_ANNOTATE_SEM_POST_PRE:
      DRD_(semaphore_pre_post)(drd_tid, arg[1]);
      break;

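   /*
    * Records the address and size of a PTHREAD_COND_INITIALIZER pattern, so
    * that DRD can recognize condition variables that were initialized
    * statically rather than via pthread_cond_init().
    */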
   case VG_USERREQ__SET_PTHREAD_COND_INITIALIZER:
      DRD_(pthread_cond_initializer) = (Addr)arg[1];
      DRD_(pthread_cond_initializer_size) = arg[2];
      break;

   case VG_USERREQ__DRD_START_NEW_SEGMENT:
      DRD_(thread_new_segment)(DRD_(PtThreadIdToDrdThreadId)(arg[1]));
      break;

   case VG_USERREQ__DRD_START_TRACE_ADDR:
      DRD_(start_tracing_address_range)(arg[1], arg[1] + arg[2], False);
      break;

   case VG_USERREQ__DRD_STOP_TRACE_ADDR:
      DRD_(stop_tracing_address_range)(arg[1], arg[1] + arg[2]);
      break;

   case VG_USERREQ__DRD_RECORD_LOADS:
      DRD_(thread_set_record_loads)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__DRD_RECORD_STORES:
      DRD_(thread_set_record_stores)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__SET_PTHREADID:
      // pthread_self() returns 0 for programs not linked with libpthread.so.
      if (arg[1] != INVALID_POSIX_THREADID)
         DRD_(thread_set_pthreadid)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__SET_JOINABLE:
   {
      const DrdThreadId drd_joinable = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (drd_joinable != DRD_INVALID_THREADID)
         DRD_(thread_set_joinable)(drd_joinable, (Bool)arg[2]);
      else {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_detach(): invalid thread ID",
                                 &ITI);
      }
      break;
   }

   case VG_USERREQ__ENTERING_PTHREAD_CREATE:
      DRD_(thread_entering_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__LEFT_PTHREAD_CREATE:
      DRD_(thread_left_pthread_create)(drd_tid);
      break;

   case VG_USERREQ__POST_THREAD_JOIN:
   {
      const DrdThreadId thread_to_join = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_join == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_join(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_post_join)(drd_tid, thread_to_join);
      }
      break;
   }

   case VG_USERREQ__PRE_THREAD_CANCEL:
   {
      const DrdThreadId thread_to_cancel = DRD_(PtThreadIdToDrdThreadId)(arg[1]);
      if (thread_to_cancel == DRD_INVALID_THREADID)
      {
         InvalidThreadIdInfo ITI = { DRD_(thread_get_running_tid)(), arg[1] };
         VG_(maybe_record_error)(vg_tid,
                                 InvalidThreadId,
                                 VG_(get_IP)(vg_tid),
                                 "pthread_cancel(): invalid thread ID",
                                 &ITI);
      }
      else
      {
         DRD_(thread_pre_cancel)(thread_to_cancel);
      }
      break;
   }

   case VG_USERREQ__POST_THREAD_CANCEL:
      break;

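   /*
    * The PRE_/POST_ requests below bracket intercepted POSIX synchronization
    * calls. DRD_(thread_enter_synchr)() increments a per-thread nesting
    * counter and returns its previous value; DRD_(thread_leave_synchr)()
    * decrements it and returns the new value. Acting only when the counter
    * is zero ensures that a primitive implemented in terms of other
    * intercepted primitives is processed once, at the outermost level.
    */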
   case VG_USERREQ__PRE_MUTEX_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_init)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_MUTEX_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_MUTEX_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_MUTEX_LOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_pre_lock)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_MUTEX_LOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(mutex_post_lock)(arg[1], arg[2], False/*post_cond_wait*/);
      break;

   case VG_USERREQ__PRE_MUTEX_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(mutex_unlock)(arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_MUTEX_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__DRD_IGNORE_MUTEX_ORDERING:
      DRD_(mutex_ignore_ordering)(arg[1]);
      break;

   case VG_USERREQ__PRE_SPIN_INIT_OR_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(spinlock_init_or_unlock)(arg[1]);
      break;

   case VG_USERREQ__POST_SPIN_INIT_OR_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_init)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_COND_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(cond_post_destroy)(arg[1], arg[2]);
      break;

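   /*
    * A condition variable wait is modeled as releasing the mutex before
    * blocking (PRE_COND_WAIT) and re-acquiring it once the wait returns
    * (POST_COND_WAIT), both only at the outermost synchronization nesting
    * level.
    */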
   case VG_USERREQ__PRE_COND_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const MutexT mutex_type = arg[3];
         DRD_(mutex_unlock)(mutex, mutex_type);
         DRD_(cond_pre_wait)(cond, mutex);
      }
      break;

   case VG_USERREQ__POST_COND_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
      {
         const Addr cond = arg[1];
         const Addr mutex = arg[2];
         const Bool took_lock = arg[3];
         DRD_(cond_post_wait)(cond);
         DRD_(mutex_post_lock)(mutex, took_lock, True);
      }
      break;

   case VG_USERREQ__PRE_COND_SIGNAL:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_signal)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_SIGNAL:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_COND_BROADCAST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(cond_pre_broadcast)(arg[1]);
      break;

   case VG_USERREQ__POST_COND_BROADCAST:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_init)(arg[1], arg[2], arg[3]);
      break;

   case VG_USERREQ__POST_SEM_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_destroy)(arg[1]);
      break;

   case VG_USERREQ__PRE_SEM_OPEN:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_SEM_OPEN:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_open)(arg[1], (HChar*)arg[2], arg[3], arg[4], arg[5]);
      break;

   case VG_USERREQ__PRE_SEM_CLOSE:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_close)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_CLOSE:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_SEM_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_wait)(arg[1]);
      break;

   case VG_USERREQ__POST_SEM_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_SEM_POST:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(semaphore_pre_post)(drd_tid, arg[1]);
      break;

   case VG_USERREQ__POST_SEM_POST:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(semaphore_post_post)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_BARRIER_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_init)(arg[1], arg[2], arg[3], arg[4]);
      break;

   case VG_USERREQ__POST_BARRIER_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_BARRIER_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_BARRIER_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_destroy)(arg[1], arg[2]);
      break;

   case VG_USERREQ__PRE_BARRIER_WAIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(barrier_pre_wait)(drd_tid, arg[1], arg[2]);
      break;

   case VG_USERREQ__POST_BARRIER_WAIT:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(barrier_post_wait)(drd_tid, arg[1], arg[2], arg[3], arg[4]);
      break;

   case VG_USERREQ__PRE_RWLOCK_INIT:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_init)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_INIT:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

   case VG_USERREQ__PRE_RWLOCK_DESTROY:
      DRD_(thread_enter_synchr)(drd_tid);
      break;

   case VG_USERREQ__POST_RWLOCK_DESTROY:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_destroy)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__PRE_RWLOCK_RDLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_rdlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_RDLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_rdlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_WRLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_wrlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_WRLOCK:
      if (DRD_(thread_leave_synchr)(drd_tid) == 0)
         DRD_(rwlock_post_wrlock)(arg[1], pthread_rwlock, arg[2]);
      break;

   case VG_USERREQ__PRE_RWLOCK_UNLOCK:
      if (DRD_(thread_enter_synchr)(drd_tid) == 0)
         DRD_(rwlock_pre_unlock)(arg[1], pthread_rwlock);
      break;

   case VG_USERREQ__POST_RWLOCK_UNLOCK:
      DRD_(thread_leave_synchr)(drd_tid);
      break;

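   /*
    * DRD_CLEAN_MEMORY discards the memory access information DRD has
    * gathered for the range [arg[1], arg[1] + arg[2][, e.g. before the
    * memory is recycled for another purpose.
    */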
   case VG_USERREQ__DRD_CLEAN_MEMORY:
      if (arg[2] > 0)
         DRD_(clean_memory)(arg[1], arg[2]);
      break;

   case VG_USERREQ__HELGRIND_ANNOTATION_UNIMP:
   {
      /* Note: it is assumed below that the text arg[1] points to is never
       * freed, e.g. because it points to static data.
       */
      UnimpClReqInfo UICR =
         { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
      VG_(maybe_record_error)(vg_tid,
                              UnimpHgClReq,
                              VG_(get_IP)(vg_tid),
                              "",
                              &UICR);
   }
      break;

   case VG_USERREQ__DRD_ANNOTATION_UNIMP:
   {
      /* Note: it is assumed below that the text arg[1] points to is never
       * freed, e.g. because it points to static data.
       */
      UnimpClReqInfo UICR =
         { DRD_(thread_get_running_tid)(), (HChar*)arg[1] };
      VG_(maybe_record_error)(vg_tid,
                              UnimpDrdClReq,
                              VG_(get_IP)(vg_tid),
                              "",
                              &UICR);
   }
      break;

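   /*
    * Returning False tells the Valgrind core that DRD did not recognize this
    * request, so requests intended for the core or for other components are
    * not silently swallowed.
    */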
   default:
#if 0
      VG_(message)(Vg_DebugMsg, "Unrecognized client request 0x%lx 0x%lx",
                   arg[0], arg[1]);
      tl_assert(0);
#endif
      return False;
   }

   *ret = result;
   return True;
}
