/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2015 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_barrier.h"
#include "drd_clientobj.h"
#include "drd_cond.h"
#include "drd_mutex.h"
#include "drd_segment.h"
#include "drd_semaphore.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_vki.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()



/* Local functions. */

static void thread_append_segment(const DrdThreadId tid, Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid);
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid);


/* Local variables. */

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_compute_conflict_set_count;
static ULong s_update_conflict_set_count;
static ULong s_update_conflict_set_new_sg_count;
static ULong s_update_conflict_set_sync_count;
static ULong s_update_conflict_set_join_count;
static ULong s_conflict_set_bitmap_creation_count;
static ULong s_conflict_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
DrdThreadId DRD_(g_drd_running_tid) = DRD_INVALID_THREADID;
ThreadInfo* DRD_(g_threadinfo);
struct bitmap* DRD_(g_conflict_set);
Bool DRD_(verify_conflict_set);
static Bool s_trace_context_switches = False;
static Bool s_trace_conflict_set = False;
static Bool s_trace_conflict_set_bm = False;
static Bool s_trace_fork_join = False;
static Bool s_segment_merging = True;
static int s_new_segments_since_last_merge;
static int s_segment_merge_interval = 10;
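/*
 * Bookkeeping for delayed thread deletion: a finished thread receives an
 * increasing sequence number from s_deletion_head, and its information is
 * only discarded after s_join_list_vol more threads have been queued for
 * deletion. s_deletion_tail holds the sequence number of the oldest thread
 * whose information has not yet been discarded.
 */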
static unsigned s_join_list_vol = 10;
static unsigned s_deletion_head;
static unsigned s_deletion_tail;
#if defined(VGO_solaris)
Bool DRD_(ignore_thread_creation) = True;
#else
Bool DRD_(ignore_thread_creation) = False;
#endif /* VGO_solaris */


/* Function definitions. */

/** Enables/disables context switch tracing. */
void DRD_(thread_trace_context_switches)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_context_switches = t;
}

/** Enables/disables conflict set tracing. */
void DRD_(thread_trace_conflict_set)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set = t;
}

/** Enables/disables conflict set bitmap tracing. */
void DRD_(thread_trace_conflict_set_bm)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_conflict_set_bm = t;
}

/** Report whether fork/join tracing is enabled. */
Bool DRD_(thread_get_trace_fork_join)(void)
{
   return s_trace_fork_join;
}

/** Enables/disables fork/join tracing. */
void DRD_(thread_set_trace_fork_join)(const Bool t)
{
   tl_assert(t == False || t == True);
   s_trace_fork_join = t;
}

/** Enables/disables segment merging. */
void DRD_(thread_set_segment_merging)(const Bool m)
{
   tl_assert(m == False || m == True);
   s_segment_merging = m;
}

/** Get the segment merging interval. */
int DRD_(thread_get_segment_merge_interval)(void)
{
   return s_segment_merge_interval;
}

/** Set the segment merging interval. */
void DRD_(thread_set_segment_merge_interval)(const int i)
{
   s_segment_merge_interval = i;
}

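/** Set the maximum number of finished threads whose information is kept
    before their delayed deletion (see DRD_(thread_delayed_delete)()). */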
void DRD_(thread_set_join_list_vol)(const int jlv)
{
   s_join_list_vol = jlv;
}

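/** Allocate and initialize the global DRD_(g_threadinfo)[] array. */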
void DRD_(thread_init)(void)
{
   DRD_(g_threadinfo) = VG_(malloc)("drd.main.ti.1",
                                DRD_N_THREADS * sizeof DRD_(g_threadinfo)[0]);
   for (UInt i = 0; i < DRD_N_THREADS; ++i) {
      static ThreadInfo initval;
      DRD_(g_threadinfo)[i] = initval;
   }
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId.
 *
 * @return DRD thread ID upon success and DRD_INVALID_THREADID if the passed
 *         Valgrind ThreadId does not yet exist.
 */
DrdThreadId DRD_(VgThreadIdToDrdThreadId)(const ThreadId tid)
{
   UInt i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (DRD_(g_threadinfo)[i].vg_thread_exists == True
          && DRD_(g_threadinfo)[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

/** Allocate a new DRD thread ID for the specified Valgrind thread ID. */
static DrdThreadId DRD_(VgThreadIdToNewDrdThreadId)(const ThreadId tid)
{
   UInt i;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (!DRD_(g_threadinfo)[i].valid)
      {
         tl_assert(! DRD_(IsValidDrdThreadId)(i));

         DRD_(g_threadinfo)[i].valid = True;
         DRD_(g_threadinfo)[i].vg_thread_exists = True;
         DRD_(g_threadinfo)[i].vg_threadid = tid;
         DRD_(g_threadinfo)[i].pt_threadid = INVALID_POSIX_THREADID;
         DRD_(g_threadinfo)[i].stack_min = 0;
         DRD_(g_threadinfo)[i].stack_min_min = 0;
         DRD_(g_threadinfo)[i].stack_startup = 0;
         DRD_(g_threadinfo)[i].stack_max = 0;
         DRD_(thread_set_name)(i, "");
         DRD_(g_threadinfo)[i].on_alt_stack = False;
         DRD_(g_threadinfo)[i].is_recording_loads = True;
         DRD_(g_threadinfo)[i].is_recording_stores = True;
         DRD_(g_threadinfo)[i].pthread_create_nesting_level = 0;
         DRD_(g_threadinfo)[i].synchr_nesting = 0;
         DRD_(g_threadinfo)[i].deletion_seq = s_deletion_tail - 1;
         DRD_(g_threadinfo)[i].creator_thread = DRD_INVALID_THREADID;
#if defined (VGO_solaris)
         DRD_(g_threadinfo)[i].bind_guard_flag = 0;
#endif /* VGO_solaris */

         tl_assert(DRD_(g_threadinfo)[i].sg_first == NULL);
         tl_assert(DRD_(g_threadinfo)[i].sg_last == NULL);

         tl_assert(DRD_(IsValidDrdThreadId)(i));

         return i;
      }
   }

   VG_(printf)(
"\nSorry, but the maximum number of threads supported by DRD has been exceeded. "
"Aborting.\n");

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

/** Convert a POSIX thread ID into a DRD thread ID. */
DrdThreadId DRD_(PtThreadIdToDrdThreadId)(const PThreadId tid)
{
   UInt i;

   if (tid != INVALID_POSIX_THREADID)
   {
      for (i = 1; i < DRD_N_THREADS; i++)
      {
         if (DRD_(g_threadinfo)[i].posix_thread_exists
             && DRD_(g_threadinfo)[i].pt_threadid == tid)
         {
            return i;
         }
      }
   }
   return DRD_INVALID_THREADID;
}

/** Convert a DRD thread ID into a Valgrind thread ID. */
ThreadId DRD_(DrdThreadIdToVgThreadId)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return (DRD_(g_threadinfo)[tid].vg_thread_exists
           ? DRD_(g_threadinfo)[tid].vg_threadid
           : VG_INVALID_THREADID);
}

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool DRD_(sane_ThreadInfo)(const ThreadInfo* const ti)
{
   Segment* p;

   for (p = ti->sg_first; p; p = p->thr_next) {
      if (p->thr_next && p->thr_next->thr_prev != p)
         return False;
      if (p->thr_next == 0 && p != ti->sg_last)
         return False;
   }
   for (p = ti->sg_last; p; p = p->thr_prev) {
      if (p->thr_prev && p->thr_prev->thr_next != p)
         return False;
      if (p->thr_prev == 0 && p != ti->sg_first)
         return False;
   }
   return True;
}
#endif

/**
 * Create the first segment for a newly started thread.
 *
 * This function is called from the handler installed via
 * VG_(track_pre_thread_ll_create)(). The Valgrind core invokes this handler
 * from the context of the creator thread, before the new thread has been
 * created.
 *
 * @param[in] creator    DRD thread ID of the creator thread.
 * @param[in] vg_created Valgrind thread ID of the created thread.
 *
 * @return DRD thread ID of the created thread.
 */
DrdThreadId DRD_(thread_pre_create)(const DrdThreadId creator,
                                    const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(DRD_(VgThreadIdToDrdThreadId)(vg_created) == DRD_INVALID_THREADID);
   created = DRD_(VgThreadIdToNewDrdThreadId)(vg_created);
   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(DRD_(g_threadinfo)[created].sg_first == NULL);
   tl_assert(DRD_(g_threadinfo)[created].sg_last == NULL);

   if (creator != DRD_INVALID_THREADID) {
      if (DRD_(ignore_thread_creation)) {
         tl_assert(DRD_(thread_get_synchr_nesting_count)(created) == 0);
         DRD_(thread_enter_synchr)(created);
         /* Counterpart in DRD_(thread_set_pthreadid)(). */
      }
   }
   DRD_(g_threadinfo)[created].creator_thread = creator;

   /* Create an initial segment for the newly created thread. */
   thread_append_segment(created, DRD_(sg_new)(creator, created));

   return created;
}

/**
 * Initialize DRD_(g_threadinfo)[] for a newly created thread. Must be called
 * after the thread has been created and before any client instructions are
 * run on the newly created thread, e.g. from the handler installed via
 * VG_(track_pre_thread_first_insn)().
 *
 * @param[in] vg_created Valgrind thread ID of the newly created thread.
 *
 * @return DRD thread ID for the new thread.
 */
DrdThreadId DRD_(thread_post_create)(const ThreadId vg_created)
{
   const DrdThreadId created = DRD_(VgThreadIdToDrdThreadId)(vg_created);

   tl_assert(0 <= (int)created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[created].stack_max
      = VG_(thread_get_stack_max)(vg_created);
   DRD_(g_threadinfo)[created].stack_startup
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_min_min
      = DRD_(g_threadinfo)[created].stack_max;
   DRD_(g_threadinfo)[created].stack_size
      = VG_(thread_get_stack_size)(vg_created);
   tl_assert(DRD_(g_threadinfo)[created].stack_max != 0);

   return created;
}

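/**
 * Mark the specified thread as finished and defer the actual cleanup of its
 * data structures: the thread is assigned a deletion sequence number, and
 * once enough threads are queued for deletion the oldest queued thread is
 * deleted via DRD_(thread_delete)().
 */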
static void DRD_(thread_delayed_delete)(const DrdThreadId tid)
{
   UInt j;

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   DRD_(g_threadinfo)[tid].deletion_seq = s_deletion_head++;
#if 0
   VG_(message)(Vg_DebugMsg, "Adding thread %d to the deletion list\n", tid);
#endif
   if (s_deletion_head - s_deletion_tail >= s_join_list_vol) {
      for (j = 0; j < DRD_N_THREADS; ++j) {
         if (DRD_(IsValidDrdThreadId)(j)
             && DRD_(g_threadinfo)[j].deletion_seq == s_deletion_tail)
         {
            s_deletion_tail++;
#if 0
            VG_(message)(Vg_DebugMsg, "Delayed delete of thread %d\n", j);
#endif
            DRD_(thread_delete)(j, False);
            break;
         }
      }
   }
}

/**
 * Process VG_USERREQ__POST_THREAD_JOIN. This client request is invoked just
 * after thread drd_joiner joined thread drd_joinee.
 */
void DRD_(thread_post_join)(DrdThreadId drd_joiner, DrdThreadId drd_joinee)
{
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joiner));
   tl_assert(DRD_(IsValidDrdThreadId)(drd_joinee));

   DRD_(thread_new_segment)(drd_joiner);
   DRD_(thread_combine_vc_join)(drd_joiner, drd_joinee);
   DRD_(thread_new_segment)(drd_joinee);

   if (s_trace_fork_join)
   {
      const ThreadId joiner = DRD_(DrdThreadIdToVgThreadId)(drd_joiner);
      const unsigned msg_size = 256;
      HChar* msg;

      msg = VG_(malloc)("drd.main.dptj.1", msg_size);

      VG_(snprintf)(msg, msg_size,
                    "drd_post_thread_join joiner = %u, joinee = %u",
                    drd_joiner, drd_joinee);
      if (joiner)
      {
         HChar* vc;

         vc = DRD_(vc_aprint)(DRD_(thread_get_vc)(drd_joiner));
         VG_(snprintf)(msg + VG_(strlen)(msg), msg_size - VG_(strlen)(msg),
                       ", new vc: %s", vc);
         VG_(free)(vc);
      }
      DRD_(trace_msg)("%pS", msg);
      VG_(free)(msg);
   }

   if (! DRD_(get_check_stack_accesses)())
   {
      DRD_(finish_suppression)(DRD_(thread_get_stack_max)(drd_joinee)
                               - DRD_(thread_get_stack_size)(drd_joinee),
                               DRD_(thread_get_stack_max)(drd_joinee));
   }
   DRD_(clientobj_delete_thread)(drd_joinee);
   DRD_(thread_delayed_delete)(drd_joinee);
}

/**
 * NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,
 * and accesses this data structure from multiple threads without locking.
 * Any conflicting accesses in the range stack_startup..stack_max will be
 * ignored.
 */
void DRD_(thread_set_stack_startup)(const DrdThreadId tid,
                                    const Addr stack_startup)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= DRD_(g_threadinfo)[tid].stack_max);
   DRD_(g_threadinfo)[tid].stack_startup = stack_startup;
}

/** Return the stack pointer for the specified thread. */
Addr DRD_(thread_get_stack_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min;
}

/**
 * Return the lowest value that was ever assigned to the stack pointer
 * for the specified thread.
 */
Addr DRD_(thread_get_stack_min_min)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_min_min;
}

/** Return the top address for the stack of the specified thread. */
Addr DRD_(thread_get_stack_max)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_max;
}

/** Return the maximum stack size for the specified thread. */
SizeT DRD_(thread_get_stack_size)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].stack_size;
}

Bool DRD_(thread_get_on_alt_stack)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return DRD_(g_threadinfo)[tid].on_alt_stack;
}

void DRD_(thread_set_on_alt_stack)(const DrdThreadId tid,
                                   const Bool on_alt_stack)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(on_alt_stack == !!on_alt_stack);
   DRD_(g_threadinfo)[tid].on_alt_stack = on_alt_stack;
}

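/** Return the number of threads currently running on an alternate stack. */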
Int DRD_(thread_get_threads_on_alt_stack)(void)
{
   int n = 0;

   for (UInt i = 1; i < DRD_N_THREADS; i++)
      n += DRD_(g_threadinfo)[i].on_alt_stack;
   return n;
}

/**
 * Clean up thread-specific data structures.
 */
void DRD_(thread_delete)(const DrdThreadId tid, const Bool detached)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(DRD_(IsValidDrdThreadId)(tid));

   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 0);
   for (sg = DRD_(g_threadinfo)[tid].sg_last; sg; sg = sg_prev) {
      sg_prev = sg->thr_prev;
      sg->thr_next = NULL;
      sg->thr_prev = NULL;
      DRD_(sg_put)(sg);
   }
   DRD_(g_threadinfo)[tid].valid = False;
   DRD_(g_threadinfo)[tid].vg_thread_exists = False;
   DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   if (detached)
      DRD_(g_threadinfo)[tid].detached_posix_thread = False;
   else
      tl_assert(!DRD_(g_threadinfo)[tid].detached_posix_thread);
   DRD_(g_threadinfo)[tid].sg_first = NULL;
   DRD_(g_threadinfo)[tid].sg_last = NULL;

   tl_assert(!DRD_(IsValidDrdThreadId)(tid));
}

/**
 * Called after a thread performed its last memory access and before
 * thread_delete() is called. Note: thread_delete() is only called for
 * joinable threads, not for detached threads.
 */
void DRD_(thread_finished)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   DRD_(g_threadinfo)[tid].vg_thread_exists = False;

   if (DRD_(g_threadinfo)[tid].detached_posix_thread)
   {
      /*
       * Once a detached thread has finished, its stack is deallocated and
       * should no longer be taken into account when computing the conflict
       * set.
       */
      DRD_(g_threadinfo)[tid].stack_min = DRD_(g_threadinfo)[tid].stack_max;

      /*
       * For a detached thread, calling pthread_exit() invalidates the
       * POSIX thread ID associated with the detached thread. For joinable
       * POSIX threads however, the POSIX thread ID remains live after the
       * pthread_exit() call until pthread_join() is called.
       */
      DRD_(g_threadinfo)[tid].posix_thread_exists = False;
   }
}

/** Called just after fork() in the child process. */
void DRD_(drd_thread_atfork_child)(const DrdThreadId tid)
{
   unsigned i;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (i == tid)
         continue;
      if (DRD_(IsValidDrdThreadId(i)))
         DRD_(thread_delete)(i, True);
      tl_assert(!DRD_(IsValidDrdThreadId(i)));
   }

   DRD_(bm_cleanup)(DRD_(g_conflict_set));
   DRD_(bm_init)(DRD_(g_conflict_set));
}

/** Called just before pthread_cancel(). */
void DRD_(thread_pre_cancel)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   if (DRD_(thread_get_trace_fork_join)())
      DRD_(trace_msg)("[%u] drd_thread_pre_cancel %u",
                      DRD_(g_drd_running_tid), tid);
}

/**
 * Store the POSIX thread ID for the specified thread.
 *
 * @note This function can be called twice for the same thread -- see also
 *       the comment block preceding the pthread_create() wrapper in
 *       drd_pthread_intercepts.c.
 */
void DRD_(thread_set_pthreadid)(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid == INVALID_POSIX_THREADID
             || DRD_(g_threadinfo)[tid].pt_threadid == ptid);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   DRD_(g_threadinfo)[tid].posix_thread_exists = True;
   DRD_(g_threadinfo)[tid].pt_threadid = ptid;

   if (DRD_(g_threadinfo)[tid].creator_thread != DRD_INVALID_THREADID) {
      if (DRD_(ignore_thread_creation)) {
         DRD_(thread_leave_synchr)(tid);
         tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
      }
   }
}

/** Returns true for joinable threads and false for detached threads. */
Bool DRD_(thread_get_joinable)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! DRD_(g_threadinfo)[tid].detached_posix_thread;
}

/** Store the thread mode: joinable or detached. */
#if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
/* There is a CSE-related issue in gcc for MIPS: the optimization level has
   to be lowered so that CSE-related optimizations are not applied. */
__attribute__((optimize("O1")))
#endif
void DRD_(thread_set_joinable)(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert((!! joinable) == joinable);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);

   DRD_(g_threadinfo)[tid].detached_posix_thread = ! joinable;
}

/** Tells DRD that the calling thread is about to enter pthread_create(). */
void DRD_(thread_entering_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level >= 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level++;

   if (DRD_(ignore_thread_creation)) {
      tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
      DRD_(thread_enter_synchr)(tid);
   }
}

/** Tells DRD that the calling thread has left pthread_create(). */
void DRD_(thread_left_pthread_create)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pt_threadid != INVALID_POSIX_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].pthread_create_nesting_level > 0);

   DRD_(g_threadinfo)[tid].pthread_create_nesting_level--;

   if (DRD_(ignore_thread_creation)) {
      DRD_(thread_leave_synchr)(tid);
      tl_assert(DRD_(thread_get_synchr_nesting_count)(tid) == 0);
   }
}

#if defined(VGO_solaris)
/** Handles the bind_guard() intercept. */
void DRD_(thread_entering_rtld_bind_guard)(const DrdThreadId tid, int flags)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((bindflag & DRD_(g_threadinfo)[tid].bind_guard_flag) == 0) {
      DRD_(g_threadinfo)[tid].bind_guard_flag |= bindflag;
      DRD_(thread_enter_synchr)(tid);
   }
}

/**
 * Handles the bind_clear() intercept.
 * A call to bind_clear(0) is typically used to determine the value of
 * bind_flags.
 */
void DRD_(thread_leaving_rtld_bind_clear)(const DrdThreadId tid, int flags)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   Int bindflag = (flags & VKI_THR_FLG_RTLD);
   if ((DRD_(g_threadinfo)[tid].bind_guard_flag & bindflag) != 0) {
      DRD_(g_threadinfo)[tid].bind_guard_flag &= ~bindflag;
      DRD_(thread_leave_synchr)(tid);
   }
}
#endif /* VGO_solaris */

/** Obtain the thread number and the user-assigned thread name. */
const HChar* DRD_(thread_get_name)(const DrdThreadId tid)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   return DRD_(g_threadinfo)[tid].name;
}

/** Set the name of the specified thread. */
void DRD_(thread_set_name)(const DrdThreadId tid, const HChar* const name)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   if (name == NULL || name[0] == 0)
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %u",
                    tid);
   else
      VG_(snprintf)(DRD_(g_threadinfo)[tid].name,
                    sizeof(DRD_(g_threadinfo)[tid].name),
                    "Thread %u (%s)",
                    tid, name);
   DRD_(g_threadinfo)[tid].name[sizeof(DRD_(g_threadinfo)[tid].name) - 1] = 0;
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_vg_running_tid)(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      DRD_(thread_set_running_tid)(vg_tid,
                                   DRD_(VgThreadIdToDrdThreadId)(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Update s_vg_running_tid, DRD_(g_drd_running_tid) and recalculate the
 * conflict set.
 */
void DRD_(thread_set_running_tid)(const ThreadId vg_tid,
                                  const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && DRD_(g_drd_running_tid) != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %u to thread %u;"
                      " segments: %llu\n",
                      DRD_(g_drd_running_tid), drd_tid,
                      DRD_(sg_get_segments_alive_count)());
      }
      s_vg_running_tid = vg_tid;
      DRD_(g_drd_running_tid) = drd_tid;
      thread_compute_conflict_set(&DRD_(g_conflict_set), drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(DRD_(g_drd_running_tid) != DRD_INVALID_THREADID);
}

/**
 * Increase the synchronization nesting counter. Must be called before the
 * client calls a synchronization function.
 */
int DRD_(thread_enter_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting++;
}

/**
 * Decrease the synchronization nesting counter. Must be called after the
 * client left a synchronization function.
 */
int DRD_(thread_leave_synchr)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   tl_assert(DRD_(g_threadinfo)[tid].synchr_nesting >= 1);
   return --DRD_(g_threadinfo)[tid].synchr_nesting;
}
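
/*
 * Note: an illustrative sketch, not part of the DRD API proper. A
 * synchronization intercept typically brackets the intercepted call as
 * follows:
 *
 *    DRD_(thread_enter_synchr)(drd_tid);
 *    ... invoke the real synchronization function ...
 *    DRD_(thread_leave_synchr)(drd_tid);
 *
 * While the nesting counter is nonzero, DRD does not report data races on
 * the memory accesses performed by the synchronization primitive itself.
 */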

/** Returns the synchronization nesting counter. */
int DRD_(thread_get_synchr_nesting_count)(const DrdThreadId tid)
{
   tl_assert(DRD_(IsValidDrdThreadId)(tid));
   return DRD_(g_threadinfo)[tid].synchr_nesting;
}

/** Append a new segment at the end of the segment list. */
static
void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   // add at tail
   sg->thr_prev = DRD_(g_threadinfo)[tid].sg_last;
   sg->thr_next = NULL;
   if (DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last->thr_next = sg;
   DRD_(g_threadinfo)[tid].sg_last = sg;
   if (DRD_(g_threadinfo)[tid].sg_first == NULL)
      DRD_(g_threadinfo)[tid].sg_first = sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Remove a segment from the segment list of thread threadid, and free the
 * associated memory.
 */
static
void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif

   if (sg->thr_prev)
      sg->thr_prev->thr_next = sg->thr_next;
   if (sg->thr_next)
      sg->thr_next->thr_prev = sg->thr_prev;
   if (sg == DRD_(g_threadinfo)[tid].sg_first)
      DRD_(g_threadinfo)[tid].sg_first = sg->thr_next;
   if (sg == DRD_(g_threadinfo)[tid].sg_last)
      DRD_(g_threadinfo)[tid].sg_last = sg->thr_prev;
   DRD_(sg_put)(sg);

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
   tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[tid]));
#endif
}

/**
 * Returns a pointer to the vector clock of the most recent segment associated
 * with thread 'tid'.
 */
VectorClock* DRD_(thread_get_vc)(const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);
   return &latest_sg->vc;
}

/**
 * Return the latest segment of thread 'tid' and increment its reference count.
 */
void DRD_(thread_get_latest_segment)(Segment** sg, const DrdThreadId tid)
{
   Segment* latest_sg;

   tl_assert(sg);
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   latest_sg = DRD_(g_threadinfo)[tid].sg_last;
   tl_assert(latest_sg);

   DRD_(sg_put)(*sg);
   *sg = DRD_(sg_get)(latest_sg);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_minimum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_min)(vc, &latest_sg->vc);
         first = False;
      }
   }
}
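
/*
 * Illustrative example: if the latest vector clocks of two threads are
 * [ 1: 2, 2: 1 ] and [ 1: 1, 2: 3 ], the component-wise minimum computed
 * above is [ 1: 1, 2: 1 ] and the component-wise maximum computed by the
 * function below is [ 1: 2, 2: 3 ].
 */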

/**
 * Compute the maximum of all latest vector clocks of all threads.
 *
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void DRD_(thread_compute_maximum_vc)(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < DRD_N_THREADS; i++)
   {
      latest_sg = DRD_(g_threadinfo)[i].sg_last;
      if (latest_sg) {
         if (first)
            DRD_(vc_assign)(vc, &latest_sg->vc);
         else
            DRD_(vc_combine)(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of all threads -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   DRD_(vc_init)(&thread_vc_min, 0, 0);
   DRD_(thread_compute_minimum_vc)(&thread_vc_min);
   if (DRD_(sg_get_trace)())
   {
      HChar *vc_min, *vc_max;
      VectorClock thread_vc_max;

      DRD_(vc_init)(&thread_vc_max, 0, 0);
      DRD_(thread_compute_maximum_vc)(&thread_vc_max);
      vc_min = DRD_(vc_aprint)(&thread_vc_min);
      vc_max = DRD_(vc_aprint)(&thread_vc_max);
      VG_(message)(Vg_DebugMsg,
                   "Discarding ordered segments -- min vc is %s, max vc is %s\n",
                   vc_min, vc_max);
      VG_(free)(vc_min);
      VG_(free)(vc_max);
      DRD_(vc_cleanup)(&thread_vc_max);
   }

   for (i = 0; i < DRD_N_THREADS; i++) {
      Segment* sg;
      Segment* sg_next;

      for (sg = DRD_(g_threadinfo)[i].sg_first;
           sg && (sg_next = sg->thr_next)
              && DRD_(vc_lte)(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   DRD_(vc_cleanup)(&thread_vc_min);
}

/**
 * An implementation of the property 'equiv(sg1, sg2)' as defined in the
 * paper by Mark Christiaens et al. The property equiv(sg1, sg2) holds if and
 * only if all segments in the set CS are ordered consistently against both
 * sg1 and sg2. The set CS is defined as the set of segments that can
 * immediately precede future segments via inter-thread synchronization
 * operations. In DRD the set CS consists of the latest segment of each
 * thread combined with all segments for which the reference count is
 * strictly greater than one. The code below is an optimized version of the
 * following:
 *
 * for (i = 0; i < DRD_N_THREADS; i++)
 * {
 *    Segment* sg;
 *
 *    for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next)
 *    {
 *       if (sg == DRD_(g_threadinfo)[i].sg_last
 *           || DRD_(sg_get_refcnt)(sg) > 1)
 *       {
 *          if (   DRD_(vc_lte)(&sg1->vc, &sg->vc)
 *              != DRD_(vc_lte)(&sg2->vc, &sg->vc)
 *              || DRD_(vc_lte)(&sg->vc, &sg1->vc)
 *              != DRD_(vc_lte)(&sg->vc, &sg2->vc))
 *          {
 *             return False;
 *          }
 *       }
 *    }
 * }
 */
static Bool thread_consistent_segment_ordering(const DrdThreadId tid,
                                               Segment* const sg1,
                                               Segment* const sg2)
{
   unsigned i;

   tl_assert(sg1->thr_next);
   tl_assert(sg2->thr_next);
   tl_assert(sg1->thr_next == sg2);
   tl_assert(DRD_(vc_lte)(&sg1->vc, &sg2->vc));

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

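      /*
       * Scan forward from the oldest segment of thread i. For each segment
       * that belongs to the set CS, stop as soon as a segment ordered after
       * sg2 has been found, and report inconsistent ordering if a segment
       * is ordered after sg1 but not after sg2.
       */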
      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg2->vc, &sg->vc))
               break;
            if (DRD_(vc_lte)(&sg1->vc, &sg->vc))
               return False;
         }
      }
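      /*
       * Scan backward from the most recent segment of thread i. Stop as
       * soon as a segment ordered before sg1 has been found, and report
       * inconsistent ordering if a segment is ordered before sg2 but not
       * before sg1.
       */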
      for (sg = DRD_(g_threadinfo)[i].sg_last; sg; sg = sg->thr_prev) {
         if (!sg->thr_next || DRD_(sg_get_refcnt)(sg) > 1) {
            if (DRD_(vc_lte)(&sg->vc, &sg1->vc))
               break;
            if (DRD_(vc_lte)(&sg->vc, &sg2->vc))
               return False;
         }
      }
   }
   return True;
}

/**
 * Merge all segments that may be merged without triggering false positives
 * or discarding real data races. For the theoretical background of segment
 * merging, see also the following paper: Mark Christiaens, Michiel Ronsse
 * and Koen De Bosschere. Bounding the number of segment histories during
 * data race detection. Parallel Computing archive, Volume 28, Issue 9,
 * pp 1221-1238, September 2002. This paper contains a proof that consecutive
 * segments for which the property equiv(s1, s2) holds can be merged without
 * reducing the accuracy of data race detection. Furthermore it is also
 * proven that the total number of all segments will never grow unbounded if
 * all segments s1, s2 for which equiv(s1, s2) holds are merged every time a
 * new segment is created. The property equiv(s1, s2) is defined as follows:
 * equiv(s1, s2) <=> for all segments in the set CS, the vector clocks of
 * segments s and s1 are ordered in the same way as those of segments s and
 * s2. The set CS is defined as the set of existing segments s that have the
 * potential to conflict with not yet created segments, either because the
 * segment s is the latest segment of a thread or because it can become the
 * immediate predecessor of a new segment due to a synchronization operation.
 */
static void thread_merge_segments(void)
{
   unsigned i;

   s_new_segments_since_last_merge = 0;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      Segment* sg;

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif

      for (sg = DRD_(g_threadinfo)[i].sg_first; sg; sg = sg->thr_next) {
         if (DRD_(sg_get_refcnt)(sg) == 1 && sg->thr_next) {
            Segment* const sg_next = sg->thr_next;
            if (DRD_(sg_get_refcnt)(sg_next) == 1
                && sg_next->thr_next
                && thread_consistent_segment_ordering(i, sg, sg_next))
            {
               /* Merge sg and sg_next into sg. */
               DRD_(sg_merge)(sg, sg_next);
               thread_discard_segment(i, sg_next);
            }
         }
      }

#ifdef ENABLE_DRD_CONSISTENCY_CHECKS
      tl_assert(DRD_(sane_ThreadInfo)(&DRD_(g_threadinfo)[i]));
#endif
   }
}

/**
 * Create a new segment for the specified thread, and discard any segments
 * that cannot cause races anymore.
 */
void DRD_(thread_new_segment)(const DrdThreadId tid)
{
   Segment* last_sg;
   Segment* new_sg;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   last_sg = DRD_(g_threadinfo)[tid].sg_last;
   new_sg = DRD_(sg_new)(tid, tid);
   thread_append_segment(tid, new_sg);
   if (tid == DRD_(g_drd_running_tid) && last_sg)
   {
      DRD_(thread_update_conflict_set)(tid, &last_sg->vc);
      s_update_conflict_set_new_sg_count++;
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void DRD_(thread_combine_vc_join)(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_first);
   tl_assert(DRD_(g_threadinfo)[joiner].sg_last);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_first);
   tl_assert(DRD_(g_threadinfo)[joinee].sg_last);

   if (DRD_(sg_get_trace)())
   {
      HChar *str1, *str2;
      str1 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(joinee));
      VG_(message)(Vg_DebugMsg, "Before join: joiner %s, joinee %s\n",
                   str1, str2);
      VG_(free)(str1);
      VG_(free)(str2);
   }
   if (joiner == DRD_(g_drd_running_tid)) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(joiner));
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
      DRD_(thread_update_conflict_set)(joiner, &old_vc);
      s_update_conflict_set_join_count++;
      DRD_(vc_cleanup)(&old_vc);
   } else {
      DRD_(vc_combine)(DRD_(thread_get_vc)(joiner),
                       DRD_(thread_get_vc)(joinee));
   }

   thread_discard_ordered_segments();

   if (DRD_(sg_get_trace)()) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(joiner));
      VG_(message)(Vg_DebugMsg, "After join: %s\n", str);
      VG_(free)(str);
   }
}

/**
 * Update the vector clock of the last segment of thread tid with the
 * vector clock of segment sg.
 */
static void thread_combine_vc_sync(DrdThreadId tid, const Segment* sg)
{
   const VectorClock* const vc = &sg->vc;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(DRD_(g_threadinfo)[tid].sg_first);
   tl_assert(DRD_(g_threadinfo)[tid].sg_last);
   tl_assert(sg);
   tl_assert(vc);

   if (tid != sg->tid) {
      VectorClock old_vc;

      DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
      DRD_(vc_combine)(DRD_(thread_get_vc)(tid), vc);
      if (DRD_(sg_get_trace)()) {
         HChar *str1, *str2;
         str1 = DRD_(vc_aprint)(&old_vc);
         str2 = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
         VG_(message)(Vg_DebugMsg, "thread %u: vc %s -> %s\n", tid, str1, str2);
         VG_(free)(str1);
         VG_(free)(str2);
      }

      thread_discard_ordered_segments();

      DRD_(thread_update_conflict_set)(tid, &old_vc);
      s_update_conflict_set_sync_count++;

      DRD_(vc_cleanup)(&old_vc);
   } else {
      tl_assert(DRD_(vc_lte)(vc, DRD_(thread_get_vc)(tid)));
   }
}

/**
 * Create a new segment for thread tid and update the vector clock of the last
 * segment of this thread with the vector clock of segment sg. Call this
 * function after thread tid had to wait because of thread synchronization
 * until the memory accesses in the segment sg finished.
 */
void DRD_(thread_new_segment_and_combine_vc)(DrdThreadId tid, const Segment* sg)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
   tl_assert(sg);

   thread_append_segment(tid, DRD_(sg_new)(tid, tid));

   thread_combine_vc_sync(tid, sg);

   if (s_segment_merging
       && ++s_new_segments_since_last_merge >= s_segment_merge_interval)
   {
      thread_discard_ordered_segments();
      thread_merge_segments();
   }
}

/**
 * Call this function whenever a thread is no longer using the memory
 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 * increase.
 */
void DRD_(thread_stop_using_mem)(const Addr a1, const Addr a2)
{
   Segment* p;

   for (p = DRD_(g_sg_list); p; p = p->g_next)
      DRD_(bm_clear)(DRD_(sg_bm)(p), a1, a2);

   DRD_(bm_clear)(DRD_(g_conflict_set), a1, a2);
}

/** Specify whether memory loads should be recorded. */
void DRD_(thread_set_record_loads)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_loads = enabled;
}

/** Specify whether memory stores should be recorded. */
void DRD_(thread_set_record_stores)(const DrdThreadId tid, const Bool enabled)
{
   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(enabled == !! enabled);

   DRD_(g_threadinfo)[tid].is_recording_stores = enabled;
}

/**
 * Print the segment information for all threads.
 *
 * This function is only used for debugging purposes.
 */
void DRD_(thread_print_all)(void)
{
   UInt i;
   Segment* p;

   for (i = 0; i < DRD_N_THREADS; i++)
   {
      p = DRD_(g_threadinfo)[i].sg_first;
      if (p) {
         VG_(printf)("**************\n"
                     "* thread %3u (%d/%u/%u/%u/0x%lx/%d) *\n"
                     "**************\n",
                     i,
                     DRD_(g_threadinfo)[i].valid,
                     DRD_(g_threadinfo)[i].vg_thread_exists,
                     DRD_(g_threadinfo)[i].vg_threadid,
                     DRD_(g_threadinfo)[i].posix_thread_exists,
                     DRD_(g_threadinfo)[i].pt_threadid,
                     DRD_(g_threadinfo)[i].detached_posix_thread);
         for ( ; p; p = p->thr_next)
            DRD_(sg_print)(p);
      }
   }
}

/** Show a call stack involved in a data race. */
static void show_call_stack(const DrdThreadId tid, ExeContext* const callstack)
{
   const ThreadId vg_tid = DRD_(DrdThreadIdToVgThreadId)(tid);

   if (vg_tid != VG_INVALID_THREADID) {
      if (callstack)
         VG_(pp_ExeContext)(callstack);
      else
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
   } else {
      if (!VG_(clo_xml))
         VG_(message)(Vg_UserMsg,
                      " (thread finished, call stack no longer available)\n");
   }
}

/** Print information about the segments involved in a data race. */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < DRD_N_THREADS; i++) {
      if (i != tid) {
         Segment* q;

         for (q = DRD_(g_threadinfo)[i].sg_last; q; q = q->thr_prev) {
            /*
             * Since q iterates over the segments of thread i in order of
             * decreasing vector clocks, if q->vc <= p->vc, then
             * q->thr_prev->vc <= p->vc will also hold. Hence, break out of
             * the loop once this condition is met.
             */
            if (DRD_(vc_lte)(&q->vc, &p->vc))
               break;
            if (!DRD_(vc_lte)(&p->vc, &q->vc)) {
               if (DRD_(bm_has_conflict_with)(DRD_(sg_bm)(q), addr,
                                              addr + size, access_type)) {
                  Segment* q_next;

                  tl_assert(q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" <other_segment_start>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment start (thread %u)\n", i);
                  show_call_stack(i, q->stacktrace);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" </other_segment_start>\n"
                                     " <other_segment_end>\n");
                  else
                     VG_(message)(Vg_UserMsg,
                                  "Other segment end (thread %u)\n", i);
                  q_next = q->thr_next;
                  show_call_stack(i, q_next ? q_next->stacktrace : 0);
                  if (VG_(clo_xml))
                     VG_(printf_xml)(" </other_segment_end>\n");
               }
            }
         }
      }
   }
}

/** Print information about all segments involved in a data race. */
void DRD_(thread_report_conflicting_segments)(const DrdThreadId tid,
                                              const Addr addr,
                                              const SizeT size,
                                              const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = DRD_(g_threadinfo)[tid].sg_first; p; p = p->thr_next) {
      if (DRD_(bm_has)(DRD_(sg_bm)(p), addr, addr + size, access_type))
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
   }
}

/**
 * Verify whether the conflict set for thread tid is up to date. Only perform
 * the check if the environment variable DRD_VERIFY_CONFLICT_SET has been set.
 */
static Bool thread_conflict_set_up_to_date(const DrdThreadId tid)
{
   Bool result;
   struct bitmap* computed_conflict_set = 0;

   if (!DRD_(verify_conflict_set))
      return True;

   thread_compute_conflict_set(&computed_conflict_set, tid);
   result = DRD_(bm_equal)(DRD_(g_conflict_set), computed_conflict_set);
   if (! result)
   {
      VG_(printf)("actual conflict set:\n");
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(printf)("\n");
      VG_(printf)("computed conflict set:\n");
      DRD_(bm_print)(computed_conflict_set);
      VG_(printf)("\n");
   }
   DRD_(bm_delete)(computed_conflict_set);
   return result;
}

/**
 * Compute the conflict set: a bitmap that represents the union of all memory
 * accesses of all segments that are unordered to the current segment of the
 * thread tid.
 */
static void thread_compute_conflict_set(struct bitmap** conflict_set,
                                        const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == DRD_(g_drd_running_tid));

   s_compute_conflict_set_count++;
   s_conflict_set_bitmap_creation_count
      -= DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      -= DRD_(bm_get_bitmap2_creation_count)();

   if (*conflict_set) {
      DRD_(bm_cleanup)(*conflict_set);
      DRD_(bm_init)(*conflict_set);
   } else {
      *conflict_set = DRD_(bm_new)();
   }

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "computing conflict set for thread %u with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   p = DRD_(g_threadinfo)[tid].sg_last;
   {
      unsigned j;

      if (s_trace_conflict_set) {
         HChar* vc;

         vc = DRD_(vc_aprint)(&p->vc);
         VG_(message)(Vg_DebugMsg, "conflict set: thread [%u] at vc %s\n",
                      tid, vc);
         VG_(free)(vc);
      }

      for (j = 0; j < DRD_N_THREADS; j++) {
         if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
            Segment* q;

            for (q = DRD_(g_threadinfo)[j].sg_last; q; q = q->thr_prev) {
               if (!DRD_(vc_lte)(&q->vc, &p->vc)
                   && !DRD_(vc_lte)(&p->vc, &q->vc)) {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%u] merging segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
                  DRD_(bm_merge2)(*conflict_set, DRD_(sg_bm)(q));
               } else {
                  if (s_trace_conflict_set) {
                     HChar* str;

                     str = DRD_(vc_aprint)(&q->vc);
                     VG_(message)(Vg_DebugMsg,
                                  "conflict set: [%u] ignoring segment %s\n",
                                  j, str);
                     VG_(free)(str);
                  }
               }
            }
         }
      }
   }

   s_conflict_set_bitmap_creation_count
      += DRD_(bm_get_bitmap_creation_count)();
   s_conflict_set_bitmap2_creation_count
      += DRD_(bm_get_bitmap2_creation_count)();

   if (s_trace_conflict_set_bm) {
      VG_(message)(Vg_DebugMsg, "[%u] new conflict set:\n", tid);
      DRD_(bm_print)(*conflict_set);
      VG_(message)(Vg_DebugMsg, "[%u] end of new conflict set.\n", tid);
   }
}

/**
 * Update the conflict set after the vector clock of thread tid has been
 * updated from old_vc to its current value, either because a new segment has
 * been created or because of a synchronization operation.
 */
void DRD_(thread_update_conflict_set)(const DrdThreadId tid,
                                      const VectorClock* const old_vc)
{
   const VectorClock* new_vc;
   Segment* p;
   unsigned j;

   tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(old_vc);
   tl_assert(tid == DRD_(g_drd_running_tid));
   tl_assert(DRD_(g_conflict_set));

   if (s_trace_conflict_set) {
      HChar* str;

      str = DRD_(vc_aprint)(DRD_(thread_get_vc)(tid));
      VG_(message)(Vg_DebugMsg,
                   "updating conflict set for thread %u with vc %s\n",
                   tid, str);
      VG_(free)(str);
   }

   new_vc = DRD_(thread_get_vc)(tid);
   tl_assert(DRD_(vc_lte)(old_vc, new_vc));

   DRD_(bm_unmark)(DRD_(g_conflict_set));

   for (j = 0; j < DRD_N_THREADS; j++)
   {
      Segment* q;

      if (j == tid || ! DRD_(IsValidDrdThreadId)(j))
         continue;

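      /*
       * First handle the segments of thread j that are not ordered before
       * the new vector clock: such a segment belongs to the new conflict
       * set iff new_vc is not ordered before it either. Mark the segment
       * iff its conflict-set membership changed.
       */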
      for (q = DRD_(g_threadinfo)[j].sg_last;
           q && !DRD_(vc_lte)(&q->vc, new_vc);
           q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%u] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }

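      /*
       * Next, handle the segments that are ordered before the new vector
       * clock but not before the old one; their conflict-set membership may
       * also have changed. Again, mark the segment iff its membership in
       * the conflict set changed.
       */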
      for ( ; q && !DRD_(vc_lte)(&q->vc, old_vc); q = q->thr_prev) {
         const Bool included_in_old_conflict_set
            = !DRD_(vc_lte)(old_vc, &q->vc);
         const Bool included_in_new_conflict_set
            = !DRD_(vc_lte)(&q->vc, new_vc)
              && !DRD_(vc_lte)(new_vc, &q->vc);

         if (UNLIKELY(s_trace_conflict_set)) {
            HChar* str;

            str = DRD_(vc_aprint)(&q->vc);
            VG_(message)(Vg_DebugMsg,
                         "conflict set: [%u] %s segment %s\n", j,
                         included_in_old_conflict_set
                         != included_in_new_conflict_set
                         ? "merging" : "ignoring", str);
            VG_(free)(str);
         }
         if (included_in_old_conflict_set != included_in_new_conflict_set)
            DRD_(bm_mark)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
      }
   }

   DRD_(bm_clear_marked)(DRD_(g_conflict_set));

   p = DRD_(g_threadinfo)[tid].sg_last;
   for (j = 0; j < DRD_N_THREADS; j++) {
      if (j != tid && DRD_(IsValidDrdThreadId)(j)) {
         Segment* q;
         for (q = DRD_(g_threadinfo)[j].sg_last;
              q && !DRD_(vc_lte)(&q->vc, &p->vc);
              q = q->thr_prev) {
            if (!DRD_(vc_lte)(&p->vc, &q->vc))
               DRD_(bm_merge2_marked)(DRD_(g_conflict_set), DRD_(sg_bm)(q));
         }
      }
   }

   DRD_(bm_remove_cleared_marked)(DRD_(g_conflict_set));

   s_update_conflict_set_count++;

   if (s_trace_conflict_set_bm)
   {
      VG_(message)(Vg_DebugMsg, "[%u] updated conflict set:\n", tid);
      DRD_(bm_print)(DRD_(g_conflict_set));
      VG_(message)(Vg_DebugMsg, "[%u] end of updated conflict set.\n", tid);
   }

   tl_assert(thread_conflict_set_up_to_date(DRD_(g_drd_running_tid)));
}

/** Report the number of context switches performed. */
ULong DRD_(thread_get_context_switch_count)(void)
{
   return s_context_switch_count;
}

/** Report the number of ordered segments that have been discarded. */
ULong DRD_(thread_get_discard_ordered_segments_count)(void)
{
   return s_discard_ordered_segments_count;
}

/** Return how many times the conflict set has been updated entirely. */
ULong DRD_(thread_get_compute_conflict_set_count)(void)
{
   return s_compute_conflict_set_count;
}

/** Return how many times the conflict set has been updated partially. */
ULong DRD_(thread_get_update_conflict_set_count)(void)
{
   return s_update_conflict_set_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because a new segment has been created.
 */
ULong DRD_(thread_get_update_conflict_set_new_sg_count)(void)
{
   return s_update_conflict_set_new_sg_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of combining vector clocks due to synchronization operations
 * other than reader/writer lock or barrier operations.
 */
ULong DRD_(thread_get_update_conflict_set_sync_count)(void)
{
   return s_update_conflict_set_sync_count;
}

/**
 * Return how many times the conflict set has been updated partially
 * because of thread joins.
 */
ULong DRD_(thread_get_update_conflict_set_join_count)(void)
{
   return s_update_conflict_set_join_count;
}

/**
 * Return the number of first-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap_creation_count)(void)
{
   return s_conflict_set_bitmap_creation_count;
}

/**
 * Return the number of second-level bitmaps that have been created during
 * conflict set updates.
 */
ULong DRD_(thread_get_conflict_set_bitmap2_creation_count)(void)
{
   return s_conflict_set_bitmap2_creation_count;
}