1 
2 /*--------------------------------------------------------------------*/
3 /*--- Helgrind: a Valgrind tool for detecting errors               ---*/
4 /*--- in threaded programs.                              hg_main.c ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of Helgrind, a Valgrind tool for detecting errors
9    in threaded programs.
10 
11    Copyright (C) 2007-2015 OpenWorks LLP
12       info@open-works.co.uk
13 
14    Copyright (C) 2007-2015 Apple, Inc.
15 
16    This program is free software; you can redistribute it and/or
17    modify it under the terms of the GNU General Public License as
18    published by the Free Software Foundation; either version 2 of the
19    License, or (at your option) any later version.
20 
21    This program is distributed in the hope that it will be useful, but
22    WITHOUT ANY WARRANTY; without even the implied warranty of
23    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
24    General Public License for more details.
25 
26    You should have received a copy of the GNU General Public License
27    along with this program; if not, write to the Free Software
28    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29    02111-1307, USA.
30 
31    The GNU General Public License is contained in the file COPYING.
32 
33    Neither the names of the U.S. Department of Energy nor the
34    University of California nor the names of its contributors may be
35    used to endorse or promote products derived from this software
36    without prior written permission.
37 */
38 
39 #include "pub_tool_basics.h"
40 #include "pub_tool_gdbserver.h"
41 #include "pub_tool_libcassert.h"
42 #include "pub_tool_libcbase.h"
43 #include "pub_tool_libcprint.h"
44 #include "pub_tool_threadstate.h"
45 #include "pub_tool_tooliface.h"
46 #include "pub_tool_hashtable.h"
47 #include "pub_tool_replacemalloc.h"
48 #include "pub_tool_machine.h"
49 #include "pub_tool_options.h"
50 #include "pub_tool_xarray.h"
51 #include "pub_tool_stacktrace.h"
52 #include "pub_tool_wordfm.h"
53 #include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
54 #include "pub_tool_redir.h"     // sonames for the dynamic linkers
55 #include "pub_tool_vki.h"       // VKI_PAGE_SIZE
56 #include "pub_tool_libcproc.h"
57 #include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
58 #include "pub_tool_poolalloc.h"
59 #include "pub_tool_addrinfo.h"
60 
61 #include "hg_basics.h"
62 #include "hg_wordset.h"
63 #include "hg_addrdescr.h"
64 #include "hg_lock_n_thread.h"
65 #include "hg_errors.h"
66 
67 #include "libhb.h"
68 
69 #include "helgrind.h"
70 
71 
72 // FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
73 
74 // FIXME: when client destroys a lock or a CV, remove these
75 // from our mappings, so that the associated SO can be freed up
76 
77 /*----------------------------------------------------------------*/
78 /*---                                                          ---*/
79 /*----------------------------------------------------------------*/
80 
81 /* Note this needs to be compiled with -fno-strict-aliasing, since it
82    contains a whole bunch of calls to lookupFM etc which cast between
83    Word and pointer types.  gcc rightly complains this breaks ANSI C
84    strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
85    worthwhile performance benefits over -O.
86 */
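
/* Illustrative sketch (not part of the original sources): the casts in
   question look like this lookupFM idiom, where a Lock** out-parameter
   is passed as a UWord*:

      Lock* lk = NULL;
      VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );

   Under -O2's strict-aliasing assumptions gcc may miscompile such
   code, hence the -fno-strict-aliasing requirement. */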
87 
88 // FIXME what is supposed to happen to locks in memory which
89 // is relocated as a result of client realloc?
90 
91 // FIXME put referencing ThreadId into Thread and get
92 // rid of the slow reverse mapping function.
93 
94 // FIXME accesses to NoAccess areas: change state to Excl?
95 
96 // FIXME report errors for accesses of NoAccess memory?
97 
98 // FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
99 // the thread still holds the lock.
100 
101 /* ------------ Debug/trace options ------------ */
102 
103 // 0 for silent, 1 for some stuff, 2 for lots of stuff
104 #define SHOW_EVENTS 0
105 
106 
107 static void all__sanity_check ( const HChar* who ); /* fwds */
108 
109 #define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */
110 
111 // 0 for none, 1 for dump at end of run
112 #define SHOW_DATA_STRUCTURES 0
113 
114 
115 /* ------------ Misc comments ------------ */
116 
117 // FIXME: don't hardwire initial entries for root thread.
118 // Instead, let the pre_thread_ll_create handler do this.
119 
120 
121 /*----------------------------------------------------------------*/
122 /*--- Primary data structures                                  ---*/
123 /*----------------------------------------------------------------*/
124 
125 /* Admin linked list of Threads */
126 static Thread* admin_threads = NULL;
127 Thread* get_admin_threads ( void ) { return admin_threads; }
128 
129 /* Admin doubly-linked list of Locks */
130 /* We need a doubly-linked list so that del_LockN can unlink a
131    lock efficiently. */
132 static Lock* admin_locks = NULL;
133 
134 /* Mapping table for core ThreadIds to Thread* */
135 static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
136 
137 /* Mapping table for lock guest addresses to Lock* */
138 static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
139 
140 /* The word-set universes for lock sets. */
141 static WordSetU* univ_lsets = NULL; /* sets of Lock* */
142 static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
143 static Int next_gc_univ_laog = 1;
144 /* univ_laog will be garbage collected when the number of elements in
145    univ_laog is >= next_gc_univ_laog. */
146 
147 /* Allow libhb to get at the universe of locksets stored
148    here.  Sigh. */
149 WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }
150 
151 /* Allow libhb to get at the list of locks stored here.  Ditto
152    sigh. */
153 Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }
154 
155 
156 /*----------------------------------------------------------------*/
157 /*--- Simple helpers for the data structures                   ---*/
158 /*----------------------------------------------------------------*/
159 
160 static UWord stats__lockN_acquires = 0;
161 static UWord stats__lockN_releases = 0;
162 
163 #if defined(VGO_solaris)
164 Bool HG_(clo_ignore_thread_creation) = True;
165 #else
166 Bool HG_(clo_ignore_thread_creation) = False;
167 #endif /* VGO_solaris */
168 
169 static
170 ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
171 
172 /* --------- Constructors --------- */
173 
174 static Thread* mk_Thread ( Thr* hbthr ) {
175    static Int indx      = 1;
176    Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
177    thread->locksetA     = HG_(emptyWS)( univ_lsets );
178    thread->locksetW     = HG_(emptyWS)( univ_lsets );
179    thread->magic        = Thread_MAGIC;
180    thread->hbthr        = hbthr;
181    thread->coretid      = VG_INVALID_THREADID;
182    thread->created_at   = NULL;
183    thread->announced    = False;
184    thread->errmsg_index = indx++;
185    thread->admin        = admin_threads;
186    thread->synchr_nesting = 0;
187    thread->pthread_create_nesting_level = 0;
188 #if defined(VGO_solaris)
189    thread->bind_guard_flag = 0;
190 #endif /* VGO_solaris */
191 
192    admin_threads        = thread;
193    return thread;
194 }
195 
196 // Make a new lock which is unlocked (hence ownerless)
197 // and insert it into the admin_locks doubly-linked list.
198 static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
199    static ULong unique = 0;
200    Lock* lock             = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
201    /* begin: add to double linked list */
202    if (admin_locks)
203       admin_locks->admin_prev = lock;
204    lock->admin_next       = admin_locks;
205    lock->admin_prev       = NULL;
206    admin_locks            = lock;
207    /* end: add */
208    lock->unique           = unique++;
209    lock->magic            = LockN_MAGIC;
210    lock->appeared_at      = NULL;
211    lock->acquired_at      = NULL;
212    lock->hbso             = libhb_so_alloc();
213    lock->guestaddr        = guestaddr;
214    lock->kind             = kind;
215    lock->heldW            = False;
216    lock->heldBy           = NULL;
217    tl_assert(HG_(is_sane_LockN)(lock));
218    return lock;
219 }
220 
221 /* Release storage for a Lock.  Also release storage in .heldBy, if
222    any.  Removes it from the admin_locks doubly-linked list. */
223 static void del_LockN ( Lock* lk )
224 {
225    tl_assert(HG_(is_sane_LockN)(lk));
226    tl_assert(lk->hbso);
227    libhb_so_dealloc(lk->hbso);
228    if (lk->heldBy)
229       VG_(deleteBag)( lk->heldBy );
230    /* begin: del lock from double linked list */
231    if (lk == admin_locks) {
232       tl_assert(lk->admin_prev == NULL);
233       if (lk->admin_next)
234          lk->admin_next->admin_prev = NULL;
235       admin_locks = lk->admin_next;
236    }
237    else {
238       tl_assert(lk->admin_prev != NULL);
239       lk->admin_prev->admin_next = lk->admin_next;
240       if (lk->admin_next)
241          lk->admin_next->admin_prev = lk->admin_prev;
242    }
243    /* end: del */
244    VG_(memset)(lk, 0xAA, sizeof(*lk));
245    HG_(free)(lk);
246 }
247 
248 /* Update 'lk' to reflect that 'thr' now has a write-acquisition of
249    it.  This is done strictly: only combinations resulting from
250    correct program and libpthread behaviour are allowed. */
251 static void lockN_acquire_writer ( Lock* lk, Thread* thr )
252 {
253    tl_assert(HG_(is_sane_LockN)(lk));
254    tl_assert(HG_(is_sane_Thread)(thr));
255 
256    stats__lockN_acquires++;
257 
258    /* EXPOSITION only */
259    /* We need to keep recording snapshots of where the lock was
260       acquired, so as to produce better lock-order error messages. */
261    if (lk->acquired_at == NULL) {
262       ThreadId tid;
263       tl_assert(lk->heldBy == NULL);
264       tid = map_threads_maybe_reverse_lookup_SLOW(thr);
265       lk->acquired_at
266          = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
267    } else {
268       tl_assert(lk->heldBy != NULL);
269    }
270    /* end EXPOSITION only */
271 
272    switch (lk->kind) {
273       case LK_nonRec:
274       case_LK_nonRec:
275          tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
276          tl_assert(!lk->heldW);
277          lk->heldW  = True;
278          lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
279          VG_(addToBag)( lk->heldBy, (UWord)thr );
280          break;
281       case LK_mbRec:
282          if (lk->heldBy == NULL)
283             goto case_LK_nonRec;
284          /* 2nd and subsequent locking of a lock by its owner */
285          tl_assert(lk->heldW);
286          /* assert: lk is only held by one thread .. */
287          tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
288          /* assert: .. and that thread is 'thr'. */
289          tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
290                    == VG_(sizeTotalBag)(lk->heldBy));
291          VG_(addToBag)(lk->heldBy, (UWord)thr);
292          break;
293       case LK_rdwr:
294          tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
295          goto case_LK_nonRec;
296       default:
297          tl_assert(0);
298   }
299   tl_assert(HG_(is_sane_LockN)(lk));
300 }
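
/* For reference only: a summary of the transitions the switch above
   permits for a write-acquisition (anything else asserts, since this
   is only called after a successful acquisition):

      LK_nonRec : unheld          -> W-held once by thr
      LK_mbRec  : unheld          -> W-held once by thr
      LK_mbRec  : W-held by thr   -> W-held by thr, count+1
      LK_rdwr   : unheld          -> W-held once by thr
*/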
301 
302 static void lockN_acquire_reader ( Lock* lk, Thread* thr )
303 {
304    tl_assert(HG_(is_sane_LockN)(lk));
305    tl_assert(HG_(is_sane_Thread)(thr));
306    /* can only add reader to a reader-writer lock. */
307    tl_assert(lk->kind == LK_rdwr);
308    /* lk must be free or already r-held. */
309    tl_assert(lk->heldBy == NULL
310              || (lk->heldBy != NULL && !lk->heldW));
311 
312    stats__lockN_acquires++;
313 
314    /* EXPOSITION only */
315    /* We need to keep recording snapshots of where the lock was
316       acquired, so as to produce better lock-order error messages. */
317    if (lk->acquired_at == NULL) {
318       ThreadId tid;
319       tl_assert(lk->heldBy == NULL);
320       tid = map_threads_maybe_reverse_lookup_SLOW(thr);
321       lk->acquired_at
322          = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
323    } else {
324       tl_assert(lk->heldBy != NULL);
325    }
326    /* end EXPOSITION only */
327 
328    if (lk->heldBy) {
329       VG_(addToBag)(lk->heldBy, (UWord)thr);
330    } else {
331       lk->heldW  = False;
332       lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
333       VG_(addToBag)( lk->heldBy, (UWord)thr );
334    }
335    tl_assert(!lk->heldW);
336    tl_assert(HG_(is_sane_LockN)(lk));
337 }
338 
339 /* Update 'lk' to reflect a release of it by 'thr'.  This is done
340    strictly: only combinations resulting from correct program and
341    libpthread behaviour are allowed. */
342 
343 static void lockN_release ( Lock* lk, Thread* thr )
344 {
345    Bool b;
346    tl_assert(HG_(is_sane_LockN)(lk));
347    tl_assert(HG_(is_sane_Thread)(thr));
348    /* lock must be held by someone */
349    tl_assert(lk->heldBy);
350    stats__lockN_releases++;
351    /* Remove it from the holder set */
352    b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
353    /* thr must actually have been a holder of lk */
354    tl_assert(b);
355    /* normalise */
356    tl_assert(lk->acquired_at);
357    if (VG_(isEmptyBag)(lk->heldBy)) {
358       VG_(deleteBag)(lk->heldBy);
359       lk->heldBy      = NULL;
360       lk->heldW       = False;
361       lk->acquired_at = NULL;
362    }
363    tl_assert(HG_(is_sane_LockN)(lk));
364 }
365 
366 static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
367 {
368    Thread* thr;
369    if (!lk->heldBy) {
370       tl_assert(!lk->heldW);
371       return;
372    }
373    /* for each thread that holds this lock do ... */
374    VG_(initIterBag)( lk->heldBy );
375    while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
376       tl_assert(HG_(is_sane_Thread)(thr));
377       tl_assert(HG_(elemWS)( univ_lsets,
378                              thr->locksetA, (UWord)lk ));
379       thr->locksetA
380          = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
381 
382       if (lk->heldW) {
383          tl_assert(HG_(elemWS)( univ_lsets,
384                                 thr->locksetW, (UWord)lk ));
385          thr->locksetW
386             = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
387       }
388    }
389    VG_(doneIterBag)( lk->heldBy );
390 }
391 
392 
393 /*----------------------------------------------------------------*/
394 /*--- Print out the primary data structures                    ---*/
395 /*----------------------------------------------------------------*/
396 
397 #define PP_THREADS      (1<<1)
398 #define PP_LOCKS        (1<<2)
399 #define PP_ALL (PP_THREADS | PP_LOCKS)
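
/* Illustrative use (see pp_everything below), e.g. for an end-of-run
   dump when SHOW_DATA_STRUCTURES is enabled:
      pp_everything( PP_ALL, "end of run" );
   The string argument merely labels the dump. */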
400 
401 
402 static const Int sHOW_ADMIN = 0;
403 
404 static void space ( Int n )
405 {
406    Int  i;
407    HChar spaces[128+1];
408    tl_assert(n >= 0 && n < 128);
409    if (n == 0)
410       return;
411    for (i = 0; i < n; i++)
412       spaces[i] = ' ';
413    spaces[i] = 0;
414    tl_assert(i < 128+1);
415    VG_(printf)("%s", spaces);
416 }
417 
418 static void pp_Thread ( Int d, Thread* t )
419 {
420    space(d+0); VG_(printf)("Thread %p {\n", t);
421    if (sHOW_ADMIN) {
422    space(d+3); VG_(printf)("admin    %p\n",   t->admin);
423    space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
424    }
425    space(d+3); VG_(printf)("locksetA %d\n",   (Int)t->locksetA);
426    space(d+3); VG_(printf)("locksetW %d\n",   (Int)t->locksetW);
427    space(d+0); VG_(printf)("}\n");
428 }
429 
430 static void pp_admin_threads ( Int d )
431 {
432    Int     i, n;
433    Thread* t;
434    for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
435       /* nothing */
436    }
437    space(d); VG_(printf)("admin_threads (%d records) {\n", n);
438    for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
439       if (0) {
440          space(n);
441          VG_(printf)("admin_threads record %d of %d:\n", i, n);
442       }
443       pp_Thread(d+3, t);
444    }
445    space(d); VG_(printf)("}\n");
446 }
447 
448 static void pp_map_threads ( Int d )
449 {
450    Int i, n = 0;
451    space(d); VG_(printf)("map_threads ");
452    for (i = 0; i < VG_N_THREADS; i++) {
453       if (map_threads[i] != NULL)
454          n++;
455    }
456    VG_(printf)("(%d entries) {\n", n);
457    for (i = 0; i < VG_N_THREADS; i++) {
458       if (map_threads[i] == NULL)
459          continue;
460       space(d+3);
461       VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
462    }
463    space(d); VG_(printf)("}\n");
464 }
465 
466 static const HChar* show_LockKind ( LockKind lkk ) {
467    switch (lkk) {
468       case LK_mbRec:  return "mbRec";
469       case LK_nonRec: return "nonRec";
470       case LK_rdwr:   return "rdwr";
471       default:        tl_assert(0);
472    }
473 }
474 
475 /* Pretty-print lock lk.
476    If show_lock_addrdescr, describe the (guest) lock address
477    (the description is more complete with --read-var-info=yes).
478    If show_internal_data, also show Helgrind-internal information.
479    d is the level at which output is indented. */
480 static void pp_Lock ( Int d, Lock* lk,
481                       Bool show_lock_addrdescr,
482                       Bool show_internal_data)
483 {
484    space(d+0);
485    if (show_internal_data)
486       VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
487    else
488       VG_(printf)("Lock ga %#lx {\n", lk->guestaddr);
489    if (!show_lock_addrdescr
490        || !HG_(get_and_pp_addrdescr) ((Addr) lk->guestaddr))
491       VG_(printf)("\n");
492 
493    if (sHOW_ADMIN) {
494       space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
495       space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
496       space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
497    }
498    if (show_internal_data) {
499       space(d+3); VG_(printf)("unique %llu\n", lk->unique);
500    }
501    space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
502    if (show_internal_data) {
503       space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
504    }
505    if (show_internal_data) {
506       space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
507    }
508    if (lk->heldBy) {
509       Thread* thr;
510       UWord   count;
511       VG_(printf)(" { ");
512       VG_(initIterBag)( lk->heldBy );
513       while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
514          if (show_internal_data)
515             VG_(printf)("%lu:%p ", count, thr);
516          else {
517             VG_(printf)("%c%lu:thread #%d ",
518                         lk->heldW ? 'W' : 'R',
519                         count, thr->errmsg_index);
520             if (thr->coretid == VG_INVALID_THREADID)
521                VG_(printf)("tid (exited) ");
522             else
523                VG_(printf)("tid %u ", thr->coretid);
524 
525          }
526       }
527       VG_(doneIterBag)( lk->heldBy );
528       VG_(printf)("}\n");
529    }
530    space(d+0); VG_(printf)("}\n");
531 }
532 
533 static void pp_admin_locks ( Int d )
534 {
535    Int   i, n;
536    Lock* lk;
537    for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
538       /* nothing */
539    }
540    space(d); VG_(printf)("admin_locks (%d records) {\n", n);
541    for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
542       if (0) {
543          space(n);
544          VG_(printf)("admin_locks record %d of %d:\n", i, n);
545       }
546       pp_Lock(d+3, lk,
547               False /* show_lock_addrdescr */,
548               True /* show_internal_data */);
549    }
550    space(d); VG_(printf)("}\n");
551 }
552 
553 static void pp_map_locks ( Int d)
554 {
555    void* gla;
556    Lock* lk;
557    space(d); VG_(printf)("map_locks (%d entries) {\n",
558                          (Int)VG_(sizeFM)( map_locks ));
559    VG_(initIterFM)( map_locks );
560    while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
561                                       (UWord*)&lk )) {
562       space(d+3);
563       VG_(printf)("guest %p -> Lock %p\n", gla, lk);
564    }
565    VG_(doneIterFM)( map_locks );
566    space(d); VG_(printf)("}\n");
567 }
568 
569 static void pp_everything ( Int flags, const HChar* caller )
570 {
571    Int d = 0;
572    VG_(printf)("\n");
573    VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
574    if (flags & PP_THREADS) {
575       VG_(printf)("\n");
576       pp_admin_threads(d+3);
577       VG_(printf)("\n");
578       pp_map_threads(d+3);
579    }
580    if (flags & PP_LOCKS) {
581       VG_(printf)("\n");
582       pp_admin_locks(d+3);
583       VG_(printf)("\n");
584       pp_map_locks(d+3);
585    }
586 
587    VG_(printf)("\n");
588    VG_(printf)("}\n");
589    VG_(printf)("\n");
590 }
591 
592 #undef SHOW_ADMIN
593 
594 
595 /*----------------------------------------------------------------*/
596 /*--- Initialise the primary data structures                   ---*/
597 /*----------------------------------------------------------------*/
598 
599 static void initialise_data_structures ( Thr* hbthr_root )
600 {
601    Thread*   thr;
602    WordSetID wsid;
603 
604    /* Get everything initialised and zeroed. */
605    tl_assert(admin_threads == NULL);
606    tl_assert(admin_locks == NULL);
607 
608    tl_assert(map_threads == NULL);
609    map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
610 
611    tl_assert(sizeof(Addr) == sizeof(UWord));
612    tl_assert(map_locks == NULL);
613    map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
614                            NULL/*unboxed Word cmp*/);
615 
616    tl_assert(univ_lsets == NULL);
617    univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
618                                   8/*cacheSize*/ );
619    tl_assert(univ_lsets != NULL);
620    /* Ensure that univ_lsets is non-empty, with lockset zero being the
621       empty lockset.  hg_errors.c relies on the assumption that
622       lockset number zero in univ_lsets is always valid. */
623    wsid = HG_(emptyWS)(univ_lsets);
624    tl_assert(wsid == 0);
625 
626    tl_assert(univ_laog == NULL);
627    if (HG_(clo_track_lockorders)) {
628       univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
629                                     HG_(free), 24/*cacheSize*/ );
630       tl_assert(univ_laog != NULL);
631    }
632 
633    /* Set up entries for the root thread */
634    // FIXME: this assumes that the first real ThreadId is 1
635 
636    /* a Thread for the new thread ... */
637    thr = mk_Thread(hbthr_root);
638    thr->coretid = 1; /* FIXME: hardwires an assumption about the
639                         identity of the root thread. */
640    tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
641    libhb_set_Thr_hgthread(hbthr_root, thr);
642 
643    /* and bind it in the thread-map table. */
644    tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
645    tl_assert(thr->coretid != VG_INVALID_THREADID);
646 
647    map_threads[thr->coretid] = thr;
648 
649    tl_assert(VG_INVALID_THREADID == 0);
650 
651    all__sanity_check("initialise_data_structures");
652 }
653 
654 
655 /*----------------------------------------------------------------*/
656 /*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
657 /*----------------------------------------------------------------*/
658 
659 /* Doesn't assert if the relevant map_threads entry is NULL. */
660 static Thread* map_threads_maybe_lookup ( ThreadId coretid )
661 {
662    Thread* thr;
663    tl_assert( HG_(is_sane_ThreadId)(coretid) );
664    thr = map_threads[coretid];
665    return thr;
666 }
667 
668 /* Asserts if the relevant map_threads entry is NULL. */
669 static inline Thread* map_threads_lookup ( ThreadId coretid )
670 {
671    Thread* thr;
672    tl_assert( HG_(is_sane_ThreadId)(coretid) );
673    thr = map_threads[coretid];
674    tl_assert(thr);
675    return thr;
676 }
677 
678 /* Do a reverse lookup.  Does not assert if 'thr' is not found in
679    map_threads. */
680 static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
681 {
682    ThreadId tid;
683    tl_assert(HG_(is_sane_Thread)(thr));
684    /* Check nobody used the invalid-threadid slot */
685    tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
686    tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
687    tid = thr->coretid;
688    tl_assert(HG_(is_sane_ThreadId)(tid));
689    return tid;
690 }
691 
692 /* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
693    is not found in map_threads. */
694 static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
695 {
696    ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
697    tl_assert(tid != VG_INVALID_THREADID);
698    tl_assert(map_threads[tid]);
699    tl_assert(map_threads[tid]->coretid == tid);
700    return tid;
701 }
702 
703 static void map_threads_delete ( ThreadId coretid )
704 {
705    Thread* thr;
706    tl_assert(coretid != 0);
707    tl_assert( HG_(is_sane_ThreadId)(coretid) );
708    thr = map_threads[coretid];
709    tl_assert(thr);
710    map_threads[coretid] = NULL;
711 }
712 
713 static void HG_(thread_enter_synchr)(Thread *thr) {
714    tl_assert(thr->synchr_nesting >= 0);
715 #if defined(VGO_solaris)
716    thr->synchr_nesting += 1;
717 #endif /* VGO_solaris */
718 }
719 
720 static void HG_(thread_leave_synchr)(Thread *thr) {
721 #if defined(VGO_solaris)
722    thr->synchr_nesting -= 1;
723 #endif /* VGO_solaris */
724    tl_assert(thr->synchr_nesting >= 0);
725 }
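
/* Note: the counter is only adjusted under VGO_solaris; on other
   targets the two helpers above reduce to the assertions, so
   thr->synchr_nesting stays at zero. */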
726 
727 static void HG_(thread_enter_pthread_create)(Thread *thr) {
728    tl_assert(thr->pthread_create_nesting_level >= 0);
729    thr->pthread_create_nesting_level += 1;
730 }
731 
732 static void HG_(thread_leave_pthread_create)(Thread *thr) {
733    tl_assert(thr->pthread_create_nesting_level > 0);
734    thr->pthread_create_nesting_level -= 1;
735 }
736 
737 static Int HG_(get_pthread_create_nesting_level)(ThreadId tid) {
738    Thread *thr = map_threads_maybe_lookup(tid);
739    return thr->pthread_create_nesting_level;
740 }
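
/* Note: the function above uses the "maybe" lookup but dereferences
   the result unconditionally, so it must only be called with a tid
   that currently has a Thread bound in map_threads. */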
741 
742 /*----------------------------------------------------------------*/
743 /*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
744 /*----------------------------------------------------------------*/
745 
746 /* Make sure there is a lock table entry for the given (lock) guest
747    address.  If not, create one of the stated 'kind' in unheld state.
748    In any case, return the address of the existing or new Lock. */
749 static
750 Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
751 {
752    Bool  found;
753    Lock* oldlock = NULL;
754    tl_assert(HG_(is_sane_ThreadId)(tid));
755    found = VG_(lookupFM)( map_locks,
756                           NULL, (UWord*)&oldlock, (UWord)ga );
757    if (!found) {
758       Lock* lock = mk_LockN(lkk, ga);
759       lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
760       tl_assert(HG_(is_sane_LockN)(lock));
761       VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
762       tl_assert(oldlock == NULL);
763       return lock;
764    } else {
765       tl_assert(oldlock != NULL);
766       tl_assert(HG_(is_sane_LockN)(oldlock));
767       tl_assert(oldlock->guestaddr == ga);
768       return oldlock;
769    }
770 }
771 
772 static Lock* map_locks_maybe_lookup ( Addr ga )
773 {
774    Bool  found;
775    Lock* lk = NULL;
776    found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
777    tl_assert(found  ?  lk != NULL  :  lk == NULL);
778    return lk;
779 }
780 
781 static void map_locks_delete ( Addr ga )
782 {
783    Addr  ga2 = 0;
784    Lock* lk  = NULL;
785    VG_(delFromFM)( map_locks,
786                    (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
787    /* delFromFM produces the val which is being deleted, if it is
788       found.  So assert it is non-null; that in effect asserts that we
789       are deleting a (ga, Lock) pair which actually exists. */
790    tl_assert(lk != NULL);
791    tl_assert(ga2 == ga);
792 }
793 
794 
795 
796 /*----------------------------------------------------------------*/
797 /*--- Sanity checking the data structures                      ---*/
798 /*----------------------------------------------------------------*/
799 
800 static UWord stats__sanity_checks = 0;
801 
802 static void laog__sanity_check ( const HChar* who ); /* fwds */
803 
804 /* REQUIRED INVARIANTS:
805 
806    Thread vs Segment/Lock/SecMaps
807 
808       for each t in Threads {
809 
810          // Thread.lockset: each element is really a valid Lock
811 
812          // Thread.lockset: each Lock in set is actually held by that thread
813          for lk in Thread.lockset
814             lk == LockedBy(t)
815 
816          // Thread.csegid is a valid SegmentID
817          // and the associated Segment has .thr == t
818 
819       }
820 
821       all thread Locksets are pairwise empty under intersection
822       (that is, no lock is claimed to be held by more than one thread)
823       -- this is guaranteed if all locks in locksets point back to their
824       owner threads
825 
826    Lock vs Thread/Segment/SecMaps
827 
828       for each entry (gla, la) in map_locks
829          gla == la->guest_addr
830 
831       for each lk in Locks {
832 
833          lk->tag is valid
834          lk->guest_addr does not have shadow state NoAccess
835          if lk == LockedBy(t), then t->lockset contains lk
836          if lk == UnlockedBy(segid) then segid is valid SegmentID
837              and can be mapped to a valid Segment(seg)
838              and seg->thr->lockset does not contain lk
839          if lk == UnlockedNew then (no lockset contains lk)
840 
841          secmaps for lk has .mbHasLocks == True
842 
843       }
844 
845    Segment vs Thread/Lock/SecMaps
846 
847       the Segment graph is a dag (no cycles)
848       all of the Segment graph must be reachable from the segids
849          mentioned in the Threads
850 
851       for seg in Segments {
852 
853          seg->thr is a sane Thread
854 
855       }
856 
857    SecMaps vs Segment/Thread/Lock
858 
859       for sm in SecMaps {
860 
861          sm properly aligned
862          if any shadow word is ShR or ShM then .mbHasShared == True
863 
864          for each Excl(segid) state
865             map_segments_lookup maps to a sane Segment(seg)
866          for each ShM/ShR(tsetid,lsetid) state
867             each lk in lset is a valid Lock
868             each thr in tset is a valid thread, which is non-dead
869 
870       }
871 */
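
/* The invariants above are (partially) checked at runtime by
   threads__sanity_check() and locks__sanity_check() below.  The
   Segment/SecMap clauses appear to predate the libhb-based design
   (cf. the commented-out segment code further down) and are retained
   for exposition only. */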
872 
873 
874 /* Return True iff 'thr' holds 'lk' in some mode. */
875 static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
876 {
877    if (lk->heldBy)
878       return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
879    else
880       return False;
881 }
882 
883 /* Sanity check Threads, as far as possible */
884 __attribute__((noinline))
885 static void threads__sanity_check ( const HChar* who )
886 {
887 #define BAD(_str) do { how = (_str); goto bad; } while (0)
888    const HChar* how = "no error";
889    Thread*   thr;
890    WordSetID wsA, wsW;
891    UWord*    ls_words;
892    UWord     ls_size, i;
893    Lock*     lk;
894    for (thr = admin_threads; thr; thr = thr->admin) {
895       if (!HG_(is_sane_Thread)(thr)) BAD("1");
896       wsA = thr->locksetA;
897       wsW = thr->locksetW;
898       // locks held in W mode are a subset of all locks held
899       if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
900       HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
901       for (i = 0; i < ls_size; i++) {
902          lk = (Lock*)ls_words[i];
903          // Thread.lockset: each element is really a valid Lock
904          if (!HG_(is_sane_LockN)(lk)) BAD("2");
905          // Thread.lockset: each Lock in set is actually held by that
906          // thread
907          if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
908       }
909    }
910    return;
911   bad:
912    VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
913    tl_assert(0);
914 #undef BAD
915 }
916 
917 
918 /* Sanity check Locks, as far as possible */
919 __attribute__((noinline))
920 static void locks__sanity_check ( const HChar* who )
921 {
922 #define BAD(_str) do { how = (_str); goto bad; } while (0)
923    const HChar* how = "no error";
924    Addr      gla;
925    Lock*     lk;
926    Int       i;
927    // # entries in admin_locks == # entries in map_locks
928    for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
929       ;
930    if (i != VG_(sizeFM)(map_locks)) BAD("1");
931    // for each entry (gla, lk) in map_locks
932    //      gla == lk->guest_addr
933    VG_(initIterFM)( map_locks );
934    while (VG_(nextIterFM)( map_locks,
935                            (UWord*)&gla, (UWord*)&lk )) {
936       if (lk->guestaddr != gla) BAD("2");
937    }
938    VG_(doneIterFM)( map_locks );
939    // scan through admin_locks ...
940    for (lk = admin_locks; lk; lk = lk->admin_next) {
941       // lock is sane.  Quite comprehensive, also checks that
942       // referenced (holder) threads are sane.
943       if (!HG_(is_sane_LockN)(lk)) BAD("3");
944       // map_locks binds guest address back to this lock
945       if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
946       // look at all threads mentioned as holders of this lock.  Ensure
947       // this lock is mentioned in their locksets.
948       if (lk->heldBy) {
949          Thread* thr;
950          UWord   count;
951          VG_(initIterBag)( lk->heldBy );
952          while (VG_(nextIterBag)( lk->heldBy,
953                                   (UWord*)&thr, &count )) {
954             // HG_(is_sane_LockN) above ensures these
955             tl_assert(count >= 1);
956             tl_assert(HG_(is_sane_Thread)(thr));
957             if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
958                BAD("6");
959             // also check the w-only lockset
960             if (lk->heldW
961                 && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
962                BAD("7");
963             if ((!lk->heldW)
964                 && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
965                BAD("8");
966          }
967          VG_(doneIterBag)( lk->heldBy );
968       } else {
969          /* lock not held by anybody */
970          if (lk->heldW) BAD("9"); /* should be False if !heldBy */
971          // since lk is unheld, then (no lockset contains lk)
972          // hmm, this is really too expensive to check.  Hmm.
973       }
974    }
975 
976    return;
977   bad:
978    VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
979    tl_assert(0);
980 #undef BAD
981 }
982 
983 
984 static void all_except_Locks__sanity_check ( const HChar* who ) {
985    stats__sanity_checks++;
986    if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
987    threads__sanity_check(who);
988    if (HG_(clo_track_lockorders))
989       laog__sanity_check(who);
990 }
991 static void all__sanity_check ( const HChar* who ) {
992    all_except_Locks__sanity_check(who);
993    locks__sanity_check(who);
994 }
995 
996 
997 /*----------------------------------------------------------------*/
998 /*--- Shadow value and address range handlers                  ---*/
999 /*----------------------------------------------------------------*/
1000 
1001 static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
1002 //static void laog__handle_lock_deletions    ( WordSetID ); /* fwds */
1003 static inline Thread* get_current_Thread ( void ); /* fwds */
1004 __attribute__((noinline))
1005 static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
1006 
1007 
1008 /* Block-copy states (needed for implementing realloc()). */
1009 /* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1010    Is that a problem? (hence 'scopy' rather than 'ccopy') */
1011 static void shadow_mem_scopy_range ( Thread* thr,
1012                                      Addr src, Addr dst, SizeT len )
1013 {
1014    Thr*     hbthr = thr->hbthr;
1015    tl_assert(hbthr);
1016    libhb_copy_shadow_state( hbthr, src, dst, len );
1017 }
1018 
1019 static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1020 {
1021    Thr*     hbthr = thr->hbthr;
1022    tl_assert(hbthr);
1023    LIBHB_CREAD_N(hbthr, a, len);
1024 }
1025 
1026 static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1027    Thr*     hbthr = thr->hbthr;
1028    tl_assert(hbthr);
1029    LIBHB_CWRITE_N(hbthr, a, len);
1030 }
1031 
1032 static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1033 {
1034    libhb_srange_new( thr->hbthr, a, len );
1035 }
1036 
1037 static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
1038 {
1039    if (0 && len > 500)
1040       VG_(printf)("make NoAccess_NoFX ( %#lx, %lu )\n", aIN, len );
1041    // has no effect (NoFX)
1042    libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
1043 }
1044 
1045 static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
1046 {
1047    if (0 && len > 500)
1048       VG_(printf)("make NoAccess_AHAE ( %#lx, %lu )\n", aIN, len );
1049    // Actually Has An Effect (AHAE)
1050    libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
1051 }
1052 
1053 static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1054 {
1055    if (0 && len > 500)
1056       VG_(printf)("make Untracked ( %#lx, %lu )\n", aIN, len );
1057    libhb_srange_untrack( thr->hbthr, aIN, len );
1058 }
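
/* Quick reference for the helpers above: make_New marks a range as
   freshly allocated; NoAccess_NoFX is the no-effect variant (per its
   comment); NoAccess_AHAE actually marks the range inaccessible;
   Untracked (presumably) excludes the range from further race
   tracking.  Each is a thin wrapper over the corresponding
   libhb_srange_* call. */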
1059 
1060 
1061 /*----------------------------------------------------------------*/
1062 /*--- Event handlers (evh__* functions)                        ---*/
1063 /*--- plus helpers (evhH__* functions)                         ---*/
1064 /*----------------------------------------------------------------*/
1065 
1066 /*--------- Event handler helpers (evhH__* functions) ---------*/
1067 
1068 /* Create a new segment for 'thr', making it depend (.prev) on its
1069    existing segment, bind together the SegmentID and Segment, and
1070    return both of them.  Also update 'thr' so it references the new
1071    Segment. */
1072 //zz static
1073 //zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1074 //zz                                           /*OUT*/Segment** new_segP,
1075 //zz                                           Thread* thr )
1076 //zz {
1077 //zz    Segment* cur_seg;
1078 //zz    tl_assert(new_segP);
1079 //zz    tl_assert(new_segidP);
1080 //zz    tl_assert(HG_(is_sane_Thread)(thr));
1081 //zz    cur_seg = map_segments_lookup( thr->csegid );
1082 //zz    tl_assert(cur_seg);
1083 //zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1084 //zz                                       at their owner thread. */
1085 //zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1086 //zz    *new_segidP = alloc_SegmentID();
1087 //zz    map_segments_add( *new_segidP, *new_segP );
1088 //zz    thr->csegid = *new_segidP;
1089 //zz }
1090 
1091 
1092 /* The lock at 'lock_ga' has acquired a writer.  Make all necessary
1093    updates, and also do all possible error checks. */
1094 static
1095 void evhH__post_thread_w_acquires_lock ( Thread* thr,
1096                                          LockKind lkk, Addr lock_ga )
1097 {
1098    Lock* lk;
1099 
1100    /* Basically what we need to do is call lockN_acquire_writer.
1101       However, that will barf if any 'invalid' lock states would
1102       result.  Therefore check before calling.  Side effect is that
1103       'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1104       routine.
1105 
1106       Because this routine is only called after successful lock
1107       acquisition, we should not be asked to move the lock into any
1108       invalid states.  Requests to do so are bugs in libpthread, since
1109       that should have rejected any such requests. */
1110 
1111    tl_assert(HG_(is_sane_Thread)(thr));
1112    /* Try to find the lock.  If we can't, then create a new one with
1113       kind 'lkk'. */
1114    lk = map_locks_lookup_or_create(
1115            lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1116    tl_assert( HG_(is_sane_LockN)(lk) );
1117 
1118    /* check libhb level entities exist */
1119    tl_assert(thr->hbthr);
1120    tl_assert(lk->hbso);
1121 
1122    if (lk->heldBy == NULL) {
1123       /* the lock isn't held.  Simple. */
1124       tl_assert(!lk->heldW);
1125       lockN_acquire_writer( lk, thr );
1126       /* acquire a dependency from the lock's VCs */
1127       libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1128       goto noerror;
1129    }
1130 
1131    /* So the lock is already held.  If held as a r-lock then
1132       libpthread must be buggy. */
1133    tl_assert(lk->heldBy);
1134    if (!lk->heldW) {
1135       HG_(record_error_Misc)(
1136          thr, "Bug in libpthread: write lock "
1137               "granted on rwlock which is currently rd-held");
1138       goto error;
1139    }
1140 
1141    /* So the lock is held in w-mode.  If it's held by some other
1142       thread, then libpthread must be buggy. */
1143    tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
1144 
1145    if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
1146       HG_(record_error_Misc)(
1147          thr, "Bug in libpthread: write lock "
1148               "granted on mutex/rwlock which is currently "
1149               "wr-held by a different thread");
1150       goto error;
1151    }
1152 
1153    /* So the lock is already held in w-mode by 'thr'.  That means this
1154       is an attempt to lock it recursively, which is only allowable
1155       for LK_mbRec kinded locks.  Since this routine is called only
1156       once the lock has been acquired, this must also be a libpthread
1157       bug. */
1158    if (lk->kind != LK_mbRec) {
1159       HG_(record_error_Misc)(
1160          thr, "Bug in libpthread: recursive write lock "
1161               "granted on mutex/wrlock which does not "
1162               "support recursion");
1163       goto error;
1164    }
1165 
1166    /* So we are recursively re-locking a lock we already w-hold. */
1167    lockN_acquire_writer( lk, thr );
1168    /* acquire a dependency from the lock's VC.  Probably pointless,
1169       but also harmless. */
1170    libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
1171    goto noerror;
1172 
1173   noerror:
1174    if (HG_(clo_track_lockorders)) {
1175       /* check lock order acquisition graph, and update.  This has to
1176          happen before the lock is added to the thread's locksetA/W. */
1177       laog__pre_thread_acquires_lock( thr, lk );
1178    }
1179    /* update the thread's held-locks set */
1180    thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1181    thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
1182    /* fall through */
1183 
1184   error:
1185    tl_assert(HG_(is_sane_LockN)(lk));
1186 }
1187 
1188 
1189 /* The lock at 'lock_ga' has acquired a reader.  Make all necessary
1190    updates, and also do all possible error checks. */
1191 static
1192 void evhH__post_thread_r_acquires_lock ( Thread* thr,
1193                                          LockKind lkk, Addr lock_ga )
1194 {
1195    Lock* lk;
1196 
1197    /* Basically what we need to do is call lockN_acquire_reader.
1198       However, that will barf if any 'invalid' lock states would
1199       result.  Therefore check before calling.  Side effect is that
1200       'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
1201       routine.
1202 
1203       Because this routine is only called after successful lock
1204       acquisition, we should not be asked to move the lock into any
1205       invalid states.  Requests to do so are bugs in libpthread, since
1206       that should have rejected any such requests. */
1207 
1208    tl_assert(HG_(is_sane_Thread)(thr));
1209    /* Try to find the lock.  If we can't, then create a new one with
1210       kind 'lkk'.  Only a reader-writer lock can be read-locked,
1211       hence the first assertion. */
1212    tl_assert(lkk == LK_rdwr);
1213    lk = map_locks_lookup_or_create(
1214            lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
1215    tl_assert( HG_(is_sane_LockN)(lk) );
1216 
1217    /* check libhb level entities exist */
1218    tl_assert(thr->hbthr);
1219    tl_assert(lk->hbso);
1220 
1221    if (lk->heldBy == NULL) {
1222       /* the lock isn't held.  Simple. */
1223       tl_assert(!lk->heldW);
1224       lockN_acquire_reader( lk, thr );
1225       /* acquire a dependency from the lock's VC */
1226       libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1227       goto noerror;
1228    }
1229 
1230    /* So the lock is already held.  If held as a w-lock then
1231       libpthread must be buggy. */
1232    tl_assert(lk->heldBy);
1233    if (lk->heldW) {
1234       HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1235                                    "granted on rwlock which is "
1236                                    "currently wr-held");
1237       goto error;
1238    }
1239 
1240    /* Easy enough.  In short anybody can get a read-lock on a rwlock
1241       provided it is either unlocked or already rd-held. */
1242    lockN_acquire_reader( lk, thr );
1243    /* acquire a dependency from the lock's VC.  Probably pointless,
1244       but also harmless. */
1245    libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
1246    goto noerror;
1247 
1248   noerror:
1249    if (HG_(clo_track_lockorders)) {
1250       /* check lock order acquisition graph, and update.  This has to
1251          happen before the lock is added to the thread's locksetA/W. */
1252       laog__pre_thread_acquires_lock( thr, lk );
1253    }
1254    /* update the thread's held-locks set */
1255    thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
1256    /* but don't update thr->locksetW, since lk is only rd-held */
1257    /* fall through */
1258 
1259   error:
1260    tl_assert(HG_(is_sane_LockN)(lk));
1261 }
1262 
1263 
1264 /* The lock at 'lock_ga' is just about to be unlocked.  Make all
1265    necessary updates, and also do all possible error checks. */
1266 static
1267 void evhH__pre_thread_releases_lock ( Thread* thr,
1268                                       Addr lock_ga, Bool isRDWR )
1269 {
1270    Lock* lock;
1271    Word  n;
1272    Bool  was_heldW;
1273 
1274    /* This routine is called prior to a lock release, before
1275       libpthread has had a chance to validate the call.  Hence we need
1276       to detect and reject any attempts to move the lock into an
1277       invalid state.  Such attempts are bugs in the client.
1278 
1279       isRDWR is True if we know from the wrapper context that lock_ga
1280       should refer to a reader-writer lock, and is False if [ditto]
1281       lock_ga should refer to a standard mutex. */
1282 
1283    tl_assert(HG_(is_sane_Thread)(thr));
1284    lock = map_locks_maybe_lookup( lock_ga );
1285 
1286    if (!lock) {
1287       /* We know nothing about a lock at 'lock_ga'.  Nevertheless
1288          the client is trying to unlock it.  So complain, then ignore
1289          the attempt. */
1290       HG_(record_error_UnlockBogus)( thr, lock_ga );
1291       return;
1292    }
1293 
1294    tl_assert(lock->guestaddr == lock_ga);
1295    tl_assert(HG_(is_sane_LockN)(lock));
1296 
1297    if (isRDWR && lock->kind != LK_rdwr) {
1298       HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1299                                    "pthread_mutex_t* argument " );
1300    }
1301    if ((!isRDWR) && lock->kind == LK_rdwr) {
1302       HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1303                                    "pthread_rwlock_t* argument " );
1304    }
1305 
1306    if (!lock->heldBy) {
1307       /* The lock is not held.  This indicates a serious bug in the
1308          client. */
1309       tl_assert(!lock->heldW);
1310       HG_(record_error_UnlockUnlocked)( thr, lock );
1311       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1312       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1313       goto error;
1314    }
1315 
1316    /* test just above dominates */
1317    tl_assert(lock->heldBy);
1318    was_heldW = lock->heldW;
1319 
1320    /* The lock is held.  Is this thread one of the holders?  If not,
1321       report a bug in the client. */
1322    n = VG_(elemBag)( lock->heldBy, (UWord)thr );
1323    tl_assert(n >= 0);
1324    if (n == 0) {
1325       /* We are not a current holder of the lock.  This is a bug in
1326          the guest, and (per POSIX pthread rules) the unlock
1327          attempt will fail.  So just complain and do nothing
1328          else. */
1329       Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
1330       tl_assert(HG_(is_sane_Thread)(realOwner));
1331       tl_assert(realOwner != thr);
1332       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1333       tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1334       HG_(record_error_UnlockForeign)( thr, realOwner, lock );
1335       goto error;
1336    }
1337 
1338    /* Ok, we hold the lock 'n' times. */
1339    tl_assert(n >= 1);
1340 
1341    lockN_release( lock, thr );
1342 
1343    n--;
1344    tl_assert(n >= 0);
1345 
1346    if (n > 0) {
1347       tl_assert(lock->heldBy);
1348       tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1349       /* We still hold the lock.  So either it's a recursive lock
1350          or a rwlock which is currently r-held. */
1351       tl_assert(lock->kind == LK_mbRec
1352                 || (lock->kind == LK_rdwr && !lock->heldW));
1353       tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
1354       if (lock->heldW)
1355          tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1356       else
1357          tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
1358    } else {
1359       /* n is zero.  This means we don't hold the lock any more.  But
1360          if it's a rwlock held in r-mode, someone else could still
1361          hold it.  Just do whatever sanity checks we can. */
1362       if (lock->kind == LK_rdwr && lock->heldBy) {
1363          /* It's a rwlock.  We no longer hold it but we used to;
1364             nevertheless it still appears to be held by someone else.
1365             The implication is that, prior to this release, it must
1366             have been shared by us and whoever else is holding it;
1367             which in turn implies it must be r-held, since a lock
1368             can't be w-held by more than one thread. */
1369          /* The lock is now R-held by somebody else: */
1370          tl_assert(lock->heldW == False);
1371       } else {
1372          /* Normal case.  It's either not a rwlock, or it's a rwlock
1373             that we used to hold in w-mode (which is pretty much the
1374             same thing as a non-rwlock.)  Since this transaction is
1375             atomic (V does not allow multiple threads to run
1376             simultaneously), it must mean the lock is now not held by
1377             anybody.  Hence assert for it. */
1378          /* The lock is now not held by anybody: */
1379          tl_assert(!lock->heldBy);
1380          tl_assert(lock->heldW == False);
1381       }
1382       //if (lock->heldBy) {
1383       //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
1384       //}
1385       /* update this thread's lockset accordingly. */
1386       thr->locksetA
1387          = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
1388       thr->locksetW
1389          = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
1390       /* push our VC into the lock */
1391       tl_assert(thr->hbthr);
1392       tl_assert(lock->hbso);
1393       /* If the lock was previously W-held, then we want to do a
1394          strong send, and if previously R-held, then a weak send. */
1395       libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
1396    }
1397    /* fall through */
1398 
1399   error:
1400    tl_assert(HG_(is_sane_LockN)(lock));
1401 }
1402 
1403 
1404 /* ---------------------------------------------------------- */
1405 /* -------- Event handlers proper (evh__* functions) -------- */
1406 /* ---------------------------------------------------------- */
1407 
1408 /* What is the Thread* for the currently running thread?  This is
1409    absolutely performance critical.  We receive notifications from the
1410    core for client code starts/stops, and cache the looked-up result
1411    in 'current_Thread'.  Hence, for the vast majority of requests,
1412    finding the current thread reduces to a read of a global variable,
1413    provided get_current_Thread_in_C_C is inlined.
1414 
1415    Outside of client code, current_Thread is NULL, and presumably
1416    any uses of it will cause a segfault.  Hence:
1417 
1418    - for uses definitely within client code, use
1419      get_current_Thread_in_C_C.
1420 
1421    - for all other uses, use get_current_Thread.
1422 */
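
/* Practical consequence of the caching scheme described above: any
   handler that can run outside client code (e.g. evh__new_mem during
   startup) must use get_current_Thread(), while hot-path handlers that
   only ever run inside client code may use the cheaper
   get_current_Thread_in_C_C(). */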
1423 
1424 static Thread *current_Thread      = NULL,
1425               *current_Thread_prev = NULL;
1426 
1427 static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1428    if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1429    tl_assert(current_Thread == NULL);
1430    current_Thread = map_threads_lookup( tid );
1431    tl_assert(current_Thread != NULL);
1432    if (current_Thread != current_Thread_prev) {
1433       libhb_Thr_resumes( current_Thread->hbthr );
1434       current_Thread_prev = current_Thread;
1435    }
1436 }
1437 static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1438    if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1439    tl_assert(current_Thread != NULL);
1440    current_Thread = NULL;
1441    libhb_maybe_GC();
1442 }
1443 static inline Thread* get_current_Thread_in_C_C ( void ) {
1444    return current_Thread;
1445 }
1446 static inline Thread* get_current_Thread ( void ) {
1447    ThreadId coretid;
1448    Thread*  thr;
1449    thr = get_current_Thread_in_C_C();
1450    if (LIKELY(thr))
1451       return thr;
1452    /* evidently not in client code.  Do it the slow way. */
1453    coretid = VG_(get_running_tid)();
1454    /* FIXME: get rid of the following kludge.  It exists because
1455       evh__new_mem is called during initialisation (as notification
1456       of initial memory layout) and VG_(get_running_tid)() returns
1457       VG_INVALID_THREADID at that point. */
1458    if (coretid == VG_INVALID_THREADID)
1459       coretid = 1; /* KLUDGE */
1460    thr = map_threads_lookup( coretid );
1461    return thr;
1462 }
1463 
1464 static
1465 void evh__new_mem ( Addr a, SizeT len ) {
1466    Thread *thr = get_current_Thread();
1467    if (SHOW_EVENTS >= 2)
1468       VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1469    shadow_mem_make_New( thr, a, len );
1470    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1471       all__sanity_check("evh__new_mem-post");
1472    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1473       shadow_mem_make_Untracked( thr, a, len );
1474 }
1475 
1476 static
1477 void evh__new_mem_stack ( Addr a, SizeT len ) {
1478    Thread *thr = get_current_Thread();
1479    if (SHOW_EVENTS >= 2)
1480       VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1481    shadow_mem_make_New( thr, -VG_STACK_REDZONE_SZB + a, len );
1482    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1483       all__sanity_check("evh__new_mem_stack-post");
1484    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1485       shadow_mem_make_Untracked( thr, a, len );
1486 }
1487 
1488 static
1489 void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1490    Thread *thr = get_current_Thread();
1491    if (SHOW_EVENTS >= 2)
1492       VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1493    shadow_mem_make_New( thr, a, len );
1494    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1495       all__sanity_check("evh__new_mem_w_tid-post");
1496    if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1497       shadow_mem_make_Untracked( thr, a, len );
1498 }
1499 
1500 static
1501 void evh__new_mem_w_perms ( Addr a, SizeT len,
1502                             Bool rr, Bool ww, Bool xx, ULong di_handle ) {
1503    Thread *thr = get_current_Thread();
1504    if (SHOW_EVENTS >= 1)
1505       VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1506                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1507    if (rr || ww || xx) {
1508       shadow_mem_make_New( thr, a, len );
1509       if (UNLIKELY(thr->pthread_create_nesting_level > 0))
1510          shadow_mem_make_Untracked( thr, a, len );
1511    }
1512    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1513       all__sanity_check("evh__new_mem_w_perms-post");
1514 }
1515 
1516 static
1517 void evh__set_perms ( Addr a, SizeT len,
1518                       Bool rr, Bool ww, Bool xx ) {
1519    // This handles mprotect requests.  If the memory is being put
1520    // into no-R no-W state, paint it as NoAccess, for the reasons
1521    // documented at evh__die_mem_munmap().
1522    if (SHOW_EVENTS >= 1)
1523       VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
1524                   (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1525    /* Hmm.  What should we do here, that actually makes any sense?
1526       Let's say: if neither readable nor writable, then declare it
1527       NoAccess, else leave it alone. */
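   /* For example (hypothetical client call): mprotect(p, len, PROT_NONE)
      arrives here with rr == ww == xx == False, so [p, p+len) is painted
      NoAccess below. */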
1528    if (!(rr || ww))
1529       shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1530    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1531       all__sanity_check("evh__set_perms-post");
1532 }
1533 
1534 static
1535 void evh__die_mem ( Addr a, SizeT len ) {
1536    // Urr, libhb ignores this.
1537    if (SHOW_EVENTS >= 2)
1538       VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1539    shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
1540    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1541       all__sanity_check("evh__die_mem-post");
1542 }
1543 
1544 static
1545 void evh__die_mem_munmap ( Addr a, SizeT len ) {
1546    // It's important that libhb doesn't ignore this.  If, as is likely,
1547    // the client is subject to address space layout randomization,
1548    // then unmapped areas may never get remapped over, even in long
1549    // runs.  If we just ignore them we wind up with large resource
1550    // (VTS) leaks in libhb.  So force them to NoAccess, so that all
1551    // VTS references in the affected area are dropped.  Marking memory
1552    // as NoAccess is expensive, but we assume that munmap is sufficiently
1553    // rare that the space gains of doing this are worth the costs.
1554    if (SHOW_EVENTS >= 2)
1555       VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
1556    shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
1557 }
1558 
1559 static
1560 void evh__untrack_mem ( Addr a, SizeT len ) {
1561    // Libhb doesn't ignore this.
1562    if (SHOW_EVENTS >= 2)
1563       VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1564    shadow_mem_make_Untracked( get_current_Thread(), a, len );
1565    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1566       all__sanity_check("evh__untrack_mem-post");
1567 }
1568 
1569 static
1570 void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1571    if (SHOW_EVENTS >= 2)
1572       VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1573    Thread *thr = get_current_Thread();
1574    if (LIKELY(thr->synchr_nesting == 0))
1575       shadow_mem_scopy_range( thr , src, dst, len );
1576    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1577       all__sanity_check("evh__copy_mem-post");
1578 }
1579 
1580 static
1581 void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1582 {
1583    if (SHOW_EVENTS >= 1)
1584       VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1585                   (Int)parent, (Int)child );
1586 
1587    if (parent != VG_INVALID_THREADID) {
1588       Thread* thr_p;
1589       Thread* thr_c;
1590       Thr*    hbthr_p;
1591       Thr*    hbthr_c;
1592 
1593       tl_assert(HG_(is_sane_ThreadId)(parent));
1594       tl_assert(HG_(is_sane_ThreadId)(child));
1595       tl_assert(parent != child);
1596 
1597       thr_p = map_threads_maybe_lookup( parent );
1598       thr_c = map_threads_maybe_lookup( child );
1599 
1600       tl_assert(thr_p != NULL);
1601       tl_assert(thr_c == NULL);
1602 
1603       hbthr_p = thr_p->hbthr;
1604       tl_assert(hbthr_p != NULL);
1605       tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
1606 
1607       hbthr_c = libhb_create ( hbthr_p );
1608 
1609       /* Create a new thread record for the child. */
1610       /* a Thread for the new thread ... */
1611       thr_c = mk_Thread( hbthr_c );
1612       tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1613       libhb_set_Thr_hgthread(hbthr_c, thr_c);
1614 
1615       /* and bind it in the thread-map table */
1616       map_threads[child] = thr_c;
1617       tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1618       thr_c->coretid = child;
1619 
1620       /* Record where the parent is so we can later refer to this in
1621          error messages.
1622 
1623          On x86/amd64-linux, this entails a nasty glibc specific hack.
1624          The stack snapshot is taken immediately after the parent has
1625          returned from its sys_clone call.  Unfortunately there is no
1626          unwind info for the insn following "syscall" - reading the
1627          glibc sources confirms this.  So we ask for a snapshot to be
1628          taken as if RIP was 3 bytes earlier, in a place where there
1629          is unwind info.  Sigh.
1630       */
1631       { Word first_ip_delta = 0;
1632 #       if defined(VGP_amd64_linux) || defined(VGP_x86_linux)
1633         first_ip_delta = -3;
1634 #       elif defined(VGP_arm64_linux) || defined(VGP_arm_linux)
1635         first_ip_delta = -1;
1636 #       endif
1637         thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1638       }
1639 
1640       if (HG_(clo_ignore_thread_creation)) {
1641          HG_(thread_enter_pthread_create)(thr_c);
1642          tl_assert(thr_c->synchr_nesting == 0);
1643          HG_(thread_enter_synchr)(thr_c);
1644          /* Counterpart in _VG_USERREQ__HG_SET_MY_PTHREAD_T. */
1645       }
1646    }
1647 
1648    if (HG_(clo_sanity_flags) & SCE_THREADS)
1649       all__sanity_check("evh__pre_thread_create-post");
1650 }
1651 
1652 static
1653 void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1654 {
1655    Int     nHeld;
1656    Thread* thr_q;
1657    if (SHOW_EVENTS >= 1)
1658       VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1659                   (Int)quit_tid );
1660 
1661    /* quit_tid has disappeared without joining to any other thread.
1662       Therefore there is no synchronisation event associated with its
1663       exit and so we have to pretty much treat it as if it was still
1664       alive but mysteriously making no progress.  That is because, if
1665       we don't know when it really exited, then we can never say there
1666       is a point in time when we're sure the thread really has
1667       finished, and so we need to consider the possibility that it
1668       lingers indefinitely and continues to interact with other
1669       threads. */
1670    /* However, it might have rendezvous'd with a thread that called
1671       pthread_join with this one as arg, prior to this point (that's
1672       how NPTL works).  In which case there has already been a prior
1673       sync event.  So in any case, just let the thread exit.  On NPTL,
1674       all thread exits go through here. */
1675    tl_assert(HG_(is_sane_ThreadId)(quit_tid));
1676    thr_q = map_threads_maybe_lookup( quit_tid );
1677    tl_assert(thr_q != NULL);
1678 
1679    /* Complain if this thread holds any locks. */
1680    nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1681    tl_assert(nHeld >= 0);
1682    if (nHeld > 0) {
1683       HChar buf[80];
1684       VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1685                         nHeld, nHeld > 1 ? "s" : "");
1686       HG_(record_error_Misc)( thr_q, buf );
1687    }
1688 
1689    /* Not much to do here:
1690       - tell libhb the thread is gone
1691       - clear the map_threads entry, in order that the Valgrind core
1692         can re-use it. */
1693    /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1694       in sync. */
1695    tl_assert(thr_q->hbthr);
1696    libhb_async_exit(thr_q->hbthr);
1697    tl_assert(thr_q->coretid == quit_tid);
1698    thr_q->coretid = VG_INVALID_THREADID;
1699    map_threads_delete( quit_tid );
1700 
1701    if (HG_(clo_sanity_flags) & SCE_THREADS)
1702       all__sanity_check("evh__pre_thread_ll_exit-post");
1703 }
1704 
1705 /* This is called immediately after fork, for the child only.  'tid'
1706    is the only surviving thread (as per POSIX rules on fork() in
1707    threaded programs), so we have to clean up map_threads to remove
1708    entries for any other threads. */
1709 static
1710 void evh__atfork_child ( ThreadId tid )
1711 {
1712    UInt    i;
1713    Thread* thr;
1714    /* Slot 0 should never be used. */
1715    thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1716    tl_assert(!thr);
1717    /* Clean up all other slots except 'tid'. */
1718    for (i = 1; i < VG_N_THREADS; i++) {
1719       if (i == tid)
1720          continue;
1721       thr = map_threads_maybe_lookup(i);
1722       if (!thr)
1723          continue;
1724       /* Cleanup actions (next 5 lines) copied from end of
1725          evh__pre_thread_ll_exit; keep in sync. */
1726       tl_assert(thr->hbthr);
1727       libhb_async_exit(thr->hbthr);
1728       tl_assert(thr->coretid == i);
1729       thr->coretid = VG_INVALID_THREADID;
1730       map_threads_delete(i);
1731    }
1732 }
1733 
1734 /* generate a dependence from the hbthr_q quitter to the hbthr_s stayer. */
1735 static
1736 void generate_quitter_stayer_dependence (Thr* hbthr_q, Thr* hbthr_s)
1737 {
1738    SO*      so;
1739    /* Allocate a temporary synchronisation object and use it to send
1740       an imaginary message from the quitter to the stayer, the purpose
1741       being to generate a dependence from the quitter to the
1742       stayer. */
1743    so = libhb_so_alloc();
1744    tl_assert(so);
1745    /* Ideally the last arg of _so_send would be False, since the sending
1746       thread doesn't actually exist any more and we don't want _so_send
1747       to try taking stack snapshots of it. */
1748    libhb_so_send(hbthr_q, so, True/*strong_send; see comment above*/);
1749    libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1750    libhb_so_dealloc(so);
1751 
1752    /* Tell libhb that the quitter has been reaped.  Note that we might
1753       have to be cleverer about this, to exclude 2nd and subsequent
1754       notifications for the same hbthr_q, in the case where the app is
1755       buggy (calls pthread_join twice or more on the same thread) AND
1756       where libpthread is also buggy and doesn't return ESRCH on
1757       subsequent calls.  (If libpthread isn't thusly buggy, then the
1758       wrapper for pthread_join in hg_intercepts.c will stop us getting
1759       notified here multiple times for the same joinee.)  See also
1760       comments in helgrind/tests/jointwice.c. */
1761    libhb_joinedwith_done(hbthr_q);
1762 }
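/* Illustrative sketch: when the stayer does "pthread_join(t, &ret)",
   evh__HG_PTHREAD_JOIN_POST (below) calls the function above, so
   everything the quitter 't' did is ordered happens-before everything
   the stayer does after the join returns. */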
1763 
1764 
1765 static
1766 void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1767 {
1768    Thread*  thr_s;
1769    Thread*  thr_q;
1770    Thr*     hbthr_s;
1771    Thr*     hbthr_q;
1772 
1773    if (SHOW_EVENTS >= 1)
1774       VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1775                   (Int)stay_tid, quit_thr );
1776 
1777    tl_assert(HG_(is_sane_ThreadId)(stay_tid));
1778 
1779    thr_s = map_threads_maybe_lookup( stay_tid );
1780    thr_q = quit_thr;
1781    tl_assert(thr_s != NULL);
1782    tl_assert(thr_q != NULL);
1783    tl_assert(thr_s != thr_q);
1784 
1785    hbthr_s = thr_s->hbthr;
1786    hbthr_q = thr_q->hbthr;
1787    tl_assert(hbthr_s != hbthr_q);
1788    tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1789    tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
1790 
1791    generate_quitter_stayer_dependence (hbthr_q, hbthr_s);
1792 
1793    /* evh__pre_thread_ll_exit issues an error message if the exiting
1794       thread holds any locks.  No need to check here. */
1795 
1796    /* This holds because, at least when using NPTL as the thread
1797       library, we should be notified the low level thread exit before
1798       we hear of any join event on it.  The low level exit
1799       notification feeds through into evh__pre_thread_ll_exit,
1800       which should clear the map_threads entry for it.  Hence we
1801       expect there to be no map_threads entry at this point. */
1802    tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1803               == VG_INVALID_THREADID);
1804 
1805    if (HG_(clo_sanity_flags) & SCE_THREADS)
1806       all__sanity_check("evh__post_thread_join-post");
1807 }
1808 
1809 static
1810 void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
1811                          Addr a, SizeT size) {
1812    if (SHOW_EVENTS >= 2
1813        || (SHOW_EVENTS >= 1 && size != 1))
1814       VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1815                   (Int)tid, s, (void*)a, size );
1816    Thread *thr = map_threads_lookup(tid);
1817    if (LIKELY(thr->synchr_nesting == 0))
1818       shadow_mem_cread_range(thr, a, size);
1819    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1820       all__sanity_check("evh__pre_mem_read-post");
1821 }
1822 
1823 static
1824 void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1825                                 const HChar* s, Addr a ) {
1826    Int len;
1827    if (SHOW_EVENTS >= 1)
1828       VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1829                   (Int)tid, s, (void*)a );
1830    // Don't segfault if the string starts in an obviously stupid
1831    // place.  Actually we should check the whole string, not just
1832    // the start address, but that's too much trouble.  At least
1833    // checking the first byte is better than nothing.  See #255009.
1834    if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1835       return;
1836    Thread *thr = map_threads_lookup(tid);
1837    len = VG_(strlen)( (HChar*) a );
1838    if (LIKELY(thr->synchr_nesting == 0))
1839       shadow_mem_cread_range( thr, a, len+1 );
1840    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1841       all__sanity_check("evh__pre_mem_read_asciiz-post");
1842 }
1843 
1844 static
1845 void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
1846                           Addr a, SizeT size ) {
1847    if (SHOW_EVENTS >= 1)
1848       VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1849                   (Int)tid, s, (void*)a, size );
1850    Thread *thr = map_threads_lookup(tid);
1851    if (LIKELY(thr->synchr_nesting == 0))
1852       shadow_mem_cwrite_range(thr, a, size);
1853    if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1854       all__sanity_check("evh__pre_mem_write-post");
1855 }
1856 
1857 static
1858 void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1859    if (SHOW_EVENTS >= 1)
1860       VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1861                   (void*)a, len, (Int)is_inited );
1862    // We ignore the initialisation state (is_inited); that's ok.
1863    shadow_mem_make_New(get_current_Thread(), a, len);
1864    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1865       all__sanity_check("evh__new_mem_heap-post");
1866 }
1867 
1868 static
1869 void evh__die_mem_heap ( Addr a, SizeT len ) {
1870    Thread* thr;
1871    if (SHOW_EVENTS >= 1)
1872       VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1873    thr = get_current_Thread();
1874    tl_assert(thr);
1875    if (HG_(clo_free_is_write)) {
1876       /* Treat frees as if the memory was written immediately prior to
1877          the free.  This shakes out more races, specifically, cases
1878          where memory is referenced by one thread, and freed by
1879          another, and there's no observable synchronisation event to
1880          guarantee that the reference happens before the free. */
1881       if (LIKELY(thr->synchr_nesting == 0))
1882          shadow_mem_cwrite_range(thr, a, len);
1883    }
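   /* Illustrative example (hypothetical client code) of the kind of race
      this shakes out:

         Thread A:  tmp = buf[0];     // read, unordered w.r.t. B
         Thread B:  free(buf);        // with --free-is-write=yes, acts
                                      // as a write to the whole block

      so the unordered read in A is reported as a race. */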
1884    shadow_mem_make_NoAccess_AHAE( thr, a, len );
1885    /* We used to call instead
1886           shadow_mem_make_NoAccess_NoFX( thr, a, len );
1887       A non-buggy application will not access the freed memory again,
1888       so marking it no-access is in theory useless.
1889       Not marking freed memory would avoid the overhead for applications
1890       doing mostly malloc/free, as the freed memory is typically
1891       recycled very quickly after being freed.
1892       We mark it noaccess anyway, for the following reasons:
1893         * the accessibility bits then always correctly represent the
1894           memory status (e.g. for the client request VALGRIND_HG_GET_ABITS).
1895         * the overhead is reasonable (about 5 seconds per GB in 1000-byte
1896           blocks, on a ppc64le, for an unrealistic workload of an
1897           application doing only malloc/free).
1898         * marking no access allows the SecMap to be GC'd, which might
1899           improve performance and/or memory usage.
1900         * we might detect more application bugs when memory is marked
1901           noaccess.
1902       If needed, we could support an option --free-is-noaccess=yes|no
1903       here, so that applications which need to avoid the noaccess-marking
1904       overhead could opt out of it. */
1905 
1906    if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1907       all__sanity_check("evh__die_mem_heap-post");
1908 }
1909 
1910 /* --- Event handlers called from generated code --- */
1911 
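/* Note: these helpers are invoked directly from instrumented client
   code (hence the VG_REGPARM annotations), so they use the fast
   get_current_Thread_in_C_C lookup rather than get_current_Thread. */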
1912 static VG_REGPARM(1)
1913 void evh__mem_help_cread_1(Addr a) {
1914    Thread*  thr = get_current_Thread_in_C_C();
1915    Thr*     hbthr = thr->hbthr;
1916    if (LIKELY(thr->synchr_nesting == 0))
1917       LIBHB_CREAD_1(hbthr, a);
1918 }
1919 
1920 static VG_REGPARM(1)
1921 void evh__mem_help_cread_2(Addr a) {
1922    Thread*  thr = get_current_Thread_in_C_C();
1923    Thr*     hbthr = thr->hbthr;
1924    if (LIKELY(thr->synchr_nesting == 0))
1925       LIBHB_CREAD_2(hbthr, a);
1926 }
1927 
1928 static VG_REGPARM(1)
1929 void evh__mem_help_cread_4(Addr a) {
1930    Thread*  thr = get_current_Thread_in_C_C();
1931    Thr*     hbthr = thr->hbthr;
1932    if (LIKELY(thr->synchr_nesting == 0))
1933       LIBHB_CREAD_4(hbthr, a);
1934 }
1935 
1936 static VG_REGPARM(1)
1937 void evh__mem_help_cread_8(Addr a) {
1938    Thread*  thr = get_current_Thread_in_C_C();
1939    Thr*     hbthr = thr->hbthr;
1940    if (LIKELY(thr->synchr_nesting == 0))
1941       LIBHB_CREAD_8(hbthr, a);
1942 }
1943 
1944 static VG_REGPARM(2)
1945 void evh__mem_help_cread_N(Addr a, SizeT size) {
1946    Thread*  thr = get_current_Thread_in_C_C();
1947    Thr*     hbthr = thr->hbthr;
1948    if (LIKELY(thr->synchr_nesting == 0))
1949       LIBHB_CREAD_N(hbthr, a, size);
1950 }
1951 
1952 static VG_REGPARM(1)
1953 void evh__mem_help_cwrite_1(Addr a) {
1954    Thread*  thr = get_current_Thread_in_C_C();
1955    Thr*     hbthr = thr->hbthr;
1956    if (LIKELY(thr->synchr_nesting == 0))
1957       LIBHB_CWRITE_1(hbthr, a);
1958 }
1959 
1960 static VG_REGPARM(1)
1961 void evh__mem_help_cwrite_2(Addr a) {
1962    Thread*  thr = get_current_Thread_in_C_C();
1963    Thr*     hbthr = thr->hbthr;
1964    if (LIKELY(thr->synchr_nesting == 0))
1965       LIBHB_CWRITE_2(hbthr, a);
1966 }
1967 
1968 static VG_REGPARM(1)
1969 void evh__mem_help_cwrite_4(Addr a) {
1970    Thread*  thr = get_current_Thread_in_C_C();
1971    Thr*     hbthr = thr->hbthr;
1972    if (LIKELY(thr->synchr_nesting == 0))
1973       LIBHB_CWRITE_4(hbthr, a);
1974 }
1975 
1976 static VG_REGPARM(1)
1977 void evh__mem_help_cwrite_8(Addr a) {
1978    Thread*  thr = get_current_Thread_in_C_C();
1979    Thr*     hbthr = thr->hbthr;
1980    if (LIKELY(thr->synchr_nesting == 0))
1981       LIBHB_CWRITE_8(hbthr, a);
1982 }
1983 
1984 static VG_REGPARM(2)
1985 void evh__mem_help_cwrite_N(Addr a, SizeT size) {
1986    Thread*  thr = get_current_Thread_in_C_C();
1987    Thr*     hbthr = thr->hbthr;
1988    if (LIKELY(thr->synchr_nesting == 0))
1989       LIBHB_CWRITE_N(hbthr, a, size);
1990 }
1991 
1992 
1993 /* ------------------------------------------------------- */
1994 /* -------------- events to do with mutexes -------------- */
1995 /* ------------------------------------------------------- */
1996 
1997 /* EXPOSITION only: by intercepting lock init events we can show the
1998    user where the lock was initialised, rather than only being able to
1999    show where it was first locked.  Intercepting lock initialisations
2000    is not necessary for the basic operation of the race checker. */
2001 static
2002 void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
2003                                       void* mutex, Word mbRec )
2004 {
2005    if (SHOW_EVENTS >= 1)
2006       VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
2007                   (Int)tid, mbRec, (void*)mutex );
2008    tl_assert(mbRec == 0 || mbRec == 1);
2009    map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
2010                                (Addr)mutex, tid );
2011    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2012       all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
2013 }
2014 
2015 static
2016 void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
2017                                         Bool mutex_is_init )
2018 {
2019    Thread* thr;
2020    Lock*   lk;
2021    if (SHOW_EVENTS >= 1)
2022       VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
2023                   "(ctid=%d, %p, isInit=%d)\n",
2024                   (Int)tid, (void*)mutex, (Int)mutex_is_init );
2025 
2026    thr = map_threads_maybe_lookup( tid );
2027    /* cannot fail - Thread* must already exist */
2028    tl_assert( HG_(is_sane_Thread)(thr) );
2029 
2030    lk = map_locks_maybe_lookup( (Addr)mutex );
2031 
2032    if (lk == NULL && mutex_is_init) {
2033       /* We're destroying a mutex which we don't have any record of,
2034          and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
2035          Assume it never got used, and so we don't need to do anything
2036          more. */
2037       goto out;
2038    }
2039 
2040    if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
2041       HG_(record_error_Misc)(
2042          thr, "pthread_mutex_destroy with invalid argument" );
2043    }
2044 
2045    if (lk) {
2046       tl_assert( HG_(is_sane_LockN)(lk) );
2047       tl_assert( lk->guestaddr == (Addr)mutex );
2048       if (lk->heldBy) {
2049          /* Basically act like we unlocked the lock */
2050          HG_(record_error_Misc)(
2051             thr, "pthread_mutex_destroy of a locked mutex" );
2052          /* remove lock from locksets of all owning threads */
2053          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2054          VG_(deleteBag)( lk->heldBy );
2055          lk->heldBy = NULL;
2056          lk->heldW = False;
2057          lk->acquired_at = NULL;
2058       }
2059       tl_assert( !lk->heldBy );
2060       tl_assert( HG_(is_sane_LockN)(lk) );
2061 
2062       if (HG_(clo_track_lockorders))
2063          laog__handle_one_lock_deletion(lk);
2064       map_locks_delete( lk->guestaddr );
2065       del_LockN( lk );
2066    }
2067 
2068   out:
2069    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2070       all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
2071 }
2072 
2073 static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
2074                                              void* mutex, Word isTryLock )
2075 {
2076    /* Just check the mutex is sane; nothing else to do. */
2077    // 'mutex' may be invalid - not checked by wrapper
2078    Thread* thr;
2079    Lock*   lk;
2080    if (SHOW_EVENTS >= 1)
2081       VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2082                   (Int)tid, (void*)mutex );
2083 
2084    tl_assert(isTryLock == 0 || isTryLock == 1);
2085    thr = map_threads_maybe_lookup( tid );
2086    tl_assert(thr); /* cannot fail - Thread* must already exist */
2087 
2088    lk = map_locks_maybe_lookup( (Addr)mutex );
2089 
2090    if (lk && (lk->kind == LK_rdwr)) {
2091       HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2092                                    "pthread_rwlock_t* argument " );
2093    }
2094 
2095    if ( lk
2096         && isTryLock == 0
2097         && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2098         && lk->heldBy
2099         && lk->heldW
2100         && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
2101       /* uh, it's a non-recursive lock and we already w-hold it, and
2102          this is a real lock operation (not a speculative "tryLock"
2103          kind of thing).  Duh.  Deadlock coming up; but at least
2104          produce an error message. */
2105       const HChar* errstr = "Attempt to re-lock a "
2106                             "non-recursive lock I already hold";
2107       const HChar* auxstr = "Lock was previously acquired";
2108       if (lk->acquired_at) {
2109          HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2110       } else {
2111          HG_(record_error_Misc)( thr, errstr );
2112       }
2113    }
2114 }
2115 
2116 static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2117 {
2118    // only called if the real library call succeeded - so mutex is sane
2119    Thread* thr;
2120    if (SHOW_EVENTS >= 1)
2121       VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2122                   (Int)tid, (void*)mutex );
2123 
2124    thr = map_threads_maybe_lookup( tid );
2125    tl_assert(thr); /* cannot fail - Thread* must already exist */
2126 
2127    evhH__post_thread_w_acquires_lock(
2128       thr,
2129       LK_mbRec, /* if not known, create new lock with this LockKind */
2130       (Addr)mutex
2131    );
2132 }
2133 
2134 static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2135 {
2136    // 'mutex' may be invalid - not checked by wrapper
2137    Thread* thr;
2138    if (SHOW_EVENTS >= 1)
2139       VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2140                   (Int)tid, (void*)mutex );
2141 
2142    thr = map_threads_maybe_lookup( tid );
2143    tl_assert(thr); /* cannot fail - Thread* must already exist */
2144 
2145    evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2146 }
2147 
2148 static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2149 {
2150    // only called if the real library call succeeded - so mutex is sane
2151    Thread* thr;
2152    if (SHOW_EVENTS >= 1)
2153       VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2154                   (Int)tid, (void*)mutex );
2155    thr = map_threads_maybe_lookup( tid );
2156    tl_assert(thr); /* cannot fail - Thread* must already exist */
2157 
2158    // anything we should do here?
2159 }
2160 
2161 
2162 /* ------------------------------------------------------- */
2163 /* -------------- events to do with spinlocks ------------ */
2164 /* ------------------------------------------------------- */
2165 
2166 /* All a bit of a kludge.  Pretend we're really dealing with ordinary
2167    pthread_mutex_t's instead, for the most part. */
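/* Presumably this is because glibc implements both pthread_spin_init and
   pthread_spin_unlock as a plain store of the "unlocked" value into the
   spinlock word, so the two cases cannot be distinguished at this level;
   hence the combined INIT_OR_UNLOCK handlers below. */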
2168 
2169 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2170                                                      void* slock )
2171 {
2172    Thread* thr;
2173    Lock*   lk;
2174    /* In glibc's kludgey world, we're either initialising or unlocking
2175       it.  Since this is the pre-routine, if it is locked, unlock it
2176       and take a dependence edge.  Otherwise, do nothing. */
2177 
2178    if (SHOW_EVENTS >= 1)
2179       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2180                   "(ctid=%d, slock=%p)\n",
2181                   (Int)tid, (void*)slock );
2182 
2183    thr = map_threads_maybe_lookup( tid );
2184    /* cannot fail - Thread* must already exist */
2185    tl_assert( HG_(is_sane_Thread)(thr) );
2186 
2187    lk = map_locks_maybe_lookup( (Addr)slock );
2188    if (lk && lk->heldBy) {
2189       /* it's held.  So do the normal pre-unlock actions, as copied
2190          from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
2191          duplicates the map_locks_maybe_lookup. */
2192       evhH__pre_thread_releases_lock( thr, (Addr)slock,
2193                                            False/*!isRDWR*/ );
2194    }
2195 }
2196 
2197 static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2198                                                       void* slock )
2199 {
2200    Lock* lk;
2201    /* More kludgery.  If the lock has never been seen before, do
2202       actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
2203       nothing. */
2204 
2205    if (SHOW_EVENTS >= 1)
2206       VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2207                   "(ctid=%d, slock=%p)\n",
2208                   (Int)tid, (void*)slock );
2209 
2210    lk = map_locks_maybe_lookup( (Addr)slock );
2211    if (!lk) {
2212       map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2213    }
2214 }
2215 
2216 static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2217                                            void* slock, Word isTryLock )
2218 {
2219    evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2220 }
2221 
2222 static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2223                                             void* slock )
2224 {
2225    evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2226 }
2227 
2228 static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2229                                               void* slock )
2230 {
2231    evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
2232 }
2233 
2234 
2235 /* ----------------------------------------------------- */
2236 /* --------------- events to do with CVs --------------- */
2237 /* ----------------------------------------------------- */
2238 
2239 /* A mapping from CV to (the SO associated with it, plus some
2240    auxiliary data for error checking).  When the CV is
2241    signalled/broadcasted upon, we do a 'send' into the SO, and when a
2242    wait on it completes, we do a 'recv' from the SO.  This is believed
2243    to give the correct happens-before events arising from CV
2244    signallings/broadcasts.
2245 */
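/* Illustrative sketch (hypothetical client code) of the happens-before
   edge this creates:

      Thread A:                          Thread B:
        pthread_mutex_lock(&mx);           pthread_mutex_lock(&mx);
        data = 42;                         while (!ready)
        ready = 1;                            pthread_cond_wait(&cv, &mx);
        pthread_cond_signal(&cv);          use(data);
        pthread_mutex_unlock(&mx);         pthread_mutex_unlock(&mx);

   The signal does a 'send' on cv's SO and the completed wait does a
   'recv' from it, so B's use of 'data' is ordered after A's write. */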
2246 
2247 /* .so is the SO for this CV.
2248    .mx_ga is the associated mutex, when .nWaiters > 0
2249 
2250    POSIX says effectively that the first pthread_cond_{timed}wait call
2251    causes a dynamic binding between the CV and the mutex, and that
2252    lasts until such time as the waiter count falls to zero.  Hence
2253    need to keep track of the number of waiters in order to do
2254    consistency tracking. */
2255 typedef
2256    struct {
2257       SO*   so;       /* libhb-allocated SO */
2258       void* mx_ga;    /* addr of associated mutex, if any */
2259       UWord nWaiters; /* # threads waiting on the CV */
2260    }
2261    CVInfo;
2262 
2263 
2264 /* pthread_cond_t* -> CVInfo* */
2265 static WordFM* map_cond_to_CVInfo = NULL;
2266 
2267 static void map_cond_to_CVInfo_INIT ( void ) {
2268    if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2269       map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2270                                        "hg.mctCI.1", HG_(free), NULL );
2271    }
2272 }
2273 
2274 static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
2275    UWord key, val;
2276    map_cond_to_CVInfo_INIT();
2277    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2278       tl_assert(key == (UWord)cond);
2279       return (CVInfo*)val;
2280    } else {
2281       SO*     so  = libhb_so_alloc();
2282       CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2283       cvi->so     = so;
2284       cvi->mx_ga  = 0;
2285       VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2286       return cvi;
2287    }
2288 }
2289 
2290 static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
2291    UWord key, val;
2292    map_cond_to_CVInfo_INIT();
2293    if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
2294       tl_assert(key == (UWord)cond);
2295       return (CVInfo*)val;
2296    } else {
2297       return NULL;
2298    }
2299 }
2300 
2301 static void map_cond_to_CVInfo_delete ( ThreadId tid,
2302                                         void* cond, Bool cond_is_init ) {
2303    Thread*   thr;
2304    UWord keyW, valW;
2305 
2306    thr = map_threads_maybe_lookup( tid );
2307    tl_assert(thr); /* cannot fail - Thread* must already exist */
2308 
2309    map_cond_to_CVInfo_INIT();
2310    if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2311       CVInfo* cvi = (CVInfo*)valW;
2312       tl_assert(keyW == (UWord)cond);
2313       tl_assert(cvi);
2314       tl_assert(cvi->so);
2315       if (cvi->nWaiters > 0) {
2316          HG_(record_error_Misc)(
2317             thr, "pthread_cond_destroy:"
2318                  " destruction of condition variable being waited upon");
2319          /* Destroying a cond var that is being waited upon yields EBUSY,
2320             and the variable is not destroyed. */
2321          return;
2322       }
2323       if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
2324          tl_assert(0); // cond var found above, and not here ???
2325       libhb_so_dealloc(cvi->so);
2326       cvi->mx_ga = 0;
2327       HG_(free)(cvi);
2328    } else {
2329       /* We have no record of this CV.  So complain about it
2330          .. except, don't bother to complain if it has exactly the
2331          value PTHREAD_COND_INITIALIZER, since it might be that the CV
2332          was initialised like that but never used. */
2333       if (!cond_is_init) {
2334          HG_(record_error_Misc)(
2335             thr, "pthread_cond_destroy: destruction of unknown cond var");
2336       }
2337    }
2338 }
2339 
2340 static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2341 {
2342    /* 'tid' has signalled on 'cond'.  As per the comment above, bind
2343       cond to a SO if it is not already so bound, and 'send' on the
2344       SO.  This is later used by other thread(s) which successfully
2345       exit from a pthread_cond_wait on the same cv; then they 'recv'
2346       from the SO, thereby acquiring a dependency on this signalling
2347       event. */
2348    Thread*   thr;
2349    CVInfo*   cvi;
2350    //Lock*     lk;
2351 
2352    if (SHOW_EVENTS >= 1)
2353       VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2354                   (Int)tid, (void*)cond );
2355 
2356    thr = map_threads_maybe_lookup( tid );
2357    tl_assert(thr); /* cannot fail - Thread* must already exist */
2358 
2359    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2360    tl_assert(cvi);
2361    tl_assert(cvi->so);
2362 
2363    // error-if: mutex is bogus
2364    // error-if: mutex is not locked
2365    // Hmm.  POSIX doesn't actually say that it's an error to call
2366    // pthread_cond_signal with the associated mutex being unlocked.
2367    // Although it does say that it should be "if consistent scheduling
2368    // is desired."  For that reason, print "dubious" if the lock isn't
2369    // held by any thread.  Skip the "dubious" if it is held by some
2370    // other thread; that sounds straight-out wrong.
2371    //
2372    // Anybody who writes code that signals on a CV without holding
2373    // the associated MX needs to be shipped off to a lunatic asylum
2374    // ASAP, even though POSIX doesn't actually declare such behaviour
2375    // illegal -- it makes code extremely difficult to understand/
2376    // reason about.  In particular it puts the signalling thread in
2377    // a situation where it is racing against the released waiter
2378    // as soon as the signalling is done, and so there needs to be
2379    // some auxiliary synchronisation mechanism in the program that
2380    // makes this safe -- or the race(s) need to be harmless, or
2381    // probably nonexistent.
2382    //
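   // Illustrative (hypothetical) pattern that the "dubious" case below
   // refers to:
   //
   //    ready = 1;                  // no lock held
   //    pthread_cond_signal(&cv);   // signaller now races with the
   //                                // woken waiter on 'ready'
   //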
2383    if (1) {
2384       Lock* lk = NULL;
2385       if (cvi->mx_ga != 0) {
2386          lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
2387       }
2388       /* note: lk could be NULL.  Be careful. */
2389       if (lk) {
2390          if (lk->kind == LK_rdwr) {
2391             HG_(record_error_Misc)(thr,
2392                "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2393          }
2394          if (lk->heldBy == NULL) {
2395             HG_(record_error_Misc)(thr,
2396                "pthread_cond_{signal,broadcast}: dubious: "
2397                "associated lock is not held by any thread");
2398          }
2399          if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
2400             HG_(record_error_Misc)(thr,
2401                "pthread_cond_{signal,broadcast}: "
2402                "associated lock is not held by calling thread");
2403          }
2404       } else {
2405          /* Couldn't even find the damn thing. */
2406          // But actually .. that's not necessarily an error.  We don't
2407          // know the (CV,MX) binding until a pthread_cond_wait or bcast
2408          // shows us what it is, and that may not have happened yet.
2409          // So just keep quiet in this circumstance.
2410          //HG_(record_error_Misc)( thr,
2411          //   "pthread_cond_{signal,broadcast}: "
2412          //   "no or invalid mutex associated with cond");
2413       }
2414    }
2415 
2416    libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
2417 }
2418 
2419 /* returns True if it reckons 'mutex' is valid and held by this
2420    thread, else False */
2421 static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2422                                             void* cond, void* mutex )
2423 {
2424    Thread* thr;
2425    Lock*   lk;
2426    Bool    lk_valid = True;
2427    CVInfo* cvi;
2428 
2429    if (SHOW_EVENTS >= 1)
2430       VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2431                   "(ctid=%d, cond=%p, mutex=%p)\n",
2432                   (Int)tid, (void*)cond, (void*)mutex );
2433 
2434    thr = map_threads_maybe_lookup( tid );
2435    tl_assert(thr); /* cannot fail - Thread* must already exist */
2436 
2437    lk = map_locks_maybe_lookup( (Addr)mutex );
2438 
2439    /* Check for stupid mutex arguments.  There are various ways to be
2440       a bozo.  Only complain once, though, even if more than one thing
2441       is wrong. */
2442    if (lk == NULL) {
2443       lk_valid = False;
2444       HG_(record_error_Misc)(
2445          thr,
2446          "pthread_cond_{timed}wait called with invalid mutex" );
2447    } else {
2448       tl_assert( HG_(is_sane_LockN)(lk) );
2449       if (lk->kind == LK_rdwr) {
2450          lk_valid = False;
2451          HG_(record_error_Misc)(
2452             thr, "pthread_cond_{timed}wait called with mutex "
2453                  "of type pthread_rwlock_t*" );
2454       } else
2455          if (lk->heldBy == NULL) {
2456          lk_valid = False;
2457          HG_(record_error_Misc)(
2458             thr, "pthread_cond_{timed}wait called with un-held mutex");
2459       } else
2460       if (lk->heldBy != NULL
2461           && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
2462          lk_valid = False;
2463          HG_(record_error_Misc)(
2464             thr, "pthread_cond_{timed}wait called with mutex "
2465                  "held by a different thread" );
2466       }
2467    }
2468 
2469    // error-if: cond is also associated with a different mutex
2470    cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2471    tl_assert(cvi);
2472    tl_assert(cvi->so);
2473    if (cvi->nWaiters == 0) {
2474       /* form initial (CV,MX) binding */
2475       cvi->mx_ga = mutex;
2476    }
2477    else /* check existing (CV,MX) binding */
2478    if (cvi->mx_ga != mutex) {
2479       HG_(record_error_Misc)(
2480          thr, "pthread_cond_{timed}wait: cond is associated "
2481               "with a different mutex");
2482    }
2483    cvi->nWaiters++;
2484 
2485    return lk_valid;
2486 }
2487 
2488 static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2489                                              void* cond, void* mutex,
2490                                              Bool timeout)
2491 {
2492    /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
2493       the SO for this cond, and 'recv' from it so as to acquire a
2494       dependency edge back to the signaller/broadcaster. */
2495    Thread* thr;
2496    CVInfo* cvi;
2497 
2498    if (SHOW_EVENTS >= 1)
2499       VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2500                   "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
2501                   (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );
2502 
2503    thr = map_threads_maybe_lookup( tid );
2504    tl_assert(thr); /* cannot fail - Thread* must already exist */
2505 
2506    // error-if: cond is also associated with a different mutex
2507 
2508    cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
2509    if (!cvi) {
2510       /* This could be either a bug in helgrind or an error in the guest
2511          application (e.g. the cond var was destroyed by another thread).
2512          Let's assume helgrind is perfect ...
2513          Note that this is similar to drd's behaviour. */
2514       HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
2515                              " being waited upon");
2516       return;
2517    }
2518 
2519    tl_assert(cvi);
2520    tl_assert(cvi->so);
2521    tl_assert(cvi->nWaiters > 0);
2522 
2523    if (!timeout && !libhb_so_everSent(cvi->so)) {
2524       /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
2525          it?  If this happened it would surely be a bug in the threads
2526          library.  Or one of those fabled "spurious wakeups". */
2527       HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2528                                    "succeeded"
2529                                    " without prior pthread_cond_signal");
2530    }
2531 
2532    /* anyway, acquire a dependency on it. */
2533    libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2534 
2535    cvi->nWaiters--;
2536 }
2537 
2538 static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2539                                              void* cond, void* cond_attr )
2540 {
2541    CVInfo* cvi;
2542 
2543    if (SHOW_EVENTS >= 1)
2544       VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2545                   "(ctid=%d, cond=%p, cond_attr=%p)\n",
2546                   (Int)tid, (void*)cond, (void*) cond_attr );
2547 
2548    cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2549    tl_assert (cvi);
2550    tl_assert (cvi->so);
2551 }
2552 
2553 
2554 static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2555                                                void* cond, Bool cond_is_init )
2556 {
2557    /* Deal with destroy events.  The only purpose is to free storage
2558       associated with the CV, so as to avoid any possible resource
2559       leaks. */
2560    if (SHOW_EVENTS >= 1)
2561       VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2562                   "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2563                   (Int)tid, (void*)cond, (Int)cond_is_init );
2564 
2565    map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
2566 }
2567 
2568 
2569 /* ------------------------------------------------------- */
2570 /* -------------- events to do with rwlocks -------------- */
2571 /* ------------------------------------------------------- */
2572 
2573 /* EXPOSITION only */
2574 static
2575 void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2576 {
2577    if (SHOW_EVENTS >= 1)
2578       VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2579                   (Int)tid, (void*)rwl );
2580    map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
2581    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2582       all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2583 }
2584 
2585 static
2586 void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2587 {
2588    Thread* thr;
2589    Lock*   lk;
2590    if (SHOW_EVENTS >= 1)
2591       VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2592                   (Int)tid, (void*)rwl );
2593 
2594    thr = map_threads_maybe_lookup( tid );
2595    /* cannot fail - Thread* must already exist */
2596    tl_assert( HG_(is_sane_Thread)(thr) );
2597 
2598    lk = map_locks_maybe_lookup( (Addr)rwl );
2599 
2600    if (lk == NULL || lk->kind != LK_rdwr) {
2601       HG_(record_error_Misc)(
2602          thr, "pthread_rwlock_destroy with invalid argument" );
2603    }
2604 
2605    if (lk) {
2606       tl_assert( HG_(is_sane_LockN)(lk) );
2607       tl_assert( lk->guestaddr == (Addr)rwl );
2608       if (lk->heldBy) {
2609          /* Basically act like we unlocked the lock */
2610          HG_(record_error_Misc)(
2611             thr, "pthread_rwlock_destroy of a locked rwlock" );
2612          /* remove lock from locksets of all owning threads */
2613          remove_Lock_from_locksets_of_all_owning_Threads( lk );
2614          VG_(deleteBag)( lk->heldBy );
2615          lk->heldBy = NULL;
2616          lk->heldW = False;
2617          lk->acquired_at = NULL;
2618       }
2619       tl_assert( !lk->heldBy );
2620       tl_assert( HG_(is_sane_LockN)(lk) );
2621 
2622       if (HG_(clo_track_lockorders))
2623          laog__handle_one_lock_deletion(lk);
2624       map_locks_delete( lk->guestaddr );
2625       del_LockN( lk );
2626    }
2627 
2628    if (HG_(clo_sanity_flags) & SCE_LOCKS)
2629       all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2630 }
2631 
2632 static
2633 void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2634                                        void* rwl,
2635                                        Word isW, Word isTryLock )
2636 {
2637    /* Just check the rwl is sane; nothing else to do. */
2638    // 'rwl' may be invalid - not checked by wrapper
2639    Thread* thr;
2640    Lock*   lk;
2641    if (SHOW_EVENTS >= 1)
2642       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2643                   (Int)tid, (Int)isW, (void*)rwl );
2644 
2645    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2646    tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
2647    thr = map_threads_maybe_lookup( tid );
2648    tl_assert(thr); /* cannot fail - Thread* must already exist */
2649 
2650    lk = map_locks_maybe_lookup( (Addr)rwl );
2651    if ( lk
2652         && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2653       /* Wrong kind of lock.  Duh.  */
2654       HG_(record_error_Misc)(
2655          thr, "pthread_rwlock_{rd,rw}lock with a "
2656               "pthread_mutex_t* argument " );
2657    }
2658 }
2659 
2660 static
2661 void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2662 {
2663    // only called if the real library call succeeded - so mutex is sane
2664    Thread* thr;
2665    if (SHOW_EVENTS >= 1)
2666       VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2667                   (Int)tid, (Int)isW, (void*)rwl );
2668 
2669    tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2670    thr = map_threads_maybe_lookup( tid );
2671    tl_assert(thr); /* cannot fail - Thread* must already exist */
2672 
2673    (isW ? evhH__post_thread_w_acquires_lock
2674         : evhH__post_thread_r_acquires_lock)(
2675       thr,
2676       LK_rdwr, /* if not known, create new lock with this LockKind */
2677       (Addr)rwl
2678    );
2679 }
2680 
2681 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2682 {
2683    // 'rwl' may be invalid - not checked by wrapper
2684    Thread* thr;
2685    if (SHOW_EVENTS >= 1)
2686       VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2687                   (Int)tid, (void*)rwl );
2688 
2689    thr = map_threads_maybe_lookup( tid );
2690    tl_assert(thr); /* cannot fail - Thread* must already exist */
2691 
2692    evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2693 }
2694 
2695 static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2696 {
2697    // only called if the real library call succeeded - so mutex is sane
2698    Thread* thr;
2699    if (SHOW_EVENTS >= 1)
2700       VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2701                   (Int)tid, (void*)rwl );
2702    thr = map_threads_maybe_lookup( tid );
2703    tl_assert(thr); /* cannot fail - Thread* must already exist */
2704 
2705    // anything we should do here?
2706 }
2707 
2708 
2709 /* ---------------------------------------------------------- */
2710 /* -------------- events to do with semaphores -------------- */
2711 /* ---------------------------------------------------------- */
2712 
2713 /* This is similar to but not identical to the handling for condition
2714    variables. */
2715 
2716 /* For each semaphore, we maintain a stack of SOs.  When a 'post'
2717    operation is done on a semaphore (unlocking, essentially), a new SO
2718    is created for the posting thread, the posting thread does a strong
2719    send to it (which merely installs the posting thread's VC in the
2720    SO), and the SO is pushed on the semaphore's stack.
2721 
2722    Later, when a (probably different) thread completes 'wait' on the
2723    semaphore, we pop a SO off the semaphore's stack (which should be
2724    nonempty), and do a strong recv from it.  This mechanism creates
2725    dependencies between posters and waiters of the semaphore.
2726 
2727    It may not be necessary to use a stack - perhaps a bag of SOs would
2728    do.  But we do need to keep track of how many unused-up posts have
2729    happened for the semaphore.
2730 
2731    Imagine T1 and T2 both post once on a semaphore S, and T3 waits
2732    twice on S.  T3 cannot complete its waits without both T1 and T2
2733    posting.  The above mechanism will ensure that T3 acquires
2734    dependencies on both T1 and T2.
2735 
2736    When a semaphore is initialised with value N, we act as if we'd
2737    posted N times on the semaphore: basically create N SOs and do a
2738    strong send to all of them.  This allows up to N waits on the
2739    semaphore to acquire a dependency on the initialisation point,
2740    which AFAICS is the correct behaviour.
2741 
2742    We don't emit an error for DESTROY_PRE on a semaphore we don't know
2743    about.  We should.
2744 */
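/* Illustrative sketch of the mechanism for the T1/T2/T3 example above
   (one hypothetical interleaving):

      T1: sem_post(&s)  ->  so1 = alloc SO; strong send(T1, so1); push so1
      T2: sem_post(&s)  ->  so2 = alloc SO; strong send(T2, so2); push so2
      T3: sem_wait(&s)  ->  pop so2; strong recv(T3, so2)   [T3 after T2]
      T3: sem_wait(&s)  ->  pop so1; strong recv(T3, so1)   [T3 after T1]
*/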
2745 
2746 /* sem_t* -> XArray* SO* */
2747 static WordFM* map_sem_to_SO_stack = NULL;
2748 
2749 static void map_sem_to_SO_stack_INIT ( void ) {
2750    if (map_sem_to_SO_stack == NULL) {
2751       map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2752                                         HG_(free), NULL );
2753    }
2754 }
2755 
2756 static void push_SO_for_sem ( void* sem, SO* so ) {
2757    UWord   keyW;
2758    XArray* xa;
2759    tl_assert(so);
2760    map_sem_to_SO_stack_INIT();
2761    if (VG_(lookupFM)( map_sem_to_SO_stack,
2762                       &keyW, (UWord*)&xa, (UWord)sem )) {
2763       tl_assert(keyW == (UWord)sem);
2764       tl_assert(xa);
2765       VG_(addToXA)( xa, &so );
2766    } else {
2767       xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2768       VG_(addToXA)( xa, &so );
2769       VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
2770    }
2771 }
2772 
2773 static SO* mb_pop_SO_for_sem ( void* sem ) {
2774    UWord    keyW;
2775    XArray*  xa;
2776    SO* so;
2777    map_sem_to_SO_stack_INIT();
2778    if (VG_(lookupFM)( map_sem_to_SO_stack,
2779                       &keyW, (UWord*)&xa, (UWord)sem )) {
2780       /* xa is the stack for this semaphore. */
2781       Word sz;
2782       tl_assert(keyW == (UWord)sem);
2783       sz = VG_(sizeXA)( xa );
2784       tl_assert(sz >= 0);
2785       if (sz == 0)
2786          return NULL; /* odd, the stack is empty */
2787       so = *(SO**)VG_(indexXA)( xa, sz-1 );
2788       tl_assert(so);
2789       VG_(dropTailXA)( xa, 1 );
2790       return so;
2791    } else {
2792       /* hmm, that's odd.  No stack for this semaphore. */
2793       return NULL;
2794    }
2795 }
2796 
2797 static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
2798 {
2799    UWord keyW, valW;
2800    SO*   so;
2801 
2802    if (SHOW_EVENTS >= 1)
2803       VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
2804                   (Int)tid, (void*)sem );
2805 
2806    map_sem_to_SO_stack_INIT();
2807 
2808    /* Empty out the semaphore's SO stack.  This way of doing it is
2809       stupid, but at least it's easy. */
2810    while (1) {
2811       so = mb_pop_SO_for_sem( sem );
2812       if (!so) break;
2813       libhb_so_dealloc(so);
2814    }
2815 
2816    if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2817       XArray* xa = (XArray*)valW;
2818       tl_assert(keyW == (UWord)sem);
2819       tl_assert(xa);
2820       tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2821       VG_(deleteXA)(xa);
2822    }
2823 }
2824 
2825 static
2826 void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2827 {
2828    SO*     so;
2829    Thread* thr;
2830 
2831    if (SHOW_EVENTS >= 1)
2832       VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2833                   (Int)tid, (void*)sem, value );
2834 
2835    thr = map_threads_maybe_lookup( tid );
2836    tl_assert(thr); /* cannot fail - Thread* must already exist */
2837 
2838    /* Empty out the semaphore's SO stack.  This way of doing it is
2839       stupid, but at least it's easy. */
2840    while (1) {
2841       so = mb_pop_SO_for_sem( sem );
2842       if (!so) break;
2843       libhb_so_dealloc(so);
2844    }
2845 
2846    /* If we don't do this check, the following loop runs us out
2847       of memory for stupid initial values of 'value'. */
2848    if (value > 10000) {
2849       HG_(record_error_Misc)(
2850          thr, "sem_init: initial value exceeds 10000; using 10000" );
2851       value = 10000;
2852    }
2853 
2854    /* Now create 'valid' new SOs for the thread, do a strong send to
2855       each of them, and push them all on the stack. */
2856    for (; value > 0; value--) {
2857       Thr* hbthr = thr->hbthr;
2858       tl_assert(hbthr);
2859 
2860       so = libhb_so_alloc();
2861       libhb_so_send( hbthr, so, True/*strong send*/ );
2862       push_SO_for_sem( sem, so );
2863    }
2864 }
2865 
2866 static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
2867 {
2868    /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
2869       it (iow, write our VC into it, then tick ours), and push the SO
2870       on a stack of SOs associated with 'sem'.  This is later used
2871       by other thread(s) which successfully exit from a sem_wait on
2872       the same sem; by doing a strong recv from SOs popped off the
2873       stack, they acquire dependencies on the posting thread
2874       segment(s). */
2875 
2876    Thread* thr;
2877    SO*     so;
2878    Thr*    hbthr;
2879 
2880    if (SHOW_EVENTS >= 1)
2881       VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
2882                   (Int)tid, (void*)sem );
2883 
2884    thr = map_threads_maybe_lookup( tid );
2885    tl_assert(thr); /* cannot fail - Thread* must already exist */
2886 
2887    // error-if: sem is bogus
2888 
2889    hbthr = thr->hbthr;
2890    tl_assert(hbthr);
2891 
2892    so = libhb_so_alloc();
2893    libhb_so_send( hbthr, so, True/*strong send*/ );
2894    push_SO_for_sem( sem, so );
2895 }
2896 
2897 static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
2898 {
2899    /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
2900       the 'sem' from this semaphore's SO-stack, and do a strong recv
2901       from it.  This creates a dependency back to one of the post-ers
2902       for the semaphore. */
2903 
2904    Thread* thr;
2905    SO*     so;
2906    Thr*    hbthr;
2907 
2908    if (SHOW_EVENTS >= 1)
2909       VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
2910                   (Int)tid, (void*)sem );
2911 
2912    thr = map_threads_maybe_lookup( tid );
2913    tl_assert(thr); /* cannot fail - Thread* must already exist */
2914 
2915    // error-if: sem is bogus
2916 
2917    so = mb_pop_SO_for_sem( sem );
2918 
2919    if (so) {
2920       hbthr = thr->hbthr;
2921       tl_assert(hbthr);
2922 
2923       libhb_so_recv( hbthr, so, True/*strong recv*/ );
2924       libhb_so_dealloc(so);
2925    } else {
2926       /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
2927          If this happened it would surely be a bug in the threads
2928          library. */
2929       HG_(record_error_Misc)(
2930          thr, "Bug in libpthread: sem_wait succeeded on"
2931               " semaphore without prior sem_post");
2932    }
2933 }
2934 
2935 
2936 /* -------------------------------------------------------- */
2937 /* -------------- events to do with barriers -------------- */
2938 /* -------------------------------------------------------- */
2939 
2940 typedef
2941    struct {
2942       Bool    initted; /* has it yet been initted by guest? */
2943       Bool    resizable; /* is resizing allowed? */
2944       UWord   size;    /* declared size */
2945       XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
2946    }
2947    Bar;
2948 
2949 static Bar* new_Bar ( void ) {
2950    Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2951    /* all fields are zero */
2952    tl_assert(bar->initted == False);
2953    return bar;
2954 }
2955 
2956 static void delete_Bar ( Bar* bar ) {
2957    tl_assert(bar);
2958    if (bar->waiting)
2959       VG_(deleteXA)(bar->waiting);
2960    HG_(free)(bar);
2961 }
2962 
2963 /* A mapping which stores auxiliary data for barriers. */
2964 
2965 /* pthread_barrier_t* -> Bar* */
2966 static WordFM* map_barrier_to_Bar = NULL;
2967 
2968 static void map_barrier_to_Bar_INIT ( void ) {
2969    if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2970       map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2971                                        "hg.mbtBI.1", HG_(free), NULL );
2972    }
2973 }
2974 
2975 static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2976    UWord key, val;
2977    map_barrier_to_Bar_INIT();
2978    if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2979       tl_assert(key == (UWord)barrier);
2980       return (Bar*)val;
2981    } else {
2982       Bar* bar = new_Bar();
2983       VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2984       return bar;
2985    }
2986 }
2987 
2988 static void map_barrier_to_Bar_delete ( void* barrier ) {
2989    UWord keyW, valW;
2990    map_barrier_to_Bar_INIT();
2991    if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2992       Bar* bar = (Bar*)valW;
2993       tl_assert(keyW == (UWord)barrier);
2994       delete_Bar(bar);
2995    }
2996 }
2997 
2998 
2999 static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
3000                                                void* barrier,
3001                                                UWord count,
3002                                                UWord resizable )
3003 {
3004    Thread* thr;
3005    Bar*    bar;
3006 
3007    if (SHOW_EVENTS >= 1)
3008       VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
3009                   "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
3010                   (Int)tid, (void*)barrier, count, resizable );
3011 
3012    thr = map_threads_maybe_lookup( tid );
3013    tl_assert(thr); /* cannot fail - Thread* must already exist */
3014 
3015    if (count == 0) {
3016       HG_(record_error_Misc)(
3017          thr, "pthread_barrier_init: 'count' argument is zero"
3018       );
3019    }
3020 
3021    if (resizable != 0 && resizable != 1) {
3022       HG_(record_error_Misc)(
3023          thr, "pthread_barrier_init: invalid 'resizable' argument"
3024       );
3025    }
3026 
3027    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3028    tl_assert(bar);
3029 
3030    if (bar->initted) {
3031       HG_(record_error_Misc)(
3032          thr, "pthread_barrier_init: barrier is already initialised"
3033       );
3034    }
3035 
3036    if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3037       tl_assert(bar->initted);
3038       HG_(record_error_Misc)(
3039          thr, "pthread_barrier_init: threads are waiting at barrier"
3040       );
3041       VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3042    }
3043    if (!bar->waiting) {
3044       bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
3045                                  sizeof(Thread*) );
3046    }
3047 
3048    tl_assert(VG_(sizeXA)(bar->waiting) == 0);
3049    bar->initted   = True;
3050    bar->resizable = resizable == 1 ? True : False;
3051    bar->size      = count;
3052 }
3053 
3054 
3055 static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
3056                                                   void* barrier )
3057 {
3058    Thread* thr;
3059    Bar*    bar;
3060 
3061    /* Deal with destroy events.  The only purpose is to free storage
3062       associated with the barrier, so as to avoid any possible
3063       resource leaks. */
3064    if (SHOW_EVENTS >= 1)
3065       VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
3066                   "(tid=%d, barrier=%p)\n",
3067                   (Int)tid, (void*)barrier );
3068 
3069    thr = map_threads_maybe_lookup( tid );
3070    tl_assert(thr); /* cannot fail - Thread* must already exist */
3071 
3072    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3073    tl_assert(bar);
3074 
3075    if (!bar->initted) {
3076       HG_(record_error_Misc)(
3077          thr, "pthread_barrier_destroy: barrier was never initialised"
3078       );
3079    }
3080 
3081    if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
3082       HG_(record_error_Misc)(
3083          thr, "pthread_barrier_destroy: threads are waiting at barrier"
3084       );
3085    }
3086 
3087    /* Maybe we shouldn't do this; just let it persist, so that when it
3088       is reinitialised we don't need to do any dynamic memory
3089       allocation?  The downside is a potentially unlimited space leak,
3090       if the client creates (in turn) a large number of barriers all
3091       at different locations.  Note that if we do later move to the
3092       don't-delete-it scheme, we need to mark the barrier as
3093       uninitialised again since otherwise a later _init call will
3094       elicit a duplicate-init error.  */
3095    map_barrier_to_Bar_delete( barrier );
3096 }
3097 
3098 
3099 /* All the threads have arrived.  Now do the Interesting Bit.  Get a
3100    new synchronisation object and do a weak send to it from all the
3101    participating threads.  This makes its vector clocks be the join of
3102    all the individual threads' vector clocks.  Then do a strong
3103    receive from it back to all threads, so that their VCs are a copy
3104    of it (hence are all equal to the join of their original VCs.) */
3105 static void do_barrier_cross_sync_and_empty ( Bar* bar )
3106 {
3107    /* XXX check bar->waiting has no duplicates */
3108    UWord i;
3109    SO*   so = libhb_so_alloc();
3110 
3111    tl_assert(bar->waiting);
3112    tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3113 
3114    /* compute the join ... */
3115    for (i = 0; i < bar->size; i++) {
3116       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3117       Thr* hbthr = t->hbthr;
3118       libhb_so_send( hbthr, so, False/*weak send*/ );
3119    }
3120    /* ... and distribute to all threads */
3121    for (i = 0; i < bar->size; i++) {
3122       Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3123       Thr* hbthr = t->hbthr;
3124       libhb_so_recv( hbthr, so, True/*strong recv*/ );
3125    }
3126 
3127    /* finally, we must empty out the waiting vector */
3128    VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3129 
3130    /* and we don't need this any more.  Perhaps a stack-allocated
3131       SO would be better? */
3132    libhb_so_dealloc(so);
3133 }
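
/* A small worked example of the above join (numbers are illustrative
   only, and any per-thread clock ticks done internally by the send and
   recv operations are ignored).  Suppose three threads arrive at the
   barrier with vector clocks

      T1: { T1:4 }      T2: { T2:7 }      T3: { T3:2 }

   The three weak sends leave the SO holding the join

      SO: { T1:4, T2:7, T3:2 }

   and the strong receives then hand each thread a copy of that join,
   so everything any thread does after the barrier happens-after
   everything all three threads did before it. */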
3134 
3135 
3136 static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3137                                                void* barrier )
3138 {
3139   /* This function gets called after a client thread calls
3140      pthread_barrier_wait but before it arrives at the real
3141      pthread_barrier_wait.
3142 
3143      Why is the following correct?  It's a bit subtle.
3144 
3145      If this is not the last thread arriving at the barrier, we simply
3146      note its presence and return.  Because valgrind (at least as of
3147      Nov 08) is single threaded, we are guaranteed safe from any race
3148      conditions when in this function -- no other client threads are
3149      running.
3150 
3151      If this is the last thread, then we are again the only running
3152      thread.  All the other threads will have either arrived at the
3153      real pthread_barrier_wait or are on their way to it, but in any
3154      case are guaranteed not to be able to move past it, because this
3155      thread is currently in this function and so has not yet arrived
3156      at the real pthread_barrier_wait.  That means that:
3157 
3158      1. While we are in this function, none of the other threads
3159         waiting at the barrier can move past it.
3160 
3161      2. When this function returns (and simulated execution resumes),
3162         this thread and all other waiting threads will be able to move
3163         past the real barrier.
3164 
3165      Because of this, it is now safe to update the vector clocks of
3166      all threads, to represent the fact that they all arrived at the
3167      barrier and have all moved on.  There is no danger of any
3168      complications to do with some threads leaving the barrier and
3169      racing back round to the front, whilst others are still leaving
3170      (which is the primary source of complication in correct handling/
3171      implementation of barriers).  That can't happen because we update
3172      here our data structures so as to indicate that the threads have
3173      passed the barrier, even though, as per (2) above, they are
3174      guaranteed not to pass the barrier until we return.
3175 
3176      This relies crucially on Valgrind being single threaded.  If that
3177      changes, this will need to be reconsidered.
3178    */
3179    Thread* thr;
3180    Bar*    bar;
3181    UWord   present;
3182 
3183    if (SHOW_EVENTS >= 1)
3184       VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3185                   "(tid=%d, barrier=%p)\n",
3186                   (Int)tid, (void*)barrier );
3187 
3188    thr = map_threads_maybe_lookup( tid );
3189    tl_assert(thr); /* cannot fail - Thread* must already exist */
3190 
3191    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3192    tl_assert(bar);
3193 
3194    if (!bar->initted) {
3195       HG_(record_error_Misc)(
3196          thr, "pthread_barrier_wait: barrier is uninitialised"
3197       );
3198       return; /* client is broken .. avoid assertions below */
3199    }
3200 
3201    /* guaranteed by _INIT_PRE above */
3202    tl_assert(bar->size > 0);
3203    tl_assert(bar->waiting);
3204 
3205    VG_(addToXA)( bar->waiting, &thr );
3206 
3207    /* guaranteed by this function */
3208    present = VG_(sizeXA)(bar->waiting);
3209    tl_assert(present > 0 && present <= bar->size);
3210 
3211    if (present < bar->size)
3212       return;
3213 
3214    do_barrier_cross_sync_and_empty(bar);
3215 }
3216 
3217 
3218 static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3219                                                  void* barrier,
3220                                                  UWord newcount )
3221 {
3222    Thread* thr;
3223    Bar*    bar;
3224    UWord   present;
3225 
3226    if (SHOW_EVENTS >= 1)
3227       VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3228                   "(tid=%d, barrier=%p, newcount=%lu)\n",
3229                   (Int)tid, (void*)barrier, newcount );
3230 
3231    thr = map_threads_maybe_lookup( tid );
3232    tl_assert(thr); /* cannot fail - Thread* must already exist */
3233 
3234    bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3235    tl_assert(bar);
3236 
3237    if (!bar->initted) {
3238       HG_(record_error_Misc)(
3239          thr, "pthread_barrier_resize: barrier is uninitialised"
3240       );
3241       return; /* client is broken .. avoid assertions below */
3242    }
3243 
3244    if (!bar->resizable) {
3245       HG_(record_error_Misc)(
3246          thr, "pthread_barrier_resize: barrier is may not be resized"
3247       );
3248       return; /* client is broken .. avoid assertions below */
3249    }
3250 
3251    if (newcount == 0) {
3252       HG_(record_error_Misc)(
3253          thr, "pthread_barrier_resize: 'newcount' argument is zero"
3254       );
3255       return; /* client is broken .. avoid assertions below */
3256    }
3257 
3258    /* guaranteed by _INIT_PRE above */
3259    tl_assert(bar->size > 0);
3260    tl_assert(bar->waiting);
3261    /* Guaranteed by this fn */
3262    tl_assert(newcount > 0);
3263 
3264    if (newcount >= bar->size) {
3265       /* Increasing the capacity.  There's no possibility of threads
3266          moving on from the barrier in this situation, so just note
3267          the fact and do nothing more. */
3268       bar->size = newcount;
3269    } else {
3270       /* Decreasing the capacity.  If we decrease it to be equal to or
3271          below the number of waiting threads, they will now move past
3272          the barrier, so need to mess with dep edges in the same way
3273          as if the barrier had filled up normally. */
3274       present = VG_(sizeXA)(bar->waiting);
3275       tl_assert(present >= 0 && present <= bar->size);
3276       if (newcount <= present) {
3277          bar->size = present; /* keep the cross_sync call happy */
3278          do_barrier_cross_sync_and_empty(bar);
3279       }
3280       bar->size = newcount;
3281    }
3282 }
3283 
3284 
3285 /* ----------------------------------------------------- */
3286 /* ----- events to do with user-specified HB edges ----- */
3287 /* ----------------------------------------------------- */
3288 
3289 /* A mapping from arbitrary UWord tag to the SO associated with it.
3290    The UWord tags are meaningless to us, interpreted only by the
3291    user. */
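
/* Client code normally reaches the handlers below via the
   ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros in
   helgrind.h, passing the address of some object as the tag.  A
   minimal sketch (client code, not part of this file; note that
   Helgrind may still complain about the unsynchronised accesses to
   'flag' itself -- the annotations only order 'payload'):

      #include <pthread.h>
      #include "helgrind.h"

      static int payload;
      static volatile int flag;            // hand-rolled synchronisation

      static void* producer ( void* v ) {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&flag);   // weak send on the SO tagged &flag
         flag = 1;
         return NULL;
      }

      static void* consumer ( void* v ) {
         while (flag != 1) ;               // ordering known only to the app
         ANNOTATE_HAPPENS_AFTER(&flag);    // strong recv from the same SO
         return (void*)(long)payload;      // no race reported on 'payload'
      }

      int main ( void ) {
         pthread_t p, c;
         pthread_create(&p, NULL, producer, NULL);
         pthread_create(&c, NULL, consumer, NULL);
         pthread_join(p, NULL);
         pthread_join(c, NULL);
         return 0;
      }
*/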
3292 
3293 
3294 
3295 /* UWord -> SO* */
3296 static WordFM* map_usertag_to_SO = NULL;
3297 
3298 static void map_usertag_to_SO_INIT ( void ) {
3299    if (UNLIKELY(map_usertag_to_SO == NULL)) {
3300       map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3301                                       "hg.mutS.1", HG_(free), NULL );
3302    }
3303 }
3304 
3305 static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3306    UWord key, val;
3307    map_usertag_to_SO_INIT();
3308    if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3309       tl_assert(key == (UWord)usertag);
3310       return (SO*)val;
3311    } else {
3312       SO* so = libhb_so_alloc();
3313       VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3314       return so;
3315    }
3316 }
3317 
3318 static void map_usertag_to_SO_delete ( UWord usertag ) {
3319    UWord keyW, valW;
3320    map_usertag_to_SO_INIT();
3321    if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3322       SO* so = (SO*)valW;
3323       tl_assert(keyW == usertag);
3324       tl_assert(so);
3325       libhb_so_dealloc(so);
3326    }
3327 }
3328 
3329 
3330 static
3331 void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3332 {
3333    /* TID is just about to notionally send a message on a notional
3334       abstract synchronisation object whose identity is given by
3335       USERTAG.  Bind USERTAG to a real SO if it is not already so
3336       bound, and do a 'weak send' on the SO.  This joins the vector
3337       clocks from this thread into any vector clocks already present
3338       in the SO.  The resulting SO vector clocks are later used by
3339       other thread(s) which successfully 'receive' from the SO,
3340       thereby acquiring a dependency on all the events that have
3341       previously signalled on this SO. */
3342    Thread* thr;
3343    SO*     so;
3344 
3345    if (SHOW_EVENTS >= 1)
3346       VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3347                   (Int)tid, usertag );
3348 
3349    thr = map_threads_maybe_lookup( tid );
3350    tl_assert(thr); /* cannot fail - Thread* must already exist */
3351 
3352    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3353    tl_assert(so);
3354 
3355    libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
3356 }
3357 
3358 static
3359 void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3360 {
3361    /* TID has just notionally received a message from a notional
3362       abstract synchronisation object whose identity is given by
3363       USERTAG.  Bind USERTAG to a real SO if it is not already so
3364       bound.  If the SO has at some point in the past been 'sent' on,
3365       do a 'strong receive' on it, thereby acquiring a dependency on
3366       the sender. */
3367    Thread* thr;
3368    SO*     so;
3369 
3370    if (SHOW_EVENTS >= 1)
3371       VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3372                   (Int)tid, usertag );
3373 
3374    thr = map_threads_maybe_lookup( tid );
3375    tl_assert(thr); /* cannot fail - Thread* must already exist */
3376 
3377    so = map_usertag_to_SO_lookup_or_alloc( usertag );
3378    tl_assert(so);
3379 
3380    /* Acquire a dependency on it.  If the SO has never so far been
3381       sent on, then libhb_so_recv will do nothing.  So we're safe
3382       regardless of SO's history. */
3383    libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3384 }
3385 
3386 static
3387 void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3388 {
3389    /* TID declares that any happens-before edges notionally stored in
3390       USERTAG can be deleted.  If (as would normally be the case) a
3391       SO is associated with USERTAG, then the association is removed
3392       and all resources associated with SO are freed.  Importantly,
3393       that frees up any VTSs stored in SO. */
3394    if (SHOW_EVENTS >= 1)
3395       VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3396                   (Int)tid, usertag );
3397 
3398    map_usertag_to_SO_delete( usertag );
3399 }
3400 
3401 
3402 #if defined(VGO_solaris)
3403 /* ----------------------------------------------------- */
3404 /* --- events to do with bind guard/clear intercepts --- */
3405 /* ----------------------------------------------------- */
3406 
3407 static
3408 void evh__HG_RTLD_BIND_GUARD(ThreadId tid, Int flags)
3409 {
3410    if (SHOW_EVENTS >= 1)
3411       VG_(printf)("evh__HG_RTLD_BIND_GUARD"
3412                   "(tid=%d, flags=%d)\n",
3413                   (Int)tid, flags);
3414 
3415    Thread *thr = map_threads_maybe_lookup(tid);
3416    tl_assert(thr != NULL);
3417 
3418    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3419    if ((bindflag & thr->bind_guard_flag) == 0) {
3420       thr->bind_guard_flag |= bindflag;
3421       HG_(thread_enter_synchr)(thr);
3422       /* Misuse pthread_create_nesting_level for ignoring mutex activity. */
3423       HG_(thread_enter_pthread_create)(thr);
3424    }
3425 }
3426 
3427 static
3428 void evh__HG_RTLD_BIND_CLEAR(ThreadId tid, Int flags)
3429 {
3430    if (SHOW_EVENTS >= 1)
3431       VG_(printf)("evh__HG_RTLD_BIND_CLEAR"
3432                   "(tid=%d, flags=%d)\n",
3433                   (Int)tid, flags);
3434 
3435    Thread *thr = map_threads_maybe_lookup(tid);
3436    tl_assert(thr != NULL);
3437 
3438    Int bindflag = (flags & VKI_THR_FLG_RTLD);
3439    if ((thr->bind_guard_flag & bindflag) != 0) {
3440       thr->bind_guard_flag &= ~bindflag;
3441       HG_(thread_leave_synchr)(thr);
3442       HG_(thread_leave_pthread_create)(thr);
3443    }
3444 }
3445 #endif /* VGO_solaris */
3446 
3447 
3448 /*--------------------------------------------------------------*/
3449 /*--- Lock acquisition order monitoring                      ---*/
3450 /*--------------------------------------------------------------*/
3451 
3452 /* FIXME: here are some optimisations still to do in
3453           laog__pre_thread_acquires_lock.
3454 
3455    The graph is structured so that if L1 --*--> L2 then L1 must be
3456    acquired before L2.
3457 
3458    The common case is that some thread T holds (eg) L1 L2 and L3 and
3459    is repeatedly acquiring and releasing Ln, and there is no ordering
3460    error in what it is doing.  Hence it repeatedly:
3461 
3462    (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3463        produces the answer No (because there is no error).
3464 
3465    (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3466        (because they already got added the first time T acquired Ln).
3467 
3468    Hence cache these two events:
3469 
3470    (1) Cache result of the query from last time.  Invalidate the cache
3471        any time any edges are added to or deleted from laog.
3472 
3473    (2) Cache these add-edge requests and ignore them if said edges
3474        have already been added to laog.  Invalidate the cache any time
3475        any edges are deleted from laog.
3476 */
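
/* For reference, the kind of client code the checker below is aimed
   at -- an illustrative sketch, not part of Helgrind:

      #include <pthread.h>

      static pthread_mutex_t mu1 = PTHREAD_MUTEX_INITIALIZER;
      static pthread_mutex_t mu2 = PTHREAD_MUTEX_INITIALIZER;

      static void* thr_a ( void* v ) {
         pthread_mutex_lock(&mu1);     // establishes the edge mu1 --> mu2
         pthread_mutex_lock(&mu2);
         pthread_mutex_unlock(&mu2);
         pthread_mutex_unlock(&mu1);
         return NULL;
      }

      static void* thr_b ( void* v ) {
         pthread_mutex_lock(&mu2);     // laog already has mu1 --*--> mu2 ...
         pthread_mutex_lock(&mu1);     // ... so this acquisition is reported
         pthread_mutex_unlock(&mu1);
         pthread_mutex_unlock(&mu2);
         return NULL;
      }

      int main ( void ) {
         pthread_t a, b;
         pthread_create(&a, NULL, thr_a, NULL);
         pthread_join(a, NULL);
         pthread_create(&b, NULL, thr_b, NULL);   // run sequentially: no real
         pthread_join(b, NULL);                   // deadlock, but the order
         return 0;                                // violation is still reported
      }

   laog__pre_thread_acquires_lock reports the second ordering because a
   path mu1 --*--> mu2 already exists in laog when thr_b tries to take
   mu1 while holding mu2. */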
3477 
3478 typedef
3479    struct {
3480       WordSetID inns; /* in univ_laog */
3481       WordSetID outs; /* in univ_laog */
3482    }
3483    LAOGLinks;
3484 
3485 /* lock order acquisition graph */
3486 static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3487 
3488 /* EXPOSITION ONLY: for each edge in 'laog', record the two places
3489    where that edge was created, so that we can show the user later if
3490    we need to. */
3491 typedef
3492    struct {
3493       Addr        src_ga; /* Lock guest addresses for */
3494       Addr        dst_ga; /* src/dst of the edge */
3495       ExeContext* src_ec; /* And corresponding places where that */
3496       ExeContext* dst_ec; /* ordering was established */
3497    }
3498    LAOGLinkExposition;
3499 
3500 static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
3501    /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3502    LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3503    LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3504    if (llx1->src_ga < llx2->src_ga) return -1;
3505    if (llx1->src_ga > llx2->src_ga) return  1;
3506    if (llx1->dst_ga < llx2->dst_ga) return -1;
3507    if (llx1->dst_ga > llx2->dst_ga) return  1;
3508    return 0;
3509 }
3510 
3511 static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3512 /* end EXPOSITION ONLY */
3513 
3514 
3515 __attribute__((noinline))
3516 static void laog__init ( void )
3517 {
3518    tl_assert(!laog);
3519    tl_assert(!laog_exposition);
3520    tl_assert(HG_(clo_track_lockorders));
3521 
3522    laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3523                       HG_(free), NULL/*unboxedcmp*/ );
3524 
3525    laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3526                                  cmp_LAOGLinkExposition );
3527 }
3528 
3529 static void laog__show ( const HChar* who ) {
3530    UWord i, ws_size;
3531    UWord* ws_words;
3532    Lock* me;
3533    LAOGLinks* links;
3534    VG_(printf)("laog (requested by %s) {\n", who);
3535    VG_(initIterFM)( laog );
3536    me = NULL;
3537    links = NULL;
3538    while (VG_(nextIterFM)( laog, (UWord*)&me,
3539                                  (UWord*)&links )) {
3540       tl_assert(me);
3541       tl_assert(links);
3542       VG_(printf)("   node %p:\n", me);
3543       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3544       for (i = 0; i < ws_size; i++)
3545          VG_(printf)("      inn %#lx\n", ws_words[i] );
3546       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3547       for (i = 0; i < ws_size; i++)
3548          VG_(printf)("      out %#lx\n", ws_words[i] );
3549       me = NULL;
3550       links = NULL;
3551    }
3552    VG_(doneIterFM)( laog );
3553    VG_(printf)("}\n");
3554 }
3555 
3556 static void univ_laog_do_GC ( void ) {
3557    Word i;
3558    LAOGLinks* links;
3559    Word seen = 0;
3560    Int prev_next_gc_univ_laog = next_gc_univ_laog;
3561    const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3562 
3563    Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3564                                         (Int) univ_laog_cardinality
3565                                         * sizeof(Bool) );
3566    // univ_laog_seen[*] set to 0 (False) by zalloc.
3567 
3568    VG_(initIterFM)( laog );
3569    links = NULL;
3570    while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3571       tl_assert(links);
3572       tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3573       univ_laog_seen[links->inns] = True;
3574       tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3575       univ_laog_seen[links->outs] = True;
3576       links = NULL;
3577    }
3578    VG_(doneIterFM)( laog );
3579 
3580    for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3581       if (univ_laog_seen[i])
3582          seen++;
3583       else
3584          HG_(dieWS) ( univ_laog, (WordSet)i );
3585    }
3586 
3587    HG_(free) (univ_laog_seen);
3588 
3589    // We need to decide the value of the next_gc.
3590    // 3 solutions were looked at:
3591    // Sol 1: garbage collect at seen * 2
3592    //   This solution was a lot slower, probably because we both do a lot of
3593    //   garbage collection and do not keep laog WordSets long enough, even
3594    //   though they will become useful again very soon.
3595    // Sol 2: garbage collect at a percentage increase of the current cardinality
3596    //         (with a min increase of 1)
3597    //   Trials on a small test program with 1%, 5% and 10% increase was done.
3598    //   1% is slightly faster than 5%, which is slightly slower than 10%.
3599    //   However, on a big application, this caused the memory to be exhausted,
3600    //   as even a 1% increase of size at each gc becomes a lot, when many gc
3601    //   are done.
3602    // Sol 3: always garbage collect at current cardinality + 1.
3603    //   This solution was the fastest of the 3 solutions, and caused no memory
3604    //   exhaustion in the big application.
3605    //
3606    // With regards to cost introduced by gc: on the t2t perf test (doing only
3607    // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3608    // version with garbage collection. With t2t 50 20 2, my machine started
3609    // to page out, and so the garbage collected version was much faster.
3610    // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3611    // performance difference is insignificant (~ 0.1 s).
3612    // Of course, it might be that real life programs are not well represented
3613    // by t2t.
3614 
3615    // If ever we want to have a more sophisticated control
3616    // (e.g. clo options to control the percentage increase or a fixed increase),
3617    // we should do it here, eg.
3618    //     next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3619    // Currently, we just hard-code the solution 3 above.
3620    next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3621 
3622    if (VG_(clo_stats))
3623       VG_(message)
3624          (Vg_DebugMsg,
3625           "univ_laog_do_GC cardinality entered %d exit %d next gc at %d\n",
3626           (Int)univ_laog_cardinality, (Int)seen, next_gc_univ_laog);
3627 }
3628 
3629 
3630 __attribute__((noinline))
3631 static void laog__add_edge ( Lock* src, Lock* dst ) {
3632    UWord      keyW;
3633    LAOGLinks* links;
3634    Bool       presentF, presentR;
3635    if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3636 
3637    /* Take the opportunity to sanity check the graph.  Record in
3638       presentF if there is already a src->dst mapping in this node's
3639       forwards links, and presentR if there is already a src->dst
3640       mapping in this node's backwards links.  They should agree!
3641       Also, we need to know whether the edge was already present so as
3642       to decide whether or not to update the link details mapping.  We
3643       can compute presentF and presentR essentially for free, so may
3644       as well do this always. */
3645    presentF = presentR = False;
3646 
3647    /* Update the out edges for src */
3648    keyW  = 0;
3649    links = NULL;
3650    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3651       WordSetID outs_new;
3652       tl_assert(links);
3653       tl_assert(keyW == (UWord)src);
3654       outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
3655       presentF = outs_new == links->outs;
3656       links->outs = outs_new;
3657    } else {
3658       links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
3659       links->inns = HG_(emptyWS)( univ_laog );
3660       links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3661       VG_(addToFM)( laog, (UWord)src, (UWord)links );
3662    }
3663    /* Update the in edges for dst */
3664    keyW  = 0;
3665    links = NULL;
3666    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3667       WordSetID inns_new;
3668       tl_assert(links);
3669       tl_assert(keyW == (UWord)dst);
3670       inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
3671       presentR = inns_new == links->inns;
3672       links->inns = inns_new;
3673    } else {
3674       links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
3675       links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
3676       links->outs = HG_(emptyWS)( univ_laog );
3677       VG_(addToFM)( laog, (UWord)dst, (UWord)links );
3678    }
3679 
3680    tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3681 
3682    if (!presentF && src->acquired_at && dst->acquired_at) {
3683       LAOGLinkExposition expo;
3684       /* If this edge is entering the graph, and we have acquired_at
3685          information for both src and dst, record those acquisition
3686          points.  Hence, if there is later a violation of this
3687          ordering, we can show the user the two places in which the
3688          required src-dst ordering was previously established. */
3689       if (0) VG_(printf)("acquire edge %#lx %#lx\n",
3690                          src->guestaddr, dst->guestaddr);
3691       expo.src_ga = src->guestaddr;
3692       expo.dst_ga = dst->guestaddr;
3693       expo.src_ec = NULL;
3694       expo.dst_ec = NULL;
3695       tl_assert(laog_exposition);
3696       if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
3697          /* we already have it; do nothing */
3698       } else {
3699          LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3700                                                sizeof(LAOGLinkExposition));
3701          expo2->src_ga = src->guestaddr;
3702          expo2->dst_ga = dst->guestaddr;
3703          expo2->src_ec = src->acquired_at;
3704          expo2->dst_ec = dst->acquired_at;
3705          VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
3706       }
3707    }
3708 
3709    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3710       univ_laog_do_GC();
3711 }
3712 
3713 __attribute__((noinline))
3714 static void laog__del_edge ( Lock* src, Lock* dst ) {
3715    UWord      keyW;
3716    LAOGLinks* links;
3717    if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
3718    /* Update the out edges for src */
3719    keyW  = 0;
3720    links = NULL;
3721    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
3722       tl_assert(links);
3723       tl_assert(keyW == (UWord)src);
3724       links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
3725    }
3726    /* Update the in edges for dst */
3727    keyW  = 0;
3728    links = NULL;
3729    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
3730       tl_assert(links);
3731       tl_assert(keyW == (UWord)dst);
3732       links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
3733    }
3734 
3735    /* Remove the exposition of src,dst (if present) */
3736    {
3737       LAOGLinkExposition *fm_expo;
3738 
3739       LAOGLinkExposition expo;
3740       expo.src_ga = src->guestaddr;
3741       expo.dst_ga = dst->guestaddr;
3742       expo.src_ec = NULL;
3743       expo.dst_ec = NULL;
3744 
3745       if (VG_(delFromFM) (laog_exposition,
3746                           (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3747          HG_(free) (fm_expo);
3748       }
3749    }
3750 
3751    /* deleting edges can increase the nr of WS so check for gc. */
3752    if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3753       univ_laog_do_GC();
3754    if (0) VG_(printf)("laog__del_edge exit\n");
3755 }
3756 
3757 __attribute__((noinline))
3758 static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3759    UWord      keyW;
3760    LAOGLinks* links;
3761    keyW  = 0;
3762    links = NULL;
3763    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3764       tl_assert(links);
3765       tl_assert(keyW == (UWord)lk);
3766       return links->outs;
3767    } else {
3768       return HG_(emptyWS)( univ_laog );
3769    }
3770 }
3771 
3772 __attribute__((noinline))
3773 static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3774    UWord      keyW;
3775    LAOGLinks* links;
3776    keyW  = 0;
3777    links = NULL;
3778    if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
3779       tl_assert(links);
3780       tl_assert(keyW == (UWord)lk);
3781       return links->inns;
3782    } else {
3783       return HG_(emptyWS)( univ_laog );
3784    }
3785 }
3786 
3787 __attribute__((noinline))
3788 static void laog__sanity_check ( const HChar* who ) {
3789    UWord i, ws_size;
3790    UWord* ws_words;
3791    Lock* me;
3792    LAOGLinks* links;
3793    VG_(initIterFM)( laog );
3794    me = NULL;
3795    links = NULL;
3796    if (0) VG_(printf)("laog sanity check\n");
3797    while (VG_(nextIterFM)( laog, (UWord*)&me,
3798                                  (UWord*)&links )) {
3799       tl_assert(me);
3800       tl_assert(links);
3801       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3802       for (i = 0; i < ws_size; i++) {
3803          if ( ! HG_(elemWS)( univ_laog,
3804                              laog__succs( (Lock*)ws_words[i] ),
3805                              (UWord)me ))
3806             goto bad;
3807       }
3808       HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3809       for (i = 0; i < ws_size; i++) {
3810          if ( ! HG_(elemWS)( univ_laog,
3811                              laog__preds( (Lock*)ws_words[i] ),
3812                              (UWord)me ))
3813             goto bad;
3814       }
3815       me = NULL;
3816       links = NULL;
3817    }
3818    VG_(doneIterFM)( laog );
3819    return;
3820 
3821   bad:
3822    VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3823    laog__show(who);
3824    tl_assert(0);
3825 }
3826 
3827 /* If there is a path in laog from 'src' to any of the elements in
3828    'dst', return an arbitrarily chosen element of 'dst' reachable from
3829    'src'.  If no path exist from 'src' to any element in 'dst', return
3830    NULL. */
3831 __attribute__((noinline))
3832 static
3833 Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3834 {
3835    Lock*     ret;
3836    Word      ssz;
3837    XArray*   stack;   /* of Lock* */
3838    WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
3839    Lock*     here;
3840    WordSetID succs;
3841    UWord     succs_size, i;
3842    UWord*    succs_words;
3843    //laog__sanity_check();
3844 
3845    /* If the destination set is empty, we can never get there from
3846       'src' :-), so don't bother to try */
3847    if (HG_(isEmptyWS)( univ_lsets, dsts ))
3848       return NULL;
3849 
3850    ret     = NULL;
3851    stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3852    visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
3853 
3854    (void) VG_(addToXA)( stack, &src );
3855 
3856    while (True) {
3857 
3858       ssz = VG_(sizeXA)( stack );
3859 
3860       if (ssz == 0) { ret = NULL; break; }
3861 
3862       here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3863       VG_(dropTailXA)( stack, 1 );
3864 
3865       if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
3866 
3867       if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
3868          continue;
3869 
3870       VG_(addToFM)( visited, (UWord)here, 0 );
3871 
3872       succs = laog__succs( here );
3873       HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3874       for (i = 0; i < succs_size; i++)
3875          (void) VG_(addToXA)( stack, &succs_words[i] );
3876    }
3877 
3878    VG_(deleteFM)( visited, NULL, NULL );
3879    VG_(deleteXA)( stack );
3880    return ret;
3881 }
3882 
3883 
3884 /* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
3885    between 'lk' and the locks already held by 'thr' and issue a
3886    complaint if so.  Also, update the ordering graph appropriately.
3887 */
3888 __attribute__((noinline))
3889 static void laog__pre_thread_acquires_lock (
3890                Thread* thr, /* NB: BEFORE lock is added */
3891                Lock*   lk
3892             )
3893 {
3894    UWord*   ls_words;
3895    UWord    ls_size, i;
3896    Lock*    other;
3897 
3898    /* It may be that 'thr' already holds 'lk' and is recursively
3899       relocking it.  In this case we just ignore the call. */
3900    /* NB: univ_lsets really is correct here */
3901    if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
3902       return;
3903 
3904    /* First, the check.  Complain if there is any path in laog from lk
3905       to any of the locks already held by thr, since if any such path
3906       existed, it would mean that previously lk was acquired before
3907       (rather than after, as we are doing here) at least one of those
3908       locks.
3909    */
3910    other = laog__do_dfs_from_to(lk, thr->locksetA);
3911    if (other) {
3912       LAOGLinkExposition key, *found;
3913       /* So we managed to find a path lk --*--> other in the graph,
3914          which implies that 'lk' should have been acquired before
3915          'other' but is in fact being acquired afterwards.  We present
3916          the lk/other arguments to record_error_LockOrder in the order
3917          in which they should have been acquired. */
3918       /* Go look in the laog_exposition mapping, to find the allocation
3919          points for this edge, so we can show the user. */
3920       key.src_ga = lk->guestaddr;
3921       key.dst_ga = other->guestaddr;
3922       key.src_ec = NULL;
3923       key.dst_ec = NULL;
3924       found = NULL;
3925       if (VG_(lookupFM)( laog_exposition,
3926                          (UWord*)&found, NULL, (UWord)&key )) {
3927          tl_assert(found != &key);
3928          tl_assert(found->src_ga == key.src_ga);
3929          tl_assert(found->dst_ga == key.dst_ga);
3930          tl_assert(found->src_ec);
3931          tl_assert(found->dst_ec);
3932          HG_(record_error_LockOrder)(
3933             thr, lk, other,
3934                  found->src_ec, found->dst_ec, other->acquired_at );
3935       } else {
3936          /* Hmm.  This can't happen (can it?) */
3937          /* Yes, it can happen: see tests/tc14_laog_dinphils.
3938             Imagine we have 3 philosophers A B C, and the forks
3939             between them:
3940 
3941                            C
3942 
3943                        fCA   fBC
3944 
3945                       A   fAB   B
3946 
3947             Let's have the following actions:
3948                    A takes    fCA,fAB
3949                    A releases fCA,fAB
3950                    B takes    fAB,fBC
3951                    B releases fAB,fBC
3952                    C takes    fBC,fCA
3953                    C releases fBC,fCA
3954 
3955             Helgrind will report a lock order error when C takes fCA.
3956             Effectively, we have a deadlock if the following
3957             sequence is done:
3958                 A takes fCA
3959                 B takes fAB
3960                 C takes fBC
3961 
3962             The error reported is:
3963               Observed (incorrect) order fBC followed by fCA
3964             but the stack traces that have established the required order
3965             are not given.
3966 
3967             This is because there is no pair (fCA, fBC) in laog exposition:
3968             the laog_exposition records all pairs of locks between a new lock
3969             taken by a thread and all the already taken locks.
3970             So, there is no laog_exposition (fCA, fBC) as no thread ever
3971             first locked fCA followed by fBC.
3972 
3973             In other words, when the deadlock cycle involves more than
3974             two locks, then helgrind does not report the sequence of
3975             operations that created the cycle.
3976 
3977             However, we can report the current stack trace (where
3978             lk is being taken), and the stack trace where other was acquired:
3979             Effectively, the variable 'other' contains a lock currently
3980             held by this thread, with its 'acquired_at'. */
3981 
3982          HG_(record_error_LockOrder)(
3983             thr, lk, other,
3984                  NULL, NULL, other->acquired_at );
3985       }
3986    }
3987 
3988    /* Second, add to laog the pairs
3989         (old, lk)  |  old <- locks already held by thr
3990       Since both old and lk are currently held by thr, their acquired_at
3991       fields must be non-NULL.
3992    */
3993    tl_assert(lk->acquired_at);
3994    HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3995    for (i = 0; i < ls_size; i++) {
3996       Lock* old = (Lock*)ls_words[i];
3997       tl_assert(old->acquired_at);
3998       laog__add_edge( old, lk );
3999    }
4000 
4001    /* Why "except_Locks" ?  We're here because a lock is being
4002       acquired by a thread, and we're in an inconsistent state here.
4003       See the call points in evhH__post_thread_{r,w}_acquires_lock.
4004       When called in this inconsistent state, locks__sanity_check duly
4005       barfs. */
4006    if (HG_(clo_sanity_flags) & SCE_LAOG)
4007       all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
4008 }
4009 
4010 /* Allocates a duplicate of words. Caller must HG_(free) the result. */
4011 static UWord* UWordV_dup(UWord* words, Word words_size)
4012 {
4013    UInt i;
4014 
4015    if (words_size == 0)
4016       return NULL;
4017 
4018    UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
4019 
4020    for (i = 0; i < words_size; i++)
4021       dup[i] = words[i];
4022 
4023    return dup;
4024 }
4025 
4026 /* Delete from 'laog' any pair mentioning a lock in locksToDelete */
4027 
4028 __attribute__((noinline))
4029 static void laog__handle_one_lock_deletion ( Lock* lk )
4030 {
4031    WordSetID preds, succs;
4032    UWord preds_size, succs_size, i, j;
4033    UWord *preds_words, *succs_words;
4034 
4035    preds = laog__preds( lk );
4036    succs = laog__succs( lk );
4037 
4038    // We need to duplicate the payload, as these can be garbage collected
4039    // during the del/add operations below.
4040    HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
4041    preds_words = UWordV_dup(preds_words, preds_size);
4042 
4043    HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
4044    succs_words = UWordV_dup(succs_words, succs_size);
4045 
4046    for (i = 0; i < preds_size; i++)
4047       laog__del_edge( (Lock*)preds_words[i], lk );
4048 
4049    for (j = 0; j < succs_size; j++)
4050       laog__del_edge( lk, (Lock*)succs_words[j] );
4051 
4052    for (i = 0; i < preds_size; i++) {
4053       for (j = 0; j < succs_size; j++) {
4054          if (preds_words[i] != succs_words[j]) {
4055             /* This can pass unlocked locks to laog__add_edge, since
4056                we're deleting stuff.  So their acquired_at fields may
4057                be NULL. */
4058             laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
4059          }
4060       }
4061    }
4062 
4063    if (preds_words)
4064       HG_(free) (preds_words);
4065    if (succs_words)
4066       HG_(free) (succs_words);
4067 
4068    // Remove lk information from laog links FM
4069    {
4070       LAOGLinks *links;
4071       Lock* linked_lk;
4072 
4073       if (VG_(delFromFM) (laog,
4074                           (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
4075          tl_assert (linked_lk == lk);
4076          HG_(free) (links);
4077       }
4078    }
4079    /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
4080 }
4081 
4082 //__attribute__((noinline))
4083 //static void laog__handle_lock_deletions (
4084 //               WordSetID /* in univ_laog */ locksToDelete
4085 //            )
4086 //{
4087 //   Word   i, ws_size;
4088 //   UWord* ws_words;
4089 //
4090 //
4091 //   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
4092 //   UWordV_dup call needed here ...
4093 //   for (i = 0; i < ws_size; i++)
4094 //      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
4095 //
4096 //   if (HG_(clo_sanity_flags) & SCE_LAOG)
4097 //      all__sanity_check("laog__handle_lock_deletions-post");
4098 //}
4099 
4100 
4101 /*--------------------------------------------------------------*/
4102 /*--- Malloc/free replacements                               ---*/
4103 /*--------------------------------------------------------------*/
4104 
4105 typedef
4106    struct {
4107       void*       next;    /* required by m_hashtable */
4108       Addr        payload; /* ptr to actual block    */
4109       SizeT       szB;     /* size requested         */
4110       ExeContext* where;   /* where it was allocated */
4111       Thread*     thr;     /* allocating thread      */
4112    }
4113    MallocMeta;
4114 
4115 /* A hash table of MallocMetas, used to track malloc'd blocks
4116    (obviously). */
4117 static VgHashTable *hg_mallocmeta_table = NULL;
4118 
4119 /* MallocMeta are small elements. We use a pool to avoid
4120    the overhead of malloc for each MallocMeta. */
4121 static PoolAlloc *MallocMeta_poolalloc = NULL;
4122 
4123 static MallocMeta* new_MallocMeta ( void ) {
4124    MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
4125    VG_(memset)(md, 0, sizeof(MallocMeta));
4126    return md;
4127 }
4128 static void delete_MallocMeta ( MallocMeta* md ) {
4129    VG_(freeEltPA)(MallocMeta_poolalloc, md);
4130 }
4131 
4132 
4133 /* Allocate a client block and set up the metadata for it. */
4134 
4135 static
4136 void* handle_alloc ( ThreadId tid,
4137                      SizeT szB, SizeT alignB, Bool is_zeroed )
4138 {
4139    Addr        p;
4140    MallocMeta* md;
4141 
4142    tl_assert( ((SSizeT)szB) >= 0 );
4143    p = (Addr)VG_(cli_malloc)(alignB, szB);
4144    if (!p) {
4145       return NULL;
4146    }
4147    if (is_zeroed)
4148       VG_(memset)((void*)p, 0, szB);
4149 
4150    /* Note that map_threads_lookup must succeed (cannot assert), since
4151       memory can only be allocated by currently alive threads, hence
4152       they must have an entry in map_threads. */
4153    md = new_MallocMeta();
4154    md->payload = p;
4155    md->szB     = szB;
4156    md->where   = VG_(record_ExeContext)( tid, 0 );
4157    md->thr     = map_threads_lookup( tid );
4158 
4159    VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4160 
4161    /* Tell the lower level memory wranglers. */
4162    evh__new_mem_heap( p, szB, is_zeroed );
4163 
4164    return (void*)p;
4165 }
4166 
4167 /* Re the checks for less-than-zero (also in hg_cli__realloc below):
4168    Cast to a signed type to catch any unexpectedly negative args.
4169    We're assuming here that the size asked for is not greater than
4170    2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4171    platforms). */
4172 static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4173    if (((SSizeT)n) < 0) return NULL;
4174    return handle_alloc ( tid, n, VG_(clo_alignment),
4175                          /*is_zeroed*/False );
4176 }
4177 static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4178    if (((SSizeT)n) < 0) return NULL;
4179    return handle_alloc ( tid, n, VG_(clo_alignment),
4180                          /*is_zeroed*/False );
4181 }
4182 static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4183    if (((SSizeT)n) < 0) return NULL;
4184    return handle_alloc ( tid, n, VG_(clo_alignment),
4185                          /*is_zeroed*/False );
4186 }
4187 static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4188    if (((SSizeT)n) < 0) return NULL;
4189    return handle_alloc ( tid, n, align,
4190                          /*is_zeroed*/False );
4191 }
4192 static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4193    if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
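   /* FIXME: nmemb*size1 is not checked for overflow; a wrapped product
      would silently under-allocate. */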
4194    return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4195                          /*is_zeroed*/True );
4196 }
4197 
4198 
4199 /* Free a client block, including getting rid of the relevant
4200    metadata. */
4201 
4202 static void handle_free ( ThreadId tid, void* p )
4203 {
4204    MallocMeta *md, *old_md;
4205    SizeT      szB;
4206 
4207    /* First see if we can find the metadata for 'p'. */
4208    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4209    if (!md)
4210       return; /* apparently freeing a bogus address.  Oh well. */
4211 
4212    tl_assert(md->payload == (Addr)p);
4213    szB = md->szB;
4214 
4215    /* Nuke the metadata block */
4216    old_md = (MallocMeta*)
4217             VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4218    tl_assert(old_md); /* it must be present - we just found it */
4219    tl_assert(old_md == md);
4220    tl_assert(old_md->payload == (Addr)p);
4221 
4222    VG_(cli_free)((void*)old_md->payload);
4223    delete_MallocMeta(old_md);
4224 
4225    /* Tell the lower level memory wranglers. */
4226    evh__die_mem_heap( (Addr)p, szB );
4227 }
4228 
4229 static void hg_cli__free ( ThreadId tid, void* p ) {
4230    handle_free(tid, p);
4231 }
4232 static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4233    handle_free(tid, p);
4234 }
4235 static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4236    handle_free(tid, p);
4237 }
4238 
4239 
4240 static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4241 {
4242    MallocMeta *md, *md_new, *md_tmp;
4243    SizeT      i;
4244 
4245    Addr payload = (Addr)payloadV;
4246 
4247    if (((SSizeT)new_size) < 0) return NULL;
4248 
4249    md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4250    if (!md)
4251       return NULL; /* apparently realloc-ing a bogus address.  Oh well. */
4252 
4253    tl_assert(md->payload == payload);
4254 
4255    if (md->szB == new_size) {
4256       /* size unchanged */
4257       md->where = VG_(record_ExeContext)(tid, 0);
4258       return payloadV;
4259    }
4260 
4261    if (md->szB > new_size) {
4262       /* new size is smaller */
4263       md->szB   = new_size;
4264       md->where = VG_(record_ExeContext)(tid, 0);
4265       evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4266       return payloadV;
4267    }
4268 
4269    /* else */ {
4270       /* new size is bigger */
4271       Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
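      /* FIXME: p_new is not checked for NULL here; if the allocation
         fails, the copy loop below will write through a null pointer. */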
4272 
4273       /* First half kept and copied, second half new */
4274       // FIXME: shouldn't we use a copier which implements the
4275       // memory state machine?
4276       evh__copy_mem( payload, p_new, md->szB );
4277       evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
4278                           /*inited*/False );
4279       /* FIXME: can anything funny happen here?  specifically, if the
4280          old range contained a lock, then die_mem_heap will complain.
4281          Is that the correct behaviour?  Not sure. */
4282       evh__die_mem_heap( payload, md->szB );
4283 
4284       /* Copy from old to new */
4285       for (i = 0; i < md->szB; i++)
4286          ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4287 
4288       /* Because the metadata hash table is indexed by payload address,
4289          we have to get rid of the old hash table entry and make a new
4290          one.  We can't just modify the existing metadata in place,
4291          because then it would (almost certainly) be in the wrong hash
4292          chain. */
4293       md_new = new_MallocMeta();
4294       *md_new = *md;
4295 
4296       md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4297       tl_assert(md_tmp);
4298       tl_assert(md_tmp == md);
4299 
4300       VG_(cli_free)((void*)md->payload);
4301       delete_MallocMeta(md);
4302 
4303       /* Update fields */
4304       md_new->where   = VG_(record_ExeContext)( tid, 0 );
4305       md_new->szB     = new_size;
4306       md_new->payload = p_new;
4307       md_new->thr     = map_threads_lookup( tid );
4308 
4309       /* and add */
4310       VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4311 
4312       return (void*)p_new;
4313    }
4314 }
4315 
4316 static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4317 {
4318    MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4319 
4320    // There may be slop, but pretend there isn't because only the asked-for
4321    // area will have been shadowed properly.
4322    return ( md ? md->szB : 0 );
4323 }
4324 
4325 
4326 /* For error creation: map 'data_addr' to a malloc'd chunk, if any.
4327    Slow linear search.  With a bit of hash table help if 'data_addr'
4328    is either the start of a block or up to 15 word-sized steps along
4329    from the start of a block. */
4330 
4331 static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4332 {
4333    /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4334       right at it. */
4335   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4336      return True;
4337   /* else normal interval rules apply */
4338   if (LIKELY(a < mm->payload)) return False;
4339   if (LIKELY(a >= mm->payload + mm->szB)) return False;
4340   return True;
4341 }
4342 
4343 Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
4344                                     /*OUT*/UInt*        tnr,
4345                                     /*OUT*/Addr*        payload,
4346                                     /*OUT*/SizeT*       szB,
4347                                     Addr                data_addr )
4348 {
4349    MallocMeta* mm;
4350    Int i;
4351    const Int n_fast_check_words = 16;
4352 
4353    /* First, do a few fast searches on the basis that data_addr might
4354       be exactly the start of a block or up to 15 words inside.  This
4355       can happen commonly via the creq
4356       _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4357    for (i = 0; i < n_fast_check_words; i++) {
4358       mm = VG_(HT_lookup)( hg_mallocmeta_table,
4359                            data_addr - (UWord)(UInt)i * sizeof(UWord) );
4360       if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4361          goto found;
4362    }
4363 
4364    /* Well, this totally sucks.  But without using an interval tree or
4365       some such, it's hard to see how to do better.  We have to check
4366       every block in the entire table. */
4367    VG_(HT_ResetIter)(hg_mallocmeta_table);
4368    while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
4369       if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4370          goto found;
4371    }
4372 
4373    /* Not found.  Bah. */
4374    return False;
4375    /*NOTREACHED*/
4376 
4377   found:
4378    tl_assert(mm);
4379    tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4380    if (where)   *where   = mm->where;
4381    if (tnr)     *tnr     = mm->thr->errmsg_index;
4382    if (payload) *payload = mm->payload;
4383    if (szB)     *szB     = mm->szB;
4384    return True;
4385 }
4386 
4387 
4388 /*--------------------------------------------------------------*/
4389 /*--- Instrumentation                                        ---*/
4390 /*--------------------------------------------------------------*/
4391 
4392 #define unop(_op, _arg1)         IRExpr_Unop((_op),(_arg1))
4393 #define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4394 #define mkexpr(_tmp)             IRExpr_RdTmp((_tmp))
4395 #define mkU32(_n)                IRExpr_Const(IRConst_U32(_n))
4396 #define mkU64(_n)                IRExpr_Const(IRConst_U64(_n))
4397 #define assign(_t, _e)           IRStmt_WrTmp((_t), (_e))
4398 
4399 /* This takes and returns atoms, of course.  Not full IRExprs. */
4400 static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4401 {
4402    tl_assert(arg1 && arg2);
4403    tl_assert(isIRAtom(arg1));
4404    tl_assert(isIRAtom(arg2));
4405    /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))).  Appalling
4406       code, I know. */
4407    IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4408    IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4409    IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4410    IRTemp res   = newIRTemp(sbOut->tyenv, Ity_I1);
4411    addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4412    addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4413    addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4414                                                        mkexpr(wide2))));
4415    addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4416    return mkexpr(res);
4417 }
4418 
4419 static void instrument_mem_access ( IRSB*   sbOut,
4420                                     IRExpr* addr,
4421                                     Int     szB,
4422                                     Bool    isStore,
4423                                     Int     hWordTy_szB,
4424                                     Int     goff_sp,
4425                                     IRExpr* guard ) /* NULL => True */
4426 {
4427    IRType   tyAddr   = Ity_INVALID;
4428    const HChar* hName    = NULL;
4429    void*    hAddr    = NULL;
4430    Int      regparms = 0;
4431    IRExpr** argv     = NULL;
4432    IRDirty* di       = NULL;
4433 
4434    // THRESH is the size of the window above SP (well,
4435    // mostly above) that we assume implies a stack reference.
4436    const Int THRESH = 4096 * 4; // somewhat arbitrary
4437    const Int rz_szB = VG_STACK_REDZONE_SZB;
4438 
4439    tl_assert(isIRAtom(addr));
4440    tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4441 
4442    tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
4443    tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4444 
4445    /* So the effective address is in 'addr' now. */
4446    regparms = 1; // unless stated otherwise
4447    if (isStore) {
4448       switch (szB) {
4449          case 1:
4450             hName = "evh__mem_help_cwrite_1";
4451             hAddr = &evh__mem_help_cwrite_1;
4452             argv = mkIRExprVec_1( addr );
4453             break;
4454          case 2:
4455             hName = "evh__mem_help_cwrite_2";
4456             hAddr = &evh__mem_help_cwrite_2;
4457             argv = mkIRExprVec_1( addr );
4458             break;
4459          case 4:
4460             hName = "evh__mem_help_cwrite_4";
4461             hAddr = &evh__mem_help_cwrite_4;
4462             argv = mkIRExprVec_1( addr );
4463             break;
4464          case 8:
4465             hName = "evh__mem_help_cwrite_8";
4466             hAddr = &evh__mem_help_cwrite_8;
4467             argv = mkIRExprVec_1( addr );
4468             break;
4469          default:
4470             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4471             regparms = 2;
4472             hName = "evh__mem_help_cwrite_N";
4473             hAddr = &evh__mem_help_cwrite_N;
4474             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4475             break;
4476       }
4477    } else {
4478       switch (szB) {
4479          case 1:
4480             hName = "evh__mem_help_cread_1";
4481             hAddr = &evh__mem_help_cread_1;
4482             argv = mkIRExprVec_1( addr );
4483             break;
4484          case 2:
4485             hName = "evh__mem_help_cread_2";
4486             hAddr = &evh__mem_help_cread_2;
4487             argv = mkIRExprVec_1( addr );
4488             break;
4489          case 4:
4490             hName = "evh__mem_help_cread_4";
4491             hAddr = &evh__mem_help_cread_4;
4492             argv = mkIRExprVec_1( addr );
4493             break;
4494          case 8:
4495             hName = "evh__mem_help_cread_8";
4496             hAddr = &evh__mem_help_cread_8;
4497             argv = mkIRExprVec_1( addr );
4498             break;
4499          default:
4500             tl_assert(szB > 8 && szB <= 512); /* stay sane */
4501             regparms = 2;
4502             hName = "evh__mem_help_cread_N";
4503             hAddr = &evh__mem_help_cread_N;
4504             argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4505             break;
4506       }
4507    }
4508 
4509    /* Create the helper. */
4510    tl_assert(hName);
4511    tl_assert(hAddr);
4512    tl_assert(argv);
4513    di = unsafeIRDirty_0_N( regparms,
4514                            hName, VG_(fnptr_to_fnentry)( hAddr ),
4515                            argv );
4516 
4517    if (! HG_(clo_check_stack_refs)) {
4518       /* We're ignoring memory references which are (obviously) to the
4519          stack.  In fact just skip stack refs that are within 4 pages
4520          of SP (SP - the redzone, really), as that's simple, easy, and
4521          filters out most stack references. */
4522       /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4523          some arbitrary N.  If that is true then addr is outside the
4524          range (SP - RZ .. SP + N - RZ).  If N is smallish (a few
4525          pages) then we can say addr is within a few pages of SP and
4526          so can't possibly be a heap access, and so can be skipped.
4527 
4528          Note that the condition simplifies to
4529             (addr - SP + RZ) >u N
4530          which generates better code in x86/amd64 backends, but it does
4531          not unfortunately simplify to
4532             (addr - SP) >u (N - RZ)
4533          (would be beneficial because N - RZ is a constant) because
4534          wraparound arithmetic messes up the comparison.  eg.
4535          20 >u 10 == True,
4536          but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4537       */
4538       IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4539       addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4540 
4541       /* "addr - SP" */
4542       IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4543       addStmtToIRSB(
4544          sbOut,
4545          assign(addr_minus_sp,
4546                 tyAddr == Ity_I32
4547                    ? binop(Iop_Sub32, addr, mkexpr(sp))
4548                    : binop(Iop_Sub64, addr, mkexpr(sp)))
4549       );
4550 
4551       /* "addr - SP + RZ" */
4552       IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4553       addStmtToIRSB(
4554          sbOut,
4555          assign(diff,
4556                 tyAddr == Ity_I32
4557                    ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4558                    : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4559       );
4560 
4561       /* guardA == "guard on the address" */
4562       IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
4563       addStmtToIRSB(
4564          sbOut,
4565          assign(guardA,
4566                 tyAddr == Ity_I32
4567                    ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4568                    : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4569       );
4570       di->guard = mkexpr(guardA);
4571    }
4572 
4573    /* If there's a guard on the access itself (as supplied by the
4574       caller of this routine), we need to AND that in to any guard we
4575       might already have. */
4576    if (guard) {
4577       di->guard = mk_And1(sbOut, di->guard, guard);
4578    }
4579 
4580    /* Add the helper. */
4581    addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
4582 }
4583 
4584 
4585 /* Figure out if GA is a guest code address in the dynamic linker, and
4586    if so return True.  Otherwise (and in case of any doubt) return
4587    False.  (I.e. err on the safe side: False is the safe value.) */
4588 static Bool is_in_dynamic_linker_shared_object( Addr ga )
4589 {
4590    DebugInfo* dinfo;
4591    const HChar* soname;
4592    if (0) return False;
4593 
4594    dinfo = VG_(find_DebugInfo)( ga );
4595    if (!dinfo) return False;
4596 
4597    soname = VG_(DebugInfo_get_soname)(dinfo);
4598    tl_assert(soname);
4599    if (0) VG_(printf)("%s\n", soname);
4600 
4601 #  if defined(VGO_linux)
4602    if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))        return True;
4603    if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))        return True;
4604    if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4605    if (VG_STREQ(soname, VG_U_LD64_SO_1))            return True;
4606    if (VG_STREQ(soname, VG_U_LD64_SO_2))            return True;
4607    if (VG_STREQ(soname, VG_U_LD_SO_1))              return True;
4608    if (VG_STREQ(soname, VG_U_LD_LINUX_AARCH64_SO_1)) return True;
4609    if (VG_STREQ(soname, VG_U_LD_LINUX_ARMHF_SO_3))  return True;
4610 #  elif defined(VGO_darwin)
4611    if (VG_STREQ(soname, VG_U_DYLD)) return True;
4612 #  elif defined(VGO_solaris)
4613    if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4614 #  else
4615 #    error "Unsupported OS"
4616 #  endif
4617    return False;
4618 }
4619 
4620 static
4621 IRSB* hg_instrument ( VgCallbackClosure* closure,
4622                       IRSB* bbIn,
4623                       const VexGuestLayout* layout,
4624                       const VexGuestExtents* vge,
4625                       const VexArchInfo* archinfo_host,
4626                       IRType gWordTy, IRType hWordTy )
4627 {
4628    Int     i;
4629    IRSB*   bbOut;
4630    Addr    cia; /* address of current insn */
4631    IRStmt* st;
4632    Bool    inLDSO = False;
4633    Addr    inLDSOmask4K = 1; /* mismatches on first check */
4634 
4635    const Int goff_sp = layout->offset_SP;
4636 
4637    if (gWordTy != hWordTy) {
4638       /* We don't currently support this case. */
4639       VG_(tool_panic)("host/guest word size mismatch");
4640    }
4641 
4642    if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4643       VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4644    }
4645 
4646    /* Set up BB */
4647    bbOut           = emptyIRSB();
4648    bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
4649    bbOut->next     = deepCopyIRExpr(bbIn->next);
4650    bbOut->jumpkind = bbIn->jumpkind;
4651    bbOut->offsIP   = bbIn->offsIP;
4652 
4653    // Copy verbatim any IR preamble preceding the first IMark
4654    i = 0;
4655    while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4656       addStmtToIRSB( bbOut, bbIn->stmts[i] );
4657       i++;
4658    }
4659 
4660    // Get the first statement, and initial cia from it
4661    tl_assert(bbIn->stmts_used > 0);
4662    tl_assert(i < bbIn->stmts_used);
4663    st = bbIn->stmts[i];
4664    tl_assert(Ist_IMark == st->tag);
4665    cia = st->Ist.IMark.addr;
4666    st = NULL;
4667 
4668    for (/*use current i*/; i < bbIn->stmts_used; i++) {
4669       st = bbIn->stmts[i];
4670       tl_assert(st);
4671       tl_assert(isFlatIRStmt(st));
4672       switch (st->tag) {
4673          case Ist_NoOp:
4674          case Ist_AbiHint:
4675          case Ist_Put:
4676          case Ist_PutI:
4677          case Ist_Exit:
4678             /* None of these can contain any memory references. */
4679             break;
4680 
4681          case Ist_IMark:
4682             /* no mem refs, but note the insn address. */
4683             cia = st->Ist.IMark.addr;
4684             /* Don't instrument the dynamic linker.  It generates a
4685                lot of races which we just expensively suppress, so
4686                it's pointless.
4687 
4688                Avoid flooding is_in_dynamic_linker_shared_object with
4689                requests by only checking at transitions between 4K
4690                pages. */
4691             if ((cia & ~(Addr)0xFFF) != inLDSOmask4K) {
4692                if (0) VG_(printf)("NEW %#lx\n", cia);
4693                inLDSOmask4K = cia & ~(Addr)0xFFF;
4694                inLDSO = is_in_dynamic_linker_shared_object(cia);
4695             } else {
4696                if (0) VG_(printf)("old %#lx\n", cia);
4697             }
4698             break;
4699 
4700          case Ist_MBE:
4701             switch (st->Ist.MBE.event) {
4702                case Imbe_Fence:
4703                case Imbe_CancelReservation:
4704                   break; /* not interesting */
4705                default:
4706                   goto unhandled;
4707             }
4708             break;
4709 
4710          case Ist_CAS: {
4711             /* Atomic read-modify-write cycle.  Just pretend it's a
4712                read. */
4713             IRCAS* cas    = st->Ist.CAS.details;
4714             Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
4715             if (isDCAS) {
4716                tl_assert(cas->expdHi);
4717                tl_assert(cas->dataHi);
4718             } else {
4719                tl_assert(!cas->expdHi);
4720                tl_assert(!cas->dataHi);
4721             }
4722             /* Just be boring about it. */
4723             if (!inLDSO) {
4724                instrument_mem_access(
4725                   bbOut,
4726                   cas->addr,
4727                   (isDCAS ? 2 : 1)
4728                      * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4729                   False/*!isStore*/,
4730                   sizeofIRType(hWordTy), goff_sp,
4731                   NULL/*no-guard*/
4732                );
4733             }
4734             break;
4735          }
4736 
4737          case Ist_LLSC: {
4738             /* We pretend store-conditionals don't exist, viz, ignore
4739                them.  Whereas load-linked's are treated the same as
4740                normal loads. */
4741             IRType dataTy;
4742             if (st->Ist.LLSC.storedata == NULL) {
4743                /* LL */
4744                dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
4745                if (!inLDSO) {
4746                   instrument_mem_access(
4747                      bbOut,
4748                      st->Ist.LLSC.addr,
4749                      sizeofIRType(dataTy),
4750                      False/*!isStore*/,
4751                      sizeofIRType(hWordTy), goff_sp,
4752                      NULL/*no-guard*/
4753                   );
4754                }
4755             } else {
4756                /* SC */
4757                /*ignore */
4758             }
4759             break;
4760          }
4761 
4762          case Ist_Store:
4763             if (!inLDSO) {
4764                instrument_mem_access(
4765                   bbOut,
4766                   st->Ist.Store.addr,
4767                   sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4768                   True/*isStore*/,
4769                   sizeofIRType(hWordTy), goff_sp,
4770                   NULL/*no-guard*/
4771                );
4772             }
4773             break;
4774 
4775          case Ist_StoreG: {
4776             IRStoreG* sg   = st->Ist.StoreG.details;
4777             IRExpr*   data = sg->data;
4778             IRExpr*   addr = sg->addr;
4779             IRType    type = typeOfIRExpr(bbIn->tyenv, data);
4780             tl_assert(type != Ity_INVALID);
4781             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4782                                    True/*isStore*/,
4783                                    sizeofIRType(hWordTy),
4784                                    goff_sp, sg->guard );
4785             break;
4786          }
4787 
4788          case Ist_LoadG: {
4789             IRLoadG* lg       = st->Ist.LoadG.details;
4790             IRType   type     = Ity_INVALID; /* loaded type */
4791             IRType   typeWide = Ity_INVALID; /* after implicit widening */
4792             IRExpr*  addr     = lg->addr;
4793             typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4794             tl_assert(type != Ity_INVALID);
4795             instrument_mem_access( bbOut, addr, sizeofIRType(type),
4796                                    False/*!isStore*/,
4797                                    sizeofIRType(hWordTy),
4798                                    goff_sp, lg->guard );
4799             break;
4800          }
4801 
4802          case Ist_WrTmp: {
4803             IRExpr* data = st->Ist.WrTmp.data;
4804             if (data->tag == Iex_Load) {
4805                if (!inLDSO) {
4806                   instrument_mem_access(
4807                      bbOut,
4808                      data->Iex.Load.addr,
4809                      sizeofIRType(data->Iex.Load.ty),
4810                      False/*!isStore*/,
4811                      sizeofIRType(hWordTy), goff_sp,
4812                      NULL/*no-guard*/
4813                   );
4814                }
4815             }
4816             break;
4817          }
4818 
4819          case Ist_Dirty: {
4820             Int      dataSize;
4821             IRDirty* d = st->Ist.Dirty.details;
4822             if (d->mFx != Ifx_None) {
4823                /* This dirty helper accesses memory.  Collect the
4824                   details. */
4825                tl_assert(d->mAddr != NULL);
4826                tl_assert(d->mSize != 0);
4827                dataSize = d->mSize;
4828                if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
4829                   if (!inLDSO) {
4830                      instrument_mem_access(
4831                         bbOut, d->mAddr, dataSize, False/*!isStore*/,
4832                         sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
4833                      );
4834                   }
4835                }
4836                if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
4837                   if (!inLDSO) {
4838                      instrument_mem_access(
4839                         bbOut, d->mAddr, dataSize, True/*isStore*/,
4840                         sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
4841                      );
4842                   }
4843                }
4844             } else {
4845                tl_assert(d->mAddr == NULL);
4846                tl_assert(d->mSize == 0);
4847             }
4848             break;
4849          }
4850 
4851          default:
4852          unhandled:
4853             ppIRStmt(st);
4854             tl_assert(0);
4855 
4856       } /* switch (st->tag) */
4857 
4858       addStmtToIRSB( bbOut, st );
4859    } /* iterate over bbIn->stmts */
4860 
4861    return bbOut;
4862 }
4863 
4864 #undef binop
4865 #undef mkexpr
4866 #undef mkU32
4867 #undef mkU64
4868 #undef assign
4869 
4870 
4871 /*----------------------------------------------------------------*/
4872 /*--- Client requests                                          ---*/
4873 /*----------------------------------------------------------------*/
4874 
4875 /* Sheesh.  Yet another goddam finite map. */
4876 static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4877 
4878 static void map_pthread_t_to_Thread_INIT ( void ) {
4879    if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
4880       map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4881                                             HG_(free), NULL );
4882    }
4883 }
4884 
4885 /* A list of Ada dependent tasks and their masters. Used for implementing
4886    the Ada task termination semantics as implemented by the
4887    gcc gnat Ada runtime. */
4888 typedef
4889    struct {
4890       void* dependent; // Ada Task Control Block of the Dependent
4891       void* master;    // ATCB of the master
4892       Word  master_level; // level of dependency between master and dependent
4893       Thread* hg_dependent; // helgrind Thread* for dependent task.
4894    }
4895    GNAT_dmml;
4896 static XArray* gnat_dmmls;   /* of GNAT_dmml */
4897 static void gnat_dmmls_INIT (void)
4898 {
4899    if (UNLIKELY(gnat_dmmls == NULL)) {
4900       gnat_dmmls = VG_(newXA) (HG_(zalloc), "hg.gnat_md.1",
4901                                HG_(free),
4902                                sizeof(GNAT_dmml) );
4903    }
4904 }
4905 static void print_monitor_help ( void )
4906 {
4907    VG_(gdb_printf)
4908       (
4909 "\n"
4910 "helgrind monitor commands:\n"
4911 "  info locks [lock_addr]  : show status of lock at addr lock_addr\n"
4912 "           with no lock_addr, show status of all locks\n"
4913 "  accesshistory <addr> [<len>]   : show access history recorded\n"
4914 "                     for <len> (or 1) bytes at <addr>\n"
4915 "\n");
4916 }
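/* For instance, from a gdb attached via vgdb these commands are issued
   with the "monitor" prefix, e.g.:
      (gdb) monitor info locks
      (gdb) monitor accesshistory 0x7ff000a0 8
   (the address here is only illustrative). */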
4917 
4918 /* return True if request recognised, False otherwise */
4919 static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4920 {
4921    HChar* wcmd;
4922    HChar s[VG_(strlen(req)) + 1]; /* copy for strtok_r (+1 for the NUL) */
4923    HChar *ssaveptr;
4924    Int   kwdid;
4925 
4926    VG_(strcpy) (s, req);
4927 
4928    wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4929    /* NB: if possible, avoid introducing a new command below which
4930       starts with the same first letter(s) as an already existing
4931       command. This ensures a shorter abbreviation for the user. */
4932    switch (VG_(keyword_id)
4933            ("help info accesshistory",
4934             wcmd, kwd_report_duplicated_matches)) {
4935    case -2: /* multiple matches */
4936       return True;
4937    case -1: /* not found */
4938       return False;
4939    case  0: /* help */
4940       print_monitor_help();
4941       return True;
4942    case  1: /* info */
4943       wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4944       switch (kwdid = VG_(keyword_id)
4945               ("locks",
4946                wcmd, kwd_report_all)) {
4947       case -2:
4948       case -1:
4949          break;
4950       case 0: // locks
4951          {
4952             const HChar* wa;
4953             Addr lk_addr = 0;
4954             Bool lk_shown = False;
4955             Bool all_locks = True;
4956             Int i;
4957             Lock* lk;
4958 
4959             wa = VG_(strtok_r) (NULL, " ", &ssaveptr);
4960             if (wa != NULL) {
4961                if (VG_(parse_Addr) (&wa, &lk_addr) )
4962                   all_locks = False;
4963                else {
4964                   VG_(gdb_printf) ("missing or malformed address\n");
4965                }
4966             }
4967             for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
4968                if (all_locks || lk_addr == lk->guestaddr) {
4969                   pp_Lock(0, lk,
4970                           True /* show_lock_addrdescr */,
4971                           False /* show_internal_data */);
4972                   lk_shown = True;
4973                }
4974             }
4975             if (i == 0)
4976                VG_(gdb_printf) ("no locks\n");
4977             if (!all_locks && !lk_shown)
4978                VG_(gdb_printf) ("lock with address %p not found\n",
4979                                 (void*)lk_addr);
4980          }
4981          break;
4982       default:
4983          tl_assert(0);
4984       }
4985       return True;
4986 
4987    case  2: /* accesshistory */
4988       {
4989          Addr address;
4990          SizeT szB = 1;
4991          if (VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr)) {
4992             if (szB >= 1)
4993                libhb_event_map_access_history (address, szB, HG_(print_access));
4994             else
4995                VG_(gdb_printf) ("len must be >=1\n");
4996          }
4997          return True;
4998       }
4999 
5000    default:
5001       tl_assert(0);
5002       return False;
5003    }
5004 }
5005 
5006 static
5007 Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
5008 {
5009    if (!VG_IS_TOOL_USERREQ('H','G',args[0])
5010        && VG_USERREQ__GDB_MONITOR_COMMAND   != args[0])
5011       return False;
5012 
5013    /* Anything that gets past the above check is one of ours, so we
5014       should be able to handle it. */
5015 
5016    /* default, meaningless return value, unless otherwise set */
5017    *ret = 0;
5018 
5019    switch (args[0]) {
5020 
5021       /* --- --- User-visible client requests --- --- */
5022 
5023       case VG_USERREQ__HG_CLEAN_MEMORY:
5024          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%lu)\n",
5025                             args[1], args[2]);
5026          /* Call die_mem to (expensively) tidy up properly, if there
5027             are any held locks etc in the area.  Calling evh__die_mem
5028             and then evh__new_mem is a bit inefficient; probably just
5029             the latter would do. */
5030          if (args[2] > 0) { /* length */
5031             evh__die_mem(args[1], args[2]);
5032             /* and then set it to New */
5033             evh__new_mem(args[1], args[2]);
5034          }
5035          break;
5036 
5037       case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
5038          Addr  payload = 0;
5039          SizeT pszB = 0;
5040          if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
5041                             args[1]);
5042          if (HG_(mm_find_containing_block)(NULL, NULL,
5043                                            &payload, &pszB, args[1])) {
5044             if (pszB > 0) {
5045                evh__die_mem(payload, pszB);
5046                evh__new_mem(payload, pszB);
5047             }
5048             *ret = pszB;
5049          } else {
5050             *ret = (UWord)-1;
5051          }
5052          break;
5053       }
5054 
5055       case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
5056          if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%lu)\n",
5057                             args[1], args[2]);
5058          if (args[2] > 0) { /* length */
5059             evh__untrack_mem(args[1], args[2]);
5060          }
5061          break;
5062 
5063       case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
5064          if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%lu)\n",
5065                             args[1], args[2]);
5066          if (args[2] > 0) { /* length */
5067             evh__new_mem(args[1], args[2]);
5068          }
5069          break;
5070 
5071       case _VG_USERREQ__HG_GET_ABITS:
5072          if (0) VG_(printf)("HG_GET_ABITS(%#lx,%#lx,%lu)\n",
5073                             args[1], args[2], args[3]);
5074          UChar *zzabit = (UChar *) args[2];
5075          if (zzabit == NULL
5076              || VG_(am_is_valid_for_client)((Addr)zzabit, (SizeT)args[3],
5077                                             VKI_PROT_READ|VKI_PROT_WRITE))
5078             *ret = (UWord) libhb_srange_get_abits ((Addr)   args[1],
5079                                                    (UChar*) args[2],
5080                                                    (SizeT)  args[3]);
5081          else
5082             *ret = -1;
5083          break;
5084 
5085       /* --- --- Client requests for Helgrind's use only --- --- */
5086 
5087       /* Some thread is telling us its pthread_t value.  Record the
5088          binding between that and the associated Thread*, so we can
5089          later find the Thread* again when notified of a join by the
5090          thread. */
5091       case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
5092          Thread* my_thr = NULL;
5093          if (0)
5094          VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
5095                      (void*)args[1]);
5096          map_pthread_t_to_Thread_INIT();
5097          my_thr = map_threads_maybe_lookup( tid );
5098          /* This assertion should hold because the map_threads (tid to
5099             Thread*) binding should have been made at the point of
5100             low-level creation of this thread, which should have
5101             happened prior to us getting this client request for it.
5102             That's because this client request is sent from
5103             client-world from the 'thread_wrapper' function, which
5104             only runs once the thread has been low-level created. */
5105          tl_assert(my_thr != NULL);
5106          /* So now we know that (pthread_t)args[1] is associated with
5107             (Thread*)my_thr.  Note that down. */
5108          if (0)
5109          VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
5110                      (void*)args[1], (void*)my_thr );
5111          VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
5112 
5113          if (my_thr->coretid != 1) {
5114             /* FIXME: hardwires assumption about identity of the root thread. */
5115             if (HG_(clo_ignore_thread_creation)) {
5116                HG_(thread_leave_pthread_create)(my_thr);
5117                HG_(thread_leave_synchr)(my_thr);
5118                tl_assert(my_thr->synchr_nesting == 0);
5119             }
5120          }
5121          break;
5122       }
5123 
5124       case _VG_USERREQ__HG_PTH_API_ERROR: {
5125          Thread* my_thr = NULL;
5126          map_pthread_t_to_Thread_INIT();
5127          my_thr = map_threads_maybe_lookup( tid );
5128          tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
5129          HG_(record_error_PthAPIerror)(
5130             my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
5131          break;
5132       }
5133 
5134       /* This thread (tid) has completed a join with the quitting
5135          thread whose pthread_t is in args[1]. */
5136       case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
5137          Thread* thr_q = NULL; /* quitter Thread* */
5138          Bool    found = False;
5139          if (0)
5140          VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
5141                      (void*)args[1]);
5142          map_pthread_t_to_Thread_INIT();
5143          found = VG_(lookupFM)( map_pthread_t_to_Thread,
5144                                 NULL, (UWord*)&thr_q, (UWord)args[1] );
5145           /* Can this fail?  It would mean that our pthread_join
5146              wrapper observed a successful join on args[1] yet that
5147              thread never existed (or at least, it never lodged an
5148              entry in the mapping (via SET_MY_PTHREAD_T)).  Which
5149              sounds like a bug in the threads library. */
5150          // FIXME: get rid of this assertion; handle properly
5151          tl_assert(found);
5152          if (found) {
5153             if (0)
5154             VG_(printf)(".................... quitter Thread* = %p\n",
5155                         thr_q);
5156             evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
5157          }
5158          break;
5159       }
5160 
5161       /* This thread (tid) is informing us of its master. */
5162       case _VG_USERREQ__HG_GNAT_MASTER_HOOK: {
5163          GNAT_dmml dmml;
5164          dmml.dependent = (void*)args[1];
5165          dmml.master = (void*)args[2];
5166          dmml.master_level = (Word)args[3];
5167          dmml.hg_dependent = map_threads_maybe_lookup( tid );
5168          tl_assert(dmml.hg_dependent);
5169 
5170          if (0)
5171          VG_(printf)("HG_GNAT_MASTER_HOOK (tid %d): "
5172                      "dependent = %p master = %p master_level = %ld"
5173                      " dependent Thread* = %p\n",
5174                      (Int)tid, dmml.dependent, dmml.master, dmml.master_level,
5175                      dmml.hg_dependent);
5176          gnat_dmmls_INIT();
5177          VG_(addToXA) (gnat_dmmls, &dmml);
5178          break;
5179       }
5180 
5181       /* This thread (tid) is informing us that it has completed a
5182          master. */
5183       case _VG_USERREQ__HG_GNAT_MASTER_COMPLETED_HOOK: {
5184          Word n;
5185          const Thread *stayer = map_threads_maybe_lookup( tid );
5186          const void *master = (void*)args[1];
5187          const Word master_level = (Word) args[2];
5188          tl_assert(stayer);
5189 
5190          if (0)
5191          VG_(printf)("HG_GNAT_MASTER_COMPLETED_HOOK (tid %d): "
5192                      "self_id = %p master_level = %ld Thread* = %p\n",
5193                      (Int)tid, master, master_level, stayer);
5194 
5195          gnat_dmmls_INIT();
5196          /* Reverse loop on the array, simulating a pthread_join for
5197             the Dependent tasks of the completed master, and removing
5198             them from the array. */
5199          for (n = VG_(sizeXA) (gnat_dmmls) - 1; n >= 0; n--) {
5200             GNAT_dmml *dmml = (GNAT_dmml*) VG_(indexXA)(gnat_dmmls, n);
5201             if (dmml->master == master
5202                 && dmml->master_level == master_level) {
5203                if (0)
5204                VG_(printf)("quitter %p dependency to stayer %p\n",
5205                            dmml->hg_dependent->hbthr,  stayer->hbthr);
5206                tl_assert(dmml->hg_dependent->hbthr != stayer->hbthr);
5207                generate_quitter_stayer_dependence (dmml->hg_dependent->hbthr,
5208                                                    stayer->hbthr);
5209                VG_(removeIndexXA) (gnat_dmmls, n);
5210             }
5211          }
5212          break;
5213       }
5214 
5215       /* EXPOSITION only: by intercepting lock init events we can show
5216          the user where the lock was initialised, rather than only
5217          being able to show where it was first locked.  Intercepting
5218          lock initialisations is not necessary for the basic operation
5219          of the race checker. */
5220       case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
5221          evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
5222          break;
5223 
5224       /* mutex=arg[1], mutex_is_init=arg[2] */
5225       case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
5226          evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5227          break;
5228 
5229       case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
5230          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5231          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5232             evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
5233          break;
5234 
5235       case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
5236          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5237             evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
5238          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5239          break;
5240 
5241       case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*
5242          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5243          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5244             evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5245          break;
5246 
5247       case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*, long
5248          if ((args[2] == True) // lock actually taken
5249              && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5250             evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5251          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5252          break;
5253 
5254       /* This thread is about to do pthread_cond_signal on the
5255          pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
5256       case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
5257       case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
5258          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5259          evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
5260          break;
5261 
5262       case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_POST:
5263       case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_POST:
5264          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5265          break;
5266 
5267       /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
5268          Returns a flag indicating whether or not the mutex is believed to be
5269          valid for this operation. */
5270       case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
5271          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5272          Bool mutex_is_valid
5273             = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
5274                                                   (void*)args[2] );
5275          *ret = mutex_is_valid ? 1 : 0;
5276          break;
5277       }
5278 
5279       /* Thread successfully completed pthread_cond_init:
5280          cond=arg[1], cond_attr=arg[2] */
5281       case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5282          evh__HG_PTHREAD_COND_INIT_POST( tid,
5283                                          (void*)args[1], (void*)args[2] );
5284          break;
5285 
5286       /* cond=arg[1], cond_is_init=arg[2] */
5287       case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
5288          evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
5289          break;
5290 
5291       /* Thread completed pthread_cond_wait, cond=arg[1],
5292          mutex=arg[2], timeout=arg[3], successful=arg[4] */
5293       case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5294          if (args[4] == True)
5295             evh__HG_PTHREAD_COND_WAIT_POST( tid,
5296                                             (void*)args[1], (void*)args[2],
5297                                             (Bool)args[3] );
5298          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5299          break;
5300 
5301       case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5302          evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5303          break;
5304 
5305       case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5306          evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5307          break;
5308 
5309       /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
5310       case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
5311          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5312          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5313             evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5314                                              args[2], args[3] );
5315          break;
5316 
5317       /* rwlock=arg[1], isW=arg[2], tookLock=arg[3] */
5318       case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5319          if ((args[3] == True)
5320              && (HG_(get_pthread_create_nesting_level)(tid) == 0))
5321             evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5322          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5323          break;
5324 
5325       case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5326          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5327          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5328             evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5329          break;
5330 
5331       case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5332          if (HG_(get_pthread_create_nesting_level)(tid) == 0)
5333             evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5334          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5335          break;
5336 
5337       case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5338          evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
5339          break;
5340 
5341       case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5342          evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
5343          break;
5344 
5345       case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5346          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5347          evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5348          break;
5349 
5350       case _VG_USERREQ__HG_POSIX_SEM_POST_POST: /* sem_t* */
5351          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5352          break;
5353 
5354       case _VG_USERREQ__HG_POSIX_SEM_WAIT_PRE: /* sem_t* */
5355          HG_(thread_enter_synchr)(map_threads_maybe_lookup(tid));
5356          break;
5357 
5358       case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t*, long tookLock */
5359          if (args[2] == True)
5360             evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5361          HG_(thread_leave_synchr)(map_threads_maybe_lookup(tid));
5362          break;
5363 
5364       case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
5365          /* pth_bar_t*, ulong count, ulong resizable */
5366          evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5367                                                 args[2], args[3] );
5368          break;
5369 
5370       case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5371          /* pth_bar_t*, ulong newcount */
5372          evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5373                                               args[2] );
5374          break;
5375 
5376       case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5377          /* pth_bar_t* */
5378          evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5379          break;
5380 
5381       case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5382          /* pth_bar_t* */
5383          evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5384          break;
5385 
5386       case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5387          /* pth_spinlock_t* */
5388          evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5389          break;
5390 
5391       case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5392          /* pth_spinlock_t* */
5393          evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5394          break;
5395 
5396       case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5397          /* pth_spinlock_t*, Word */
5398          evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5399          break;
5400 
5401       case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5402          /* pth_spinlock_t* */
5403          evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5404          break;
5405 
5406       case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5407          /* pth_spinlock_t* */
5408          evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5409          break;
5410 
5411       case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
5412          /* HChar* who */
5413          HChar*  who = (HChar*)args[1];
5414          HChar   buf[50 + 50];
5415          Thread* thr = map_threads_maybe_lookup( tid );
5416          tl_assert( thr ); /* I must be mapped */
5417          tl_assert( who );
5418          tl_assert( VG_(strlen)(who) <= 50 );
5419          VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5420          /* record_error_Misc strdup's buf, so this is safe: */
5421          HG_(record_error_Misc)( thr, buf );
5422          break;
5423       }
5424 
5425       case _VG_USERREQ__HG_USERSO_SEND_PRE:
5426          /* UWord arbitrary-SO-tag */
5427          evh__HG_USERSO_SEND_PRE( tid, args[1] );
5428          break;
5429 
5430       case _VG_USERREQ__HG_USERSO_RECV_POST:
5431          /* UWord arbitrary-SO-tag */
5432          evh__HG_USERSO_RECV_POST( tid, args[1] );
5433          break;
5434 
5435       case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5436          /* UWord arbitrary-SO-tag */
5437          evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5438          break;
5439 
5440       case VG_USERREQ__GDB_MONITOR_COMMAND: {
5441          Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5442          if (handled)
5443             *ret = 1;
5444          else
5445             *ret = 0;
5446          return handled;
5447       }
5448 
5449       case _VG_USERREQ__HG_PTHREAD_CREATE_BEGIN: {
5450          Thread *thr = map_threads_maybe_lookup(tid);
5451          if (HG_(clo_ignore_thread_creation)) {
5452             HG_(thread_enter_pthread_create)(thr);
5453             HG_(thread_enter_synchr)(thr);
5454          }
5455          break;
5456       }
5457 
5458       case _VG_USERREQ__HG_PTHREAD_CREATE_END: {
5459          Thread *thr = map_threads_maybe_lookup(tid);
5460          if (HG_(clo_ignore_thread_creation)) {
5461             HG_(thread_leave_pthread_create)(thr);
5462             HG_(thread_leave_synchr)(thr);
5463          }
5464          break;
5465       }
5466 
5467       case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_PRE: // pth_mx_t*, long tryLock
5468          evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
5469          break;
5470 
5471       case _VG_USERREQ__HG_PTHREAD_MUTEX_ACQUIRE_POST:    // pth_mx_t*
5472          evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
5473          break;
5474 
5475       case _VG_USERREQ__HG_PTHREAD_RWLOCK_ACQUIRED:       // void*, long isW
5476          evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5477          break;
5478 
5479       case _VG_USERREQ__HG_PTHREAD_RWLOCK_RELEASED:       // void*
5480          evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5481          break;
5482 
5483       case _VG_USERREQ__HG_POSIX_SEM_RELEASED: /* sem_t* */
5484          evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5485          break;
5486 
5487       case _VG_USERREQ__HG_POSIX_SEM_ACQUIRED: /* sem_t* */
5488          evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
5489          break;
5490 
5491 #if defined(VGO_solaris)
5492       case _VG_USERREQ__HG_RTLD_BIND_GUARD:
5493          evh__HG_RTLD_BIND_GUARD(tid, args[1]);
5494          break;
5495 
5496       case _VG_USERREQ__HG_RTLD_BIND_CLEAR:
5497          evh__HG_RTLD_BIND_CLEAR(tid, args[1]);
5498          break;
5499 #endif /* VGO_solaris */
5500 
5501       default:
5502          /* Unhandled Helgrind client request! */
5503          tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5504                        args[0]);
5505    }
5506 
5507    return True;
5508 }
5509 
5510 
5511 /*----------------------------------------------------------------*/
5512 /*--- Setup                                                    ---*/
5513 /*----------------------------------------------------------------*/
5514 
5515 static Bool hg_process_cmd_line_option ( const HChar* arg )
5516 {
5517    const HChar* tmp_str;
5518 
5519    if      VG_BOOL_CLO(arg, "--track-lockorders",
5520                             HG_(clo_track_lockorders)) {}
5521    else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5522                             HG_(clo_cmp_race_err_addrs)) {}
5523 
5524    else if VG_XACT_CLO(arg, "--history-level=none",
5525                             HG_(clo_history_level), 0);
5526    else if VG_XACT_CLO(arg, "--history-level=approx",
5527                             HG_(clo_history_level), 1);
5528    else if VG_XACT_CLO(arg, "--history-level=full",
5529                             HG_(clo_history_level), 2);
5530 
5531    else if VG_BINT_CLO(arg, "--conflict-cache-size",
5532                        HG_(clo_conflict_cache_size), 10*1000, 150*1000*1000) {}
5533 
5534    /* "stuvwx" --> stuvwx (binary) */
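   /* E.g. "010000" sets bit 4 of HG_(clo_sanity_flags), which (per
      hg_print_debug_usage below) requests sanity checks after changes
      to the lock-order-acquisition graph. */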
5535    else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
5536       Int j;
5537 
5538       if (6 != VG_(strlen)(tmp_str)) {
5539          VG_(message)(Vg_UserMsg,
5540                       "--hg-sanity-flags argument must have 6 digits\n");
5541          return False;
5542       }
5543       for (j = 0; j < 6; j++) {
5544          if      ('0' == tmp_str[j]) { /* do nothing */ }
5545          else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
5546          else {
5547             VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
5548                                      "only contain 0s and 1s\n");
5549             return False;
5550          }
5551       }
5552       if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
5553    }
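   /* Worked example (illustrative only): the string is scanned left to
      right and character j sets bit (6-1-j), so "--hg-sanity-flags=000010"
      leaves HG_(clo_sanity_flags) == 2 (bit 1), which per
      hg_print_debug_usage below enables sanity checks at lock/unlock
      events. */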
5554 
5555    else if VG_BOOL_CLO(arg, "--free-is-write",
5556                             HG_(clo_free_is_write)) {}
5557 
5558    else if VG_XACT_CLO(arg, "--vts-pruning=never",
5559                             HG_(clo_vts_pruning), 0);
5560    else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5561                             HG_(clo_vts_pruning), 1);
5562    else if VG_XACT_CLO(arg, "--vts-pruning=always",
5563                             HG_(clo_vts_pruning), 2);
5564 
5565    else if VG_BOOL_CLO(arg, "--check-stack-refs",
5566                             HG_(clo_check_stack_refs)) {}
5567    else if VG_BOOL_CLO(arg, "--ignore-thread-creation",
5568                             HG_(clo_ignore_thread_creation)) {}
5569 
5570    else
5571       return VG_(replacement_malloc_process_cmd_line_option)(arg);
5572 
5573    return True;
5574 }
5575 
5576 static void hg_print_usage ( void )
5577 {
5578    VG_(printf)(
5579 "    --free-is-write=no|yes    treat heap frees as writes [no]\n"
5580 "    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
5581 "    --history-level=none|approx|full [full]\n"
5582 "       full:   show both stack traces for a data race (can be very slow)\n"
5583 "       approx: full trace for one thread, approx for the other (faster)\n"
5584 "       none:   only show trace for one thread in a race (fastest)\n"
5585 "    --conflict-cache-size=N   size of 'full' history cache [2000000]\n"
5586 "    --check-stack-refs=no|yes race-check reads and writes on the\n"
5587 "                              main stack and thread stacks? [yes]\n"
5588 "    --ignore-thread-creation=yes|no Ignore activities during thread\n"
5589 "                              creation [%s]\n",
5590 HG_(clo_ignore_thread_creation) ? "yes" : "no"
5591    );
5592 }
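/* Illustrative invocation (not taken from this file): the options above
   are given on the Valgrind command line, e.g.

      valgrind --tool=helgrind --history-level=full \
               --conflict-cache-size=5000000 ./myprog

   which keeps full conflicting-access stack traces but enlarges the
   cache that holds them, at the cost of extra memory. */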
5593 
5594 static void hg_print_debug_usage ( void )
5595 {
5596    VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
5597                "race errors significant? [no]\n");
5598    VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
5599                "at events (X = 0|1) [000000]\n");
5600    VG_(printf)("    --hg-sanity-flags values:\n");
5601    VG_(printf)("       010000   after changes to "
5602                "lock-order-acquisition-graph\n");
5603    VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
5604    VG_(printf)("       000100   at mem permission setting for "
5605                "ranges >= %d bytes\n", SCE_BIGRANGE_T);
5606    VG_(printf)("       000010   at lock/unlock events\n");
5607    VG_(printf)("       000001   at thread create/join events\n");
5608    VG_(printf)(
5609 "    --vts-pruning=never|auto|always [auto]\n"
5610 "       never:   is never done (may cause big space leaks in Helgrind)\n"
5611 "       auto:    done just often enough to keep space usage under control\n"
5612 "       always:  done after every VTS GC (mostly just a big time waster)\n"
5613     );
5614 }
5615 
5616 static void hg_print_stats (void)
5617 {
5618 
5619    if (1) {
5620       VG_(printf)("\n");
5621       HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
5622       if (HG_(clo_track_lockorders)) {
5623          VG_(printf)("\n");
5624          HG_(ppWSUstats)( univ_laog,  "univ_laog" );
5625       }
5626    }
5627 
5628    //zz       VG_(printf)("\n");
5629    //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
5630    //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
5631    //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
5632    //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
5633    //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
5634    //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
5635    //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
5636    //zz                   stats__hbefore_stk_hwm);
5637    //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
5638    //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);
5639 
5640    VG_(printf)("\n");
5641    VG_(printf)("        locksets: %'8d unique lock sets\n",
5642                (Int)HG_(cardinalityWSU)( univ_lsets ));
5643    if (HG_(clo_track_lockorders)) {
5644       VG_(printf)("       univ_laog: %'8d unique lock sets\n",
5645                   (Int)HG_(cardinalityWSU)( univ_laog ));
5646    }
5647 
5648    //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
5649    //            stats__ga_LL_adds,
5650    //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
5651 
5652    VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
5653                HG_(stats__LockN_to_P_queries),
5654                HG_(stats__LockN_to_P_get_map_size)() );
5655 
5656    VG_(printf)("client malloc-ed blocks: %'8u\n",
5657                VG_(HT_count_nodes)(hg_mallocmeta_table));
5658 
5659    VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
5660                HG_(stats__string_table_queries),
5661                HG_(stats__string_table_get_map_size)() );
5662    if (HG_(clo_track_lockorders)) {
5663       VG_(printf)("            LAOG: %'8d map size\n",
5664                   (Int)(laog ? VG_(sizeFM)( laog ) : 0));
5665       VG_(printf)(" LAOG exposition: %'8d map size\n",
5666                   (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
5667    }
5668 
5669    VG_(printf)("           locks: %'8lu acquires, "
5670                "%'lu releases\n",
5671                stats__lockN_acquires,
5672                stats__lockN_releases
5673               );
5674    VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);
5675 
5676    VG_(printf)("\n");
5677    libhb_shutdown(True); // This in fact only prints stats.
5678 }
5679 
5680 static void hg_fini ( Int exitcode )
5681 {
5682    if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
5683       VG_(message)(Vg_UserMsg,
5684                    "For counts of detected and suppressed errors, "
5685                    "rerun with: -v\n");
5686    }
5687 
5688    if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
5689        && HG_(clo_history_level) >= 2) {
5690       VG_(umsg)(
5691          "Use --history-level=approx or =none to gain increased speed, at\n" );
5692       VG_(umsg)(
5693          "the cost of reduced accuracy of conflicting-access information\n");
5694    }
5695 
5696    if (SHOW_DATA_STRUCTURES)
5697       pp_everything( PP_ALL, "SK_(fini)" );
5698    if (HG_(clo_sanity_flags))
5699       all__sanity_check("SK_(fini)");
5700 
5701    if (VG_(clo_stats))
5702       hg_print_stats();
5703 }
5704 
5705 /* FIXME: move these somewhere sane */
5706 
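/* The two functions below are passed to libhb_init (see
   hg_post_clo_init) so that libhb can obtain stack traces and
   ExeContexts for a Thr*, by mapping it back to the owning Thread and
   ThreadId. */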
5707 static
5708 void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
5709 {
5710    Thread*     thr;
5711    ThreadId    tid;
5712    UWord       nActual;
5713    tl_assert(hbt);
5714    thr = libhb_get_Thr_hgthread( hbt );
5715    tl_assert(thr);
5716    tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5717    nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
5718                                          NULL, NULL, 0 );
5719    tl_assert(nActual <= nRequest);
5720    for (; nActual < nRequest; nActual++)
5721       frames[nActual] = 0;
5722 }
5723 
5724 static
5725 ExeContext* for_libhb__get_EC ( Thr* hbt )
5726 {
5727    Thread*     thr;
5728    ThreadId    tid;
5729    ExeContext* ec;
5730    tl_assert(hbt);
5731    thr = libhb_get_Thr_hgthread( hbt );
5732    tl_assert(thr);
5733    tid = map_threads_maybe_reverse_lookup_SLOW(thr);
5734    /* this will assert if tid is invalid */
5735    ec = VG_(record_ExeContext)( tid, 0 );
5736    return ec;
5737 }
5738 
5739 
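/* Ordering note: libhb_init must run before initialise_data_structures,
   since the latter needs the root Thr* it returns; laog__init is only
   needed when lock-order tracking is enabled. */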
5740 static void hg_post_clo_init ( void )
5741 {
5742    Thr* hbthr_root;
5743 
5744    /////////////////////////////////////////////
5745    hbthr_root = libhb_init( for_libhb__get_stacktrace,
5746                             for_libhb__get_EC );
5747    /////////////////////////////////////////////
5748 
5749 
5750    if (HG_(clo_track_lockorders))
5751       laog__init();
5752 
5753    initialise_data_structures(hbthr_root);
5754 }
5755 
5756 static void hg_info_location (Addr a)
5757 {
5758    (void) HG_(get_and_pp_addrdescr) (a);
5759 }
5760 
5761 static void hg_pre_clo_init ( void )
5762 {
5763    VG_(details_name)            ("Helgrind");
5764    VG_(details_version)         (NULL);
5765    VG_(details_description)     ("a thread error detector");
5766    VG_(details_copyright_author)(
5767       "Copyright (C) 2007-2015, and GNU GPL'd, by OpenWorks LLP et al.");
5768    VG_(details_bug_reports_to)  (VG_BUGS_TO);
5769    VG_(details_avg_translation_sizeB) ( 320 );
5770 
5771    VG_(basic_tool_funcs)          (hg_post_clo_init,
5772                                    hg_instrument,
5773                                    hg_fini);
5774 
5775    VG_(needs_core_errors)         ();
5776    VG_(needs_tool_errors)         (HG_(eq_Error),
5777                                    HG_(before_pp_Error),
5778                                    HG_(pp_Error),
5779                                    False,/*show TIDs for errors*/
5780                                    HG_(update_extra),
5781                                    HG_(recognised_suppression),
5782                                    HG_(read_extra_suppression_info),
5783                                    HG_(error_matches_suppression),
5784                                    HG_(get_error_name),
5785                                    HG_(get_extra_suppression_info),
5786                                    HG_(print_extra_suppression_use),
5787                                    HG_(update_extra_suppression_use));
5788 
5789    VG_(needs_xml_output)          ();
5790 
5791    VG_(needs_command_line_options)(hg_process_cmd_line_option,
5792                                    hg_print_usage,
5793                                    hg_print_debug_usage);
5794    VG_(needs_client_requests)     (hg_handle_client_request);
5795 
5796    // FIXME?
5797    //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
5798    //                                hg_expensive_sanity_check);
5799 
5800    VG_(needs_print_stats) (hg_print_stats);
5801    VG_(needs_info_location) (hg_info_location);
5802 
5803    VG_(needs_malloc_replacement)  (hg_cli__malloc,
5804                                    hg_cli____builtin_new,
5805                                    hg_cli____builtin_vec_new,
5806                                    hg_cli__memalign,
5807                                    hg_cli__calloc,
5808                                    hg_cli__free,
5809                                    hg_cli____builtin_delete,
5810                                    hg_cli____builtin_vec_delete,
5811                                    hg_cli__realloc,
5812                                    hg_cli_malloc_usable_size,
5813                                    HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );
5814 
5815    /* 21 Dec 08: disabled this; it mostly causes H to start more
5816       slowly and use significantly more memory, without very often
5817       providing useful results.  The user can request to load this
5818       information manually with --read-var-info=yes. */
5819    if (0) VG_(needs_var_info)(); /* optional */
5820 
5821    VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
5822    VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
5823    VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
5824    VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
5825    VG_(track_new_mem_stack)       ( evh__new_mem_stack );
5826 
5827    // FIXME: surely this isn't thread-aware
5828    VG_(track_copy_mem_remap)      ( evh__copy_mem );
5829 
5830    VG_(track_change_mem_mprotect) ( evh__set_perms );
5831 
5832    VG_(track_die_mem_stack_signal)( evh__die_mem );
5833    VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
5834    VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
5835 
5836    /* evh__die_mem ends by calling libhb_srange_noaccess_NoFX, which
5837       has no effect.  We do not use VG_(track_die_mem_stack), as that
5838       would be an expensive way to do nothing. */
5839    // VG_(track_die_mem_stack)       ( evh__die_mem );
5840 
5841    // FIXME: what is this for?
5842    VG_(track_ban_mem_stack)       (NULL);
5843 
5844    VG_(track_pre_mem_read)        ( evh__pre_mem_read );
5845    VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
5846    VG_(track_pre_mem_write)       ( evh__pre_mem_write );
5847    VG_(track_post_mem_write)      (NULL);
5848 
5849    /////////////////
5850 
5851    VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
5852    VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );
5853 
5854    VG_(track_start_client_code)( evh__start_client_code );
5855    VG_(track_stop_client_code)( evh__stop_client_code );
5856 
5857    /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
5858       as described in comments at the top of pub_tool_hashtable.h, are
5859       met.  Blargh. */
5860    tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
5861    tl_assert( sizeof(UWord) == sizeof(Addr) );
5862    hg_mallocmeta_table
5863       = VG_(HT_construct)( "hg_malloc_metadata_table" );
5864 
5865    MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
5866                                        1000,
5867                                        HG_(zalloc),
5868                                        "hg_malloc_metadata_pool",
5869                                        HG_(free));
5870 
5871    // add a callback to clean up on (threaded) fork.
5872    VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
5873 }
5874 
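/* This macro declares hg_pre_clo_init to the Valgrind core as the
   tool's entry point and lets the core check that core and tool were
   built against compatible interface versions. */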
5875 VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
5876 
5877 /*--------------------------------------------------------------------*/
5878 /*--- end                                                hg_main.c ---*/
5879 /*--------------------------------------------------------------------*/
5880