/*
  This file is part of drd, a thread error detector.

  Copyright (C) 2006-2013 Bart Van Assche <bvanassche@acm.org>.

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_clientobj.h"
#include "drd_error.h"
#include "drd_rwlock.h"
#include "pub_tool_vki.h"
#include "pub_tool_errormgr.h"    // VG_(maybe_record_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcprint.h"   // VG_(message)()
#include "pub_tool_libcproc.h"    // VG_(read_millisecond_timer)()
#include "pub_tool_machine.h"     // VG_(get_IP)()
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_threadstate.h" // VG_(get_running_tid)()


/* Local type definitions. */

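/*
 * Per-thread bookkeeping for a single reader-writer lock: the current
 * read-lock and write-lock nesting counts of the thread, and the segments
 * saved by the thread's most recent read and write unlock operations.
 */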
struct rwlock_thread_info
{
   UWord    tid;                  // DrdThreadId.
   UInt     reader_nesting_count;
   UInt     writer_nesting_count;
   // Segment of last unlock call by this thread that unlocked a writer lock.
   Segment* latest_wrlocked_segment;
   // Segment of last unlock call by this thread that unlocked a reader lock.
   Segment* latest_rdlocked_segment;
};


/* Local functions. */

static void rwlock_cleanup(struct rwlock_info* p);
static void rwlock_delete_thread(struct rwlock_info* const p,
                                 const DrdThreadId tid);


/* Local variables. */

static Bool  DRD_(s_trace_rwlock);
static UInt  DRD_(s_exclusive_threshold_ms);
static UInt  DRD_(s_shared_threshold_ms);
static ULong DRD_(s_rwlock_segment_creation_count);


/* Function definitions. */

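/*
 * The three setters below are invoked while DRD processes its
 * --trace-rwlock, --exclusive-threshold and --shared-threshold
 * command-line options.
 */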
void DRD_(rwlock_set_trace)(const Bool trace_rwlock)
{
   tl_assert(trace_rwlock == False || trace_rwlock == True);
   DRD_(s_trace_rwlock) = trace_rwlock;
}

void DRD_(rwlock_set_exclusive_threshold)(const UInt exclusive_threshold_ms)
{
   DRD_(s_exclusive_threshold_ms) = exclusive_threshold_ms;
}

void DRD_(rwlock_set_shared_threshold)(const UInt shared_threshold_ms)
{
   DRD_(s_shared_threshold_ms) = shared_threshold_ms;
}

/** Return True if at least one thread holds rwlock p for reading. */
static Bool DRD_(rwlock_is_rdlocked)(struct rwlock_info* p)
{
   struct rwlock_thread_info* q;

   VG_(OSetGen_ResetIter)(p->thread_info);
   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
   {
      if (q->reader_nesting_count > 0)
         return True;
   }
   return False;
}

/** Return True if at least one thread holds rwlock p for writing. */
static Bool DRD_(rwlock_is_wrlocked)(struct rwlock_info* p)
{
   struct rwlock_thread_info* q;

   VG_(OSetGen_ResetIter)(p->thread_info);
   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
   {
      if (q->writer_nesting_count > 0)
         return True;
   }
   return False;
}

/** Return True if rwlock p is held for reading or for writing. */
static Bool DRD_(rwlock_is_locked)(struct rwlock_info* p)
{
   return DRD_(rwlock_is_rdlocked)(p) || DRD_(rwlock_is_wrlocked)(p);
}

/** Return True if thread tid holds rwlock p for reading. */
static Bool DRD_(rwlock_is_rdlocked_by)(struct rwlock_info* p,
                                        const DrdThreadId tid)
{
   const UWord uword_tid = tid;
   struct rwlock_thread_info* q;

   q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
   return q && q->reader_nesting_count > 0;
}

/** Return True if thread tid holds rwlock p for writing. */
static Bool DRD_(rwlock_is_wrlocked_by)(struct rwlock_info* p,
                                        const DrdThreadId tid)
{
   const UWord uword_tid = tid;
   struct rwlock_thread_info* q;

   q = VG_(OSetGen_Lookup)(p->thread_info, &uword_tid);
   return q && q->writer_nesting_count > 0;
}

/** Return True if thread tid holds rwlock p for reading or for writing. */
static Bool DRD_(rwlock_is_locked_by)(struct rwlock_info* p,
                                      const DrdThreadId tid)
{
   return (DRD_(rwlock_is_rdlocked_by)(p, tid)
           || DRD_(rwlock_is_wrlocked_by)(p, tid));
}

/** Either look up or insert a node corresponding to DRD thread id 'tid'. */
static
struct rwlock_thread_info*
DRD_(lookup_or_insert_node)(OSet* oset, const UWord tid)
{
   struct rwlock_thread_info* q;

   q = VG_(OSetGen_Lookup)(oset, &tid);
   if (q == 0)
   {
      q = VG_(OSetGen_AllocNode)(oset, sizeof(*q));
      q->tid = tid;
      q->reader_nesting_count = 0;
      q->writer_nesting_count = 0;
      q->latest_wrlocked_segment = 0;
      q->latest_rdlocked_segment = 0;
      VG_(OSetGen_Insert)(oset, q);
   }
   tl_assert(q);
   return q;
}

/**
 * Combine the vector clock corresponding to the last unlock operation of
 * reader-writer lock p into the vector clock of thread 'tid'.
 */
static void DRD_(rwlock_combine_other_vc)(struct rwlock_info* const p,
                                          const DrdThreadId tid,
                                          const Bool readers_too)
{
   struct rwlock_thread_info* q;
   VectorClock old_vc;

   DRD_(vc_copy)(&old_vc, DRD_(thread_get_vc)(tid));
   VG_(OSetGen_ResetIter)(p->thread_info);
   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; ) {
      if (q->tid != tid) {
         if (q->latest_wrlocked_segment)
            DRD_(vc_combine)(DRD_(thread_get_vc)(tid),
                             &q->latest_wrlocked_segment->vc);
         if (readers_too && q->latest_rdlocked_segment)
            DRD_(vc_combine)(DRD_(thread_get_vc)(tid),
                             &q->latest_rdlocked_segment->vc);
      }
   }
   DRD_(thread_update_conflict_set)(tid, &old_vc);
   DRD_(vc_cleanup)(&old_vc);
}
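
/*
 * Note on 'readers_too' above: when a thread acquires a read lock, only the
 * preceding write unlocks create happens-before edges, since readers may run
 * concurrently (DRD_(rwlock_post_rdlock)() passes False). A thread acquiring
 * a write lock must be ordered after both the preceding write unlocks and
 * the preceding read unlocks (DRD_(rwlock_post_wrlock)() passes True).
 */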

/**
 * Compare the type of the rwlock specified at initialization time with
 * the type passed as an argument, and complain if these two types do not
 * match.
 */
static Bool drd_rwlock_check_type(struct rwlock_info* const p,
                                  const RwLockT rwlock_type)
{
   tl_assert(p);
   /* The code below has to be updated if additional rwlock types are added. */
   tl_assert(rwlock_type == pthread_rwlock || rwlock_type == user_rwlock);
   tl_assert(p->rwlock_type == pthread_rwlock
             || p->rwlock_type == user_rwlock);

   if (p->rwlock_type == rwlock_type)
      return True;

   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)
         (VG_(get_running_tid)(),
          RwlockErr,
          VG_(get_IP)(VG_(get_running_tid)()),
          rwlock_type == pthread_rwlock
          ? "Attempt to use a user-defined rwlock as a POSIX rwlock"
          : "Attempt to use a POSIX rwlock as a user-defined rwlock",
          &REI);
   }
   return False;
}
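
/*
 * Note: 'pthread_rwlock' refers to a pthread_rwlock_t object intercepted
 * through the POSIX function wrappers, while 'user_rwlock' refers to a lock
 * announced through DRD's ANNOTATE_RWLOCK_CREATE() /
 * ANNOTATE_RWLOCK_ACQUIRED() / ANNOTATE_RWLOCK_RELEASED() client requests.
 */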

/** Initialize the rwlock_info data structure *p. */
static
void DRD_(rwlock_initialize)(struct rwlock_info* const p, const Addr rwlock,
                             const RwLockT rwlock_type)
{
   tl_assert(rwlock != 0);
   tl_assert(p->a1 == rwlock);
   tl_assert(p->type == ClientRwlock);

   p->cleanup = (void(*)(DrdClientobj*))rwlock_cleanup;
   p->delete_thread
      = (void(*)(DrdClientobj*, DrdThreadId))rwlock_delete_thread;
   p->rwlock_type = rwlock_type;
   p->thread_info = VG_(OSetGen_Create)(
      0, 0, VG_(malloc), "drd.rwlock.ri.1", VG_(free));
   p->acquiry_time_ms = 0;
   p->acquired_at = 0;
}
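
/*
 * Note: passing key offset zero and a null comparison function to
 * VG_(OSetGen_Create)() above makes the ordered set treat the first word of
 * each node -- the 'tid' member of struct rwlock_thread_info -- as a key
 * that is compared as an unsigned word.
 */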

/** Deallocate the memory that was allocated by rwlock_initialize(). */
static void rwlock_cleanup(struct rwlock_info* p)
{
   struct rwlock_thread_info* q;

   tl_assert(p);

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] rwlock_destroy 0x%lx",
                      DRD_(thread_get_running_tid)(), p->a1);

   if (DRD_(rwlock_is_locked)(p))
   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              RwlockErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Destroying locked rwlock",
                              &REI);
   }

   VG_(OSetGen_ResetIter)(p->thread_info);
   for ( ; (q = VG_(OSetGen_Next)(p->thread_info)) != 0; )
   {
      DRD_(sg_put)(q->latest_wrlocked_segment);
      DRD_(sg_put)(q->latest_rdlocked_segment);
   }

   VG_(OSetGen_Destroy)(p->thread_info);
}

static
struct rwlock_info*
DRD_(rwlock_get_or_allocate)(const Addr rwlock, const RwLockT rwlock_type)
{
   struct rwlock_info* p;

   tl_assert(offsetof(DrdClientobj, rwlock) == 0);
   p = &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
   if (p)
   {
      drd_rwlock_check_type(p, rwlock_type);
      return p;
   }

   if (DRD_(clientobj_present)(rwlock, rwlock + 1))
   {
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = rwlock,
      };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              GenericErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Not a reader-writer lock",
                              &GEI);
      return 0;
   }

   p = &(DRD_(clientobj_add)(rwlock, ClientRwlock)->rwlock);
   DRD_(rwlock_initialize)(p, rwlock, rwlock_type);
   return p;
}
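
/*
 * Both DRD_(rwlock_get_or_allocate)() above and DRD_(rwlock_get)() below
 * rely on the assertion offsetof(DrdClientobj, rwlock) == 0: because the
 * 'rwlock' member lives at offset zero, a null pointer returned by
 * DRD_(clientobj_get)() for an unknown address translates into a null
 * struct rwlock_info pointer.
 */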

static struct rwlock_info* DRD_(rwlock_get)(const Addr rwlock)
{
   tl_assert(offsetof(DrdClientobj, rwlock) == 0);
   return &(DRD_(clientobj_get)(rwlock, ClientRwlock)->rwlock);
}

/** Called before pthread_rwlock_init(). */
struct rwlock_info* DRD_(rwlock_pre_init)(const Addr rwlock,
                                          const RwLockT rwlock_type)
{
   struct rwlock_info* p;

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] rwlock_init 0x%lx",
                      DRD_(thread_get_running_tid)(), rwlock);

   p = DRD_(rwlock_get)(rwlock);

   if (p)
   {
      const ThreadId vg_tid = VG_(get_running_tid)();
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };

      drd_rwlock_check_type(p, rwlock_type);
      VG_(maybe_record_error)(vg_tid,
                              RwlockErr,
                              VG_(get_IP)(vg_tid),
                              "Reader-writer lock reinitialization",
                              &REI);
      return p;
   }

   return DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);
}

/** Called after pthread_rwlock_destroy(). */
void DRD_(rwlock_post_destroy)(const Addr rwlock, const RwLockT rwlock_type)
{
   struct rwlock_info* p;

   p = DRD_(rwlock_get)(rwlock);
   if (p == 0)
   {
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = rwlock,
      };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              GenericErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Not a reader-writer lock",
                              &GEI);
      return;
   }

   drd_rwlock_check_type(p, rwlock_type);

   DRD_(clientobj_remove)(rwlock, ClientRwlock);
}

/**
 * Called before pthread_rwlock_rdlock() is invoked. If a data structure for
 * the client-side object has not yet been created, do so now. Also check
 * whether an attempt is made to recursively lock a synchronization object
 * that must not be locked recursively.
 */
void DRD_(rwlock_pre_rdlock)(const Addr rwlock, const RwLockT rwlock_type)
{
   struct rwlock_info* p;

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] pre_rwlock_rdlock 0x%lx",
                      DRD_(thread_get_running_tid)(), rwlock);

   p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);
   tl_assert(p);

   if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)())) {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              RwlockErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Already locked for writing by calling thread",
                              &REI);
   }
}
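
/*
 * Note: POSIX leaves the behavior of pthread_rwlock_rdlock() undefined if
 * the calling thread already holds the lock for writing -- implementations
 * may deadlock or return EDEADLK -- which is why the check above reports
 * that situation as an error.
 */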

/**
 * Update rwlock_info state when locking the pthread_rwlock_t rwlock.
 * Note: this function must be called after pthread_rwlock_rdlock() has been
 * called, or a race condition is triggered!
 */
void DRD_(rwlock_post_rdlock)(const Addr rwlock, const RwLockT rwlock_type,
                              const Bool took_lock)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   struct rwlock_info* p;
   struct rwlock_thread_info* q;

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] post_rwlock_rdlock 0x%lx", drd_tid, rwlock);

   p = DRD_(rwlock_get)(rwlock);

   if (! p || ! took_lock)
      return;

   tl_assert(! DRD_(rwlock_is_wrlocked)(p));

   q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
   if (++q->reader_nesting_count == 1)
   {
      DRD_(thread_new_segment)(drd_tid);
      DRD_(s_rwlock_segment_creation_count)++;
      DRD_(rwlock_combine_other_vc)(p, drd_tid, False);

      p->acquiry_time_ms = VG_(read_millisecond_timer)();
      p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
   }
}
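
/*
 * Note: only the outermost read lock acquisition (nesting count 0 -> 1)
 * starts a new segment and combines in the vector clocks of earlier write
 * unlock operations; recursive read locking by the same thread does not
 * introduce any new ordering.
 */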

/**
 * Called before pthread_rwlock_wrlock() is invoked. If a data structure for
 * the client-side object has not yet been created, do so now. Also check
 * whether an attempt is made to recursively lock a synchronization object
 * that must not be locked recursively.
 */
void DRD_(rwlock_pre_wrlock)(const Addr rwlock, const RwLockT rwlock_type)
{
   struct rwlock_info* p;

   p = DRD_(rwlock_get)(rwlock);

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] pre_rwlock_wrlock 0x%lx",
                      DRD_(thread_get_running_tid)(), rwlock);

   if (p == 0)
      p = DRD_(rwlock_get_or_allocate)(rwlock, rwlock_type);

   tl_assert(p);

   if (DRD_(rwlock_is_wrlocked_by)(p, DRD_(thread_get_running_tid)()))
   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              RwlockErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Recursive writer locking not allowed",
                              &REI);
   }
}

/**
 * Update rwlock_info state when locking the pthread_rwlock_t rwlock.
 * Note: this function must be called after pthread_rwlock_wrlock() has
 * finished, or a race condition is triggered!
 */
void DRD_(rwlock_post_wrlock)(const Addr rwlock, const RwLockT rwlock_type,
                              const Bool took_lock)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   struct rwlock_info* p;
   struct rwlock_thread_info* q;

   p = DRD_(rwlock_get)(rwlock);

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] post_rwlock_wrlock 0x%lx", drd_tid, rwlock);

   if (! p || ! took_lock)
      return;

   q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
   tl_assert(q->writer_nesting_count == 0);
   q->writer_nesting_count++;
   tl_assert(q->writer_nesting_count == 1);
   DRD_(thread_new_segment)(drd_tid);
   DRD_(s_rwlock_segment_creation_count)++;
   DRD_(rwlock_combine_other_vc)(p, drd_tid, True);
   p->acquiry_time_ms = VG_(read_millisecond_timer)();
   p->acquired_at = VG_(record_ExeContext)(VG_(get_running_tid)(), 0);
}
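
/*
 * Note: 'acquiry_time_ms' and 'acquired_at' record when and where the lock
 * was taken. DRD_(rwlock_pre_unlock)() compares the elapsed hold time with
 * the --shared-threshold and --exclusive-threshold limits and reports a
 * HoldtimeErr when a limit is exceeded.
 */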

/**
 * Update rwlock_info state when unlocking the pthread_rwlock_t rwlock.
 *
 * @param rwlock      Pointer to the pthread_rwlock_t data structure in the
 *                    client space.
 * @param rwlock_type The type of the reader-writer lock.
 *
 * @note This function must be called before pthread_rwlock_unlock() is
 *       called, or a race condition is triggered!
 */
void DRD_(rwlock_pre_unlock)(const Addr rwlock, const RwLockT rwlock_type)
{
   const DrdThreadId drd_tid = DRD_(thread_get_running_tid)();
   const ThreadId vg_tid = VG_(get_running_tid)();
   struct rwlock_info* p;
   struct rwlock_thread_info* q;

   if (DRD_(s_trace_rwlock))
      DRD_(trace_msg)("[%d] rwlock_unlock 0x%lx", drd_tid, rwlock);

   p = DRD_(rwlock_get)(rwlock);
   if (p == 0)
   {
      GenericErrInfo GEI = {
         .tid = DRD_(thread_get_running_tid)(),
         .addr = rwlock,
      };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              GenericErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Not a reader-writer lock",
                              &GEI);
      return;
   }

   drd_rwlock_check_type(p, rwlock_type);

   if (! DRD_(rwlock_is_locked_by)(p, drd_tid))
   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)(vg_tid,
                              RwlockErr,
                              VG_(get_IP)(vg_tid),
                              "Reader-writer lock not locked by calling thread",
                              &REI);
      return;
   }
   q = DRD_(lookup_or_insert_node)(p->thread_info, drd_tid);
   tl_assert(q);
   if (q->reader_nesting_count > 0)
   {
      q->reader_nesting_count--;
      if (q->reader_nesting_count == 0 && DRD_(s_shared_threshold_ms) > 0)
      {
         Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
         if (held > DRD_(s_shared_threshold_ms))
         {
            HoldtimeErrInfo HEI
               = { DRD_(thread_get_running_tid)(),
                   rwlock, p->acquired_at, held, DRD_(s_shared_threshold_ms) };
            VG_(maybe_record_error)(vg_tid,
                                    HoldtimeErr,
                                    VG_(get_IP)(vg_tid),
                                    "rwlock",
                                    &HEI);
         }
      }
      if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
      {
         /*
          * This pthread_rwlock_unlock() call really unlocks the rwlock. Save
          * the current vector clock of the thread such that it is available
          * when this rwlock is locked again.
          */
         DRD_(thread_get_latest_segment)(&q->latest_rdlocked_segment, drd_tid);
         DRD_(thread_new_segment)(drd_tid);
         DRD_(s_rwlock_segment_creation_count)++;
      }
   }
   else if (q->writer_nesting_count > 0)
   {
      q->writer_nesting_count--;
      if (q->writer_nesting_count == 0 && DRD_(s_exclusive_threshold_ms) > 0)
      {
         Long held = VG_(read_millisecond_timer)() - p->acquiry_time_ms;
         if (held > DRD_(s_exclusive_threshold_ms))
         {
            HoldtimeErrInfo HEI
               = { DRD_(thread_get_running_tid)(),
                   rwlock, p->acquired_at, held,
                   DRD_(s_exclusive_threshold_ms) };
            VG_(maybe_record_error)(vg_tid,
                                    HoldtimeErr,
                                    VG_(get_IP)(vg_tid),
                                    "rwlock",
                                    &HEI);
         }
      }
      if (q->reader_nesting_count == 0 && q->writer_nesting_count == 0)
      {
         /*
          * This pthread_rwlock_unlock() call really unlocks the rwlock. Save
          * the current vector clock of the thread such that it is available
          * when this rwlock is locked again.
          */
         DRD_(thread_get_latest_segment)(&q->latest_wrlocked_segment, drd_tid);
         DRD_(thread_new_segment)(drd_tid);
         DRD_(s_rwlock_segment_creation_count)++;
      }
   }
   else
   {
      tl_assert(False);
   }
}

/** Called when thread tid ceases to exist. */
static void rwlock_delete_thread(struct rwlock_info* const p,
                                 const DrdThreadId tid)
{
   struct rwlock_thread_info* q;

   if (DRD_(rwlock_is_locked_by)(p, tid))
   {
      RwlockErrInfo REI = { DRD_(thread_get_running_tid)(), p->a1 };
      VG_(maybe_record_error)(VG_(get_running_tid)(),
                              RwlockErr,
                              VG_(get_IP)(VG_(get_running_tid)()),
                              "Reader-writer lock still locked at thread exit",
                              &REI);
      q = DRD_(lookup_or_insert_node)(p->thread_info, tid);
      q->reader_nesting_count = 0;
      q->writer_nesting_count = 0;
   }
}
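
/*
 * Resetting the nesting counts above marks the lock as no longer held by
 * the exiting thread, which keeps later checks such as
 * DRD_(rwlock_is_locked)() from treating the lock as still held.
 */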
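/**
 * Return the number of segments created by the rwlock code, e.g. for
 * inclusion in DRD's statistics output.
 */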
ULong DRD_(get_rwlock_segment_creation_count)(void)
{
   return DRD_(s_rwlock_segment_creation_count);
}