1 
2 /*--------------------------------------------------------------------*/
3 /*--- A header file for all parts of the MemCheck tool.            ---*/
4 /*---                                                 mc_include.h ---*/
5 /*--------------------------------------------------------------------*/
6 
7 /*
8    This file is part of MemCheck, a heavyweight Valgrind tool for
9    detecting memory errors.
10 
11    Copyright (C) 2000-2015 Julian Seward
12       jseward@acm.org
13 
14    This program is free software; you can redistribute it and/or
15    modify it under the terms of the GNU General Public License as
16    published by the Free Software Foundation; either version 2 of the
17    License, or (at your option) any later version.
18 
19    This program is distributed in the hope that it will be useful, but
20    WITHOUT ANY WARRANTY; without even the implied warranty of
21    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22    General Public License for more details.
23 
24    You should have received a copy of the GNU General Public License
25    along with this program; if not, write to the Free Software
26    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27    02111-1307, USA.
28 
29    The GNU General Public License is contained in the file COPYING.
30 */
31 
32 #ifndef __MC_INCLUDE_H
33 #define __MC_INCLUDE_H
34 
/* Expands to the Memcheck-private symbol name, e.g. MC_(foo) ->
   vgMemCheck_foo, so tool-internal symbols cannot clash with the
   Valgrind core or with other tools. */
#define MC_(str)    VGAPPEND(vgMemCheck_,str)
36 
37 
38 /* This is a private header file for use only within the
39    memcheck/ directory. */
40 
41 /*------------------------------------------------------------*/
42 /*--- Tracking the heap                                    ---*/
43 /*------------------------------------------------------------*/
44 
/* By default, we want at least a 16B redzone on client heap blocks
   for Memcheck.
   The default can be modified by --redzone-size. */
#define MC_MALLOC_DEFAULT_REDZONE_SZB    16
// effective redzone, as (possibly) modified by --redzone-size:
// (the "SzB" suffix means "size in bytes", as used throughout this file)
extern SizeT MC_(Malloc_Redzone_SzB);
51 
/* For malloc()/new/new[] vs. free()/delete/delete[] mismatch checking. */
typedef
   enum {
      MC_AllocMalloc = 0,   // block came from malloc()/calloc()/realloc()
      MC_AllocNew    = 1,   // block came from operator new
      MC_AllocNewVec = 2,   // block came from operator new[]
      MC_AllocCustom = 3    // block registered via MALLOCLIKE_BLOCK
   }
   MC_AllocKind;

/* This describes a heap block. Nb: first two fields must match core's
 * VgHashNode. */
typedef
   struct _MC_Chunk {
      struct _MC_Chunk* next;
      Addr         data;            // Address of the actual block.
      SizeT        szB : (sizeof(SizeT)*8)-2; // Size requested; 30 or 62 bits.
      MC_AllocKind allockind : 2;   // Which operation did the allocation.
      /* NOTE(review): 'where[0]' is the GNU zero-length-array extension;
         the standard C99 spelling is a flexible array member 'where[]'.
         Confirm every supported compiler accepts '[]' before changing. */
      ExeContext*  where[0];
      /* Variable-length array. The size depends on MC_(clo_keep_stacktraces).
         This array optionally stores the alloc and/or free stack trace. */
   }
   MC_Chunk;

/* Returns the execontext where the MC_Chunk was allocated/freed.
   Returns VG_(null_ExeContext)() if the execontext has not been recorded (due
   to MC_(clo_keep_stacktraces) and/or because block not yet freed). */
ExeContext* MC_(allocated_at) (MC_Chunk*);
ExeContext* MC_(freed_at) (MC_Chunk*);

/* Records and sets execontext according to MC_(clo_keep_stacktraces) */
void  MC_(set_allocated_at) (ThreadId, MC_Chunk*);
void  MC_(set_freed_at) (ThreadId, MC_Chunk*);

/* number of pointers needed according to MC_(clo_keep_stacktraces),
   i.e. the number of 'where' slots a chunk actually carries. */
UInt MC_(n_where_pointers) (void);
88 
/* Memory pool.  Nb: first two fields must match core's VgHashNode. */
typedef
   struct _MC_Mempool {
      struct _MC_Mempool* next;
      Addr          pool;           // pool identifier
      SizeT         rzB;            // pool red-zone size
      Bool          is_zeroed;      // allocations from this pool are zeroed
      VgHashTable  *chunks;         // chunks associated with this pool
   }
   MC_Mempool;


/* Record a new heap block at p/size in 'table'; 'kind' is remembered
   for later alloc/free mismatch checking. */
void* MC_(new_block)  ( ThreadId tid,
                        Addr p, SizeT size, SizeT align,
                        Bool is_zeroed, MC_AllocKind kind,
                        VgHashTable *table);
void MC_(handle_free) ( ThreadId tid,
                        Addr p, UInt rzB, MC_AllocKind kind );

/* Mempool management, keyed by the pool identifier address. */
void MC_(create_mempool)  ( Addr pool, UInt rzB, Bool is_zeroed );
void MC_(destroy_mempool) ( Addr pool );
void MC_(mempool_alloc)   ( ThreadId tid, Addr pool,
                            Addr addr, SizeT size );
void MC_(mempool_free)    ( Addr pool, Addr addr );
void MC_(mempool_trim)    ( Addr pool, Addr addr, SizeT size );
void MC_(move_mempool)    ( Addr poolA, Addr poolB );
void MC_(mempool_change)  ( Addr pool, Addr addrA, Addr addrB, SizeT size );
Bool MC_(mempool_exists)  ( Addr pool );

/* Searches for a recently freed block which might bracket Addr a.
   Return the MC_Chunk* for this block or NULL if no bracketing block
   is found.  (The function name keeps its historical double-t
   spelling.) */
MC_Chunk* MC_(get_freed_block_bracketting)( Addr a );

/* For efficient pooled alloc/free of the MC_Chunk. */
extern PoolAlloc* MC_(chunk_poolalloc);

/* For tracking malloc'd blocks.  Nb: it's quite important that it's a
   VgHashTable, because VgHashTable allows duplicate keys without complaint.
   This can occur if a user marks a malloc() block as also a custom block with
   MALLOCLIKE_BLOCK. */
extern VgHashTable *MC_(malloc_list);

/* For tracking memory pools. */
extern VgHashTable *MC_(mempool_list);
134 
/* Shadow memory functions */
Bool MC_(check_mem_is_noaccess)( Addr a, SizeT len, Addr* bad_addr );
void MC_(make_mem_noaccess)        ( Addr a, SizeT len );
void MC_(make_mem_undefined_w_otag)( Addr a, SizeT len, UInt otag );
void MC_(make_mem_defined)         ( Addr a, SizeT len );
void MC_(copy_address_range_state) ( Addr src, Addr dst, SizeT len );

void MC_(print_malloc_stats) ( void );
/* nr of free operations done */
SizeT MC_(get_cmalloc_n_frees) ( void );

/* Tool-side entry points for the client heap operations:
   malloc / new / new[] / memalign / calloc / free / delete / delete[] /
   realloc / malloc_usable_size. */
void* MC_(malloc)               ( ThreadId tid, SizeT n );
void* MC_(__builtin_new)        ( ThreadId tid, SizeT n );
void* MC_(__builtin_vec_new)    ( ThreadId tid, SizeT n );
void* MC_(memalign)             ( ThreadId tid, SizeT align, SizeT n );
void* MC_(calloc)               ( ThreadId tid, SizeT nmemb, SizeT size1 );
void  MC_(free)                 ( ThreadId tid, void* p );
void  MC_(__builtin_delete)     ( ThreadId tid, void* p );
void  MC_(__builtin_vec_delete) ( ThreadId tid, void* p );
void* MC_(realloc)              ( ThreadId tid, void* p, SizeT new_size );
SizeT MC_(malloc_usable_size)   ( ThreadId tid, void* p );

/* Handles in-place resizing of a block (no data move). */
void MC_(handle_resizeInPlace)(ThreadId tid, Addr p,
                               SizeT oldSizeB, SizeT newSizeB, SizeT rzB);
159 
160 
161 /*------------------------------------------------------------*/
162 /*--- Origin tracking translate-time support               ---*/
163 /*------------------------------------------------------------*/
164 
/* Translate-time origin-tracking support.
   See detailed comments in mc_machine.c. */
Int MC_(get_otrack_shadow_offset) ( Int offset, Int szB );
IRType MC_(get_otrack_reg_array_equiv_int_type) ( IRRegArray* arr );
168 
169 /* Constants which are used as the lowest 2 bits in origin tags.
170 
171    An origin tag comprises an upper 30-bit ECU field and a lower 2-bit
172    'kind' field.  The ECU field is a number given out by m_execontext
173    and has a 1-1 mapping with ExeContext*s.  An ECU can be used
174    directly as an origin tag (otag), but in fact we want to put
175    additional information 'kind' field to indicate roughly where the
176    tag came from.  This helps print more understandable error messages
177    for the user -- it has no other purpose.
178 
179    Hence the following 2-bit constants are needed for 'kind' field.
180 
181    To summarise:
182 
183    * Both ECUs and origin tags are represented as 32-bit words
184 
185    * m_execontext and the core-tool interface deal purely in ECUs.
186      They have no knowledge of origin tags - that is a purely
187      Memcheck-internal matter.
188 
189    * all valid ECUs have the lowest 2 bits zero and at least
190      one of the upper 30 bits nonzero (see VG_(is_plausible_ECU))
191 
192    * to convert from an ECU to an otag, OR in one of the MC_OKIND_
193      constants below
194 
195    * to convert an otag back to an ECU, AND it with ~3
196 */
197 
/* The 2-bit 'kind' field that occupies the lowest 2 bits of an origin
   tag (see the large comment above). */
#define MC_OKIND_UNKNOWN  0  /* unknown origin */
#define MC_OKIND_HEAP     1  /* this is a heap origin */
#define MC_OKIND_STACK    2  /* this is a stack origin */
#define MC_OKIND_USER     3  /* arises from user-supplied client req */
202 
203 
204 /*------------------------------------------------------------*/
205 /*--- Profiling of memory events                           ---*/
206 /*------------------------------------------------------------*/
207 
208 /* Define to collect detailed performance info. */
209 /* #define MC_PROFILE_MEMORY */
210 #ifdef MC_PROFILE_MEMORY
211 
/* Order of enumerators does not matter. But MCPE_LAST has to be the
   last entry in the list as it is used as an array bound.
   Each enumerator names one countable memory event; counts are bumped
   via PROF_EVENT(). */
enum {
   MCPE_LOADV8,
   MCPE_LOADV8_SLOW1,
   MCPE_LOADV8_SLOW2,
   MCPE_LOADV16,
   MCPE_LOADV16_SLOW1,
   MCPE_LOADV16_SLOW2,
   MCPE_LOADV32,
   MCPE_LOADV32_SLOW1,
   MCPE_LOADV32_SLOW2,
   MCPE_LOADV64,
   MCPE_LOADV64_SLOW1,
   MCPE_LOADV64_SLOW2,
   MCPE_LOADV_128_OR_256,
   MCPE_LOADV_128_OR_256_SLOW_LOOP,
   MCPE_LOADV_128_OR_256_SLOW1,
   MCPE_LOADV_128_OR_256_SLOW2,
   MCPE_LOADVN_SLOW,
   MCPE_LOADVN_SLOW_LOOP,
   MCPE_STOREV8,
   MCPE_STOREV8_SLOW1,
   MCPE_STOREV8_SLOW2,
   MCPE_STOREV8_SLOW3,
   MCPE_STOREV8_SLOW4,
   MCPE_STOREV16,
   MCPE_STOREV16_SLOW1,
   MCPE_STOREV16_SLOW2,
   MCPE_STOREV16_SLOW3,
   MCPE_STOREV16_SLOW4,
   MCPE_STOREV32,
   MCPE_STOREV32_SLOW1,
   MCPE_STOREV32_SLOW2,
   MCPE_STOREV32_SLOW3,
   MCPE_STOREV32_SLOW4,
   MCPE_STOREV64,
   MCPE_STOREV64_SLOW1,
   MCPE_STOREV64_SLOW2,
   MCPE_STOREV64_SLOW3,
   MCPE_STOREV64_SLOW4,
   MCPE_STOREVN_SLOW,
   MCPE_STOREVN_SLOW_LOOP,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD32_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED,
   MCPE_MAKE_ALIGNED_WORD64_UNDEFINED_SLOW,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD32_NOACCESS_SLOW,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS,
   MCPE_MAKE_ALIGNED_WORD64_NOACCESS_SLOW,
   MCPE_MAKE_MEM_NOACCESS,
   MCPE_MAKE_MEM_UNDEFINED,
   MCPE_MAKE_MEM_UNDEFINED_W_OTAG,
   MCPE_MAKE_MEM_DEFINED,
   MCPE_CHEAP_SANITY_CHECK,
   MCPE_EXPENSIVE_SANITY_CHECK,
   MCPE_COPY_ADDRESS_RANGE_STATE,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP1,
   MCPE_COPY_ADDRESS_RANGE_STATE_LOOP2,
   MCPE_CHECK_MEM_IS_NOACCESS,
   MCPE_CHECK_MEM_IS_NOACCESS_LOOP,
   MCPE_IS_MEM_ADDRESSABLE,
   MCPE_IS_MEM_ADDRESSABLE_LOOP,
   MCPE_IS_MEM_DEFINED,
   MCPE_IS_MEM_DEFINED_LOOP,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE,
   MCPE_IS_MEM_DEFINED_COMPREHENSIVE_LOOP,
   MCPE_IS_DEFINED_ASCIIZ,
   MCPE_IS_DEFINED_ASCIIZ_LOOP,
   MCPE_FIND_CHUNK_FOR_OLD,
   MCPE_FIND_CHUNK_FOR_OLD_LOOP,
   MCPE_SET_ADDRESS_RANGE_PERMS,
   MCPE_SET_ADDRESS_RANGE_PERMS_SINGLE_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_STARTOF_SECMAP,
   MCPE_SET_ADDRESS_RANGE_PERMS_MULTIPLE_SECMAPS,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM1_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_DIST_SM2_QUICK,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP1C,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8A,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP8B,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K,
   MCPE_SET_ADDRESS_RANGE_PERMS_LOOP64K_FREE_DIST_SM,
   MCPE_NEW_MEM_STACK,
   MCPE_NEW_MEM_STACK_4,
   MCPE_NEW_MEM_STACK_8,
   MCPE_NEW_MEM_STACK_12,
   MCPE_NEW_MEM_STACK_16,
   MCPE_NEW_MEM_STACK_32,
   MCPE_NEW_MEM_STACK_112,
   MCPE_NEW_MEM_STACK_128,
   MCPE_NEW_MEM_STACK_144,
   MCPE_NEW_MEM_STACK_160,
   MCPE_DIE_MEM_STACK,
   MCPE_DIE_MEM_STACK_4,
   MCPE_DIE_MEM_STACK_8,
   MCPE_DIE_MEM_STACK_12,
   MCPE_DIE_MEM_STACK_16,
   MCPE_DIE_MEM_STACK_32,
   MCPE_DIE_MEM_STACK_112,
   MCPE_DIE_MEM_STACK_128,
   MCPE_DIE_MEM_STACK_144,
   MCPE_DIE_MEM_STACK_160,
   /* Do not add enumerators past this line. */
   MCPE_LAST
};

/* One counter per MCPE_* event. */
extern ULong MC_(event_ctr)[MCPE_LAST];
324 
/* Bump the profile counter for event 'ev' (must be a valid MCPE_*
   value).  Defined with the do/while(0) idiom and, crucially, NO
   trailing semicolon, so that 'PROF_EVENT(e);' expands to exactly one
   statement.  The previous definition ended in 'while (False);' --
   that extra ';' made each use expand to two statements, which breaks
   unbraced 'if (c) PROF_EVENT(e); else ...' constructs. */
#  define PROF_EVENT(ev)                           \
   do { tl_assert((ev) >= 0 && (ev) < MCPE_LAST);  \
      MC_(event_ctr)[(ev)]++;                      \
   } while (False)
329 
330 #else
331 
332 #  define PROF_EVENT(ev)    /* */
333 
334 #endif   /* MC_PROFILE_MEMORY */
335 
336 
337 /*------------------------------------------------------------*/
338 /*--- V and A bits (Victoria & Albert ?)                   ---*/
339 /*------------------------------------------------------------*/
340 
/* The number of entries in the primary map can be altered.  However
   we hardwire the assumption that each secondary map covers precisely
   64k of address space. */
#define SM_SIZE 65536            /* DO NOT CHANGE */
#define SM_MASK (SM_SIZE-1)      /* DO NOT CHANGE */

/* Per-bit V values: 0 = defined, 1 = undefined. */
#define V_BIT_DEFINED         0
#define V_BIT_UNDEFINED       1

/* Whole-word V patterns: all-zeroes = fully defined,
   all-ones = fully undefined. */
#define V_BITS8_DEFINED       0
#define V_BITS8_UNDEFINED     0xFF

#define V_BITS16_DEFINED      0
#define V_BITS16_UNDEFINED    0xFFFF

#define V_BITS32_DEFINED      0
#define V_BITS32_UNDEFINED    0xFFFFFFFF

#define V_BITS64_DEFINED      0ULL
#define V_BITS64_UNDEFINED    0xFFFFFFFFFFFFFFFFULL
361 
362 
363 /*------------------------------------------------------------*/
364 /*--- Leak checking                                        ---*/
365 /*------------------------------------------------------------*/
366 
typedef
   enum {
      // Nb: the order is important -- it dictates the order of loss records
      // of equal sizes.
      Reachable    =0,  // Definitely reachable from root-set.
      Possible     =1,  // Possibly reachable from root-set;  involves at
                        //   least one interior-pointer along the way.
      IndirectLeak =2,  // Leaked, but reachable from another leaked block
                        //   (be it Unreached or IndirectLeak).
      Unreached    =3,  // Not reached, ie. leaked.
                        //   (At best, only reachable from itself via a cycle.)
  }
  Reachedness;

// Build mask to check or set Reachedness r membership
#define R2S(r) (1 << (r))
// Reachedness r is member of the Set s ?
#define RiS(r,s) ((s) & R2S(r))
// Returns a set containing all Reachedness
UInt MC_(all_Reachedness)(void);
387 
/* For VALGRIND_COUNT_LEAKS client request: byte totals per
   leak category, as found by the last leak search. */
extern SizeT MC_(bytes_leaked);
extern SizeT MC_(bytes_indirect);
extern SizeT MC_(bytes_dubious);
extern SizeT MC_(bytes_reachable);
extern SizeT MC_(bytes_suppressed);

/* For VALGRIND_COUNT_LEAK_BLOCKS client request: block counts per
   leak category, mirroring the byte totals above. */
extern SizeT MC_(blocks_leaked);
extern SizeT MC_(blocks_indirect);
extern SizeT MC_(blocks_dubious);
extern SizeT MC_(blocks_reachable);
extern SizeT MC_(blocks_suppressed);
401 
/* What level of leak checking to perform. */
typedef
   enum {
      LC_Off,
      LC_Summary,
      LC_Full,
   }
   LeakCheckMode;

/* Which loss records to output, relative to the previous search. */
typedef
   enum {
      LCD_Any,       // output all loss records, whatever the delta
      LCD_Increased, // output loss records with an increase in size or blocks
      LCD_Changed,   // output loss records with an increase or
                     // decrease in size or blocks
   }
   LeakCheckDeltaMode;
418 
/* When a LossRecord is put into an OSet, these elements represent the key. */
typedef
   struct _LossRecordKey {
      Reachedness  state;        // LC_Extra.state value shared by all blocks.
      ExeContext*  allocated_at; // Where they were allocated.
   }
   LossRecordKey;

/* A loss record, used for generating err msgs.  Multiple leaked blocks can be
 * merged into a single loss record if they have the same state and similar
 * enough allocation points (controlled by --leak-resolution). */
typedef
   struct _LossRecord {
      LossRecordKey key;  // Key, when used in an OSet.
      SizeT szB;          // Sum of all MC_Chunk.szB values.
      SizeT indirect_szB; // Sum of all LC_Extra.indirect_szB values.
      UInt  num_blocks;   // Number of blocks represented by the record.
      SizeT old_szB;          // old_* values are the values found during the
      SizeT old_indirect_szB; // previous leak search. old_* values are used to
      UInt  old_num_blocks;   // output only the changed/new loss records
   }
   LossRecord;

/* Bundle of parameters controlling one leak search. */
typedef
   struct _LeakCheckParams {
      LeakCheckMode mode;
      UInt show_leak_kinds;          // set of R2S(Reachedness) to display
      UInt errors_for_leak_kinds;    // set of R2S(Reachedness) counted as errors
      UInt heuristics;               // set of H2S(LeakCheckHeuristic) to apply
      LeakCheckDeltaMode deltamode;
      UInt max_loss_records_output; // limit on the nr of loss records output.
      Bool requested_by_monitor_command; // True when requested by gdb/vgdb.
   }
   LeakCheckParams;
453 
/* Run a leak search with the given parameters. */
void MC_(detect_memory_leaks) ( ThreadId tid, LeakCheckParams * lcp);

// Each time a leak search is done, the leak search generation
// MC_(leak_search_gen) is incremented.
extern UInt MC_(leak_search_gen);

// maintains the lcp.deltamode given in the last call to detect_memory_leaks
extern LeakCheckDeltaMode MC_(detect_memory_leaks_last_delta_mode);

// prints the list of blocks corresponding to the given loss_record_nr slice
// (from/to) (up to maximum max_blocks)
// Returns True if loss_record_nr_from identifies a correct loss record
// from last leak search, returns False otherwise.
// Note that loss_record_nr_to can be bigger than the nr of loss records. All
// loss records after from will then be examined and maybe printed.
// If heuristics != 0, print only the loss records/blocks found via
// one of the heuristics in the set.
Bool MC_(print_block_list) ( UInt loss_record_nr_from, UInt loss_record_nr_to,
                             UInt max_blocks, UInt heuristics);

// Prints the addresses/registers/... at which a pointer to
// the given half-open range [address, address+szB[ is found.
void MC_(who_points_at) ( Addr address, SizeT szB);

// if delta_mode == LCD_Any, prints in buf an empty string
// otherwise prints a delta in the layout  " (+%'lu)" or " (-%'lu)"
extern HChar * MC_(snprintf_delta) (HChar * buf, Int size,
                                    SizeT current_val, SizeT old_val,
                                    LeakCheckDeltaMode delta_mode);


Bool MC_(is_valid_aligned_word)     ( Addr a );
Bool MC_(is_within_valid_secondary) ( Addr a );

// Prints as user msg a description of the given loss record.
void MC_(pp_LossRecord)(UInt n_this_record, UInt n_total_records,
                        LossRecord* l);
491 
492 
493 /*------------------------------------------------------------*/
494 /*--- Errors and suppressions                              ---*/
495 /*------------------------------------------------------------*/
496 
/* Did we show the user any errors for which an uninitialised
   value origin could have been collected (but wasn't) ?  If yes,
   then, at the end of the run, print a 1 line message advising that a
   rerun with --track-origins=yes might help. */
extern Bool MC_(any_value_errors);

/* Standard functions for error and suppressions as required by the
   core/tool iface */
Bool MC_(eq_Error)           ( VgRes res, const Error* e1, const Error* e2 );
void MC_(before_pp_Error)    ( const Error* err );
void MC_(pp_Error)           ( const Error* err );
UInt MC_(update_Error_extra) ( const Error* err );

Bool MC_(is_recognised_suppression) ( const HChar* name, Supp* su );

Bool MC_(read_extra_suppression_info) ( Int fd, HChar** buf,
                                        SizeT* nBuf, Int* lineno, Supp *su );

Bool MC_(error_matches_suppression) ( const Error* err, const Supp* su );

SizeT MC_(get_extra_suppression_info) ( const Error* err,
                                        /*OUT*/HChar* buf, Int nBuf );
SizeT MC_(print_extra_suppression_use) ( const Supp* su,
                                         /*OUT*/HChar* buf, Int nBuf );
void MC_(update_extra_suppression_use) ( const Error* err, const Supp* su );

const HChar* MC_(get_error_name) ( const Error* err );

/* Recording of errors.  'otag' parameters carry the origin tag
   described in the "Origin tracking" section above. */
void MC_(record_address_error) ( ThreadId tid, Addr a, Int szB,
                                 Bool isWrite );
void MC_(record_cond_error)    ( ThreadId tid, UInt otag );
void MC_(record_value_error)   ( ThreadId tid, Int szB, UInt otag );
void MC_(record_jump_error)    ( ThreadId tid, Addr a );

void MC_(record_free_error)            ( ThreadId tid, Addr a );
void MC_(record_illegal_mempool_error) ( ThreadId tid, Addr a );
void MC_(record_freemismatch_error)    ( ThreadId tid, MC_Chunk* mc );

void MC_(record_overlap_error)  ( ThreadId tid, const HChar* function,
                                  Addr src, Addr dst, SizeT szB );
void MC_(record_core_mem_error) ( ThreadId tid, const HChar* msg );
void MC_(record_regparam_error) ( ThreadId tid, const HChar* msg, UInt otag );
void MC_(record_memparam_error) ( ThreadId tid, Addr a,
                                  Bool isAddrErr, const HChar* msg, UInt otag );
void MC_(record_user_error)     ( ThreadId tid, Addr a,
                                  Bool isAddrErr, UInt otag );

Bool MC_(record_leak_error)     ( ThreadId tid,
                                  UInt n_this_record,
                                  UInt n_total_records,
                                  LossRecord* lossRecord,
                                  Bool print_record,
                                  Bool count_error );

Bool MC_(record_fishy_value_error)  ( ThreadId tid, const HChar* function,
                                      const HChar *argument_name, SizeT value );

/* Leak kinds tokens to call VG_(parse_enum_set). */
extern const HChar* MC_(parse_leak_kinds_tokens);

/* prints a description of address a */
void MC_(pp_describe_addr) (Addr a);

/* Is this address in a user-specified "ignored range" ? */
Bool MC_(in_ignored_range) ( Addr a );
563 
564 
565 /*------------------------------------------------------------*/
566 /*--- Client blocks                                        ---*/
567 /*------------------------------------------------------------*/
568 
/* Describes a client block.  See mc_main.c.  An unused block has
   start == size == 0.  */
typedef
   struct {
      Addr        start;   // client-supplied start address
      SizeT       size;    // block size in bytes
      ExeContext* where;   // context at which the block was registered
      HChar*      desc;    // client-supplied description string
   }
   CGenBlock;

/* Get access to the client block array. */
void MC_(get_ClientBlock_array)( /*OUT*/CGenBlock** blocks,
                                 /*OUT*/UWord* nBlocks );
583 
584 
585 /*------------------------------------------------------------*/
586 /*--- Command line options + defaults                      ---*/
587 /*------------------------------------------------------------*/
588 
/* Allow loads from partially-valid addresses?  default: YES */
extern Bool MC_(clo_partial_loads_ok);

/* Max volume of the freed blocks queue. */
extern Long MC_(clo_freelist_vol);

/* Blocks with a size >= MC_(clo_freelist_big_blocks) will be put
   in the "big block" freed blocks queue. */
extern Long MC_(clo_freelist_big_blocks);

/* Do leak check at exit?  default: NO */
extern LeakCheckMode MC_(clo_leak_check);

/* How closely should we compare ExeContexts in leak records? default: 2 */
extern VgRes MC_(clo_leak_resolution);

/* In leak check, show loss records if their R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_show_leak_kinds);

/* In leak check, a loss record is an error if its R2S(reachedness) is set.
   Default : R2S(Possible) | R2S(Unreached). */
extern UInt MC_(clo_errors_for_leak_kinds);
612 
/* Various leak check heuristics which can be activated/deactivated. */
typedef
   enum {
      LchNone                =0,
      // no heuristic.
      LchStdString           =1,
      // Consider interior pointer pointing at the array of char in a
      // std::string as reachable.
      LchLength64            =2,
      // Consider interior pointer pointing at offset 64bit of a block as
      // reachable, when the first 8 bytes contains the block size - 8.
      // Such length+interior pointers are used by e.g. sqlite3MemMalloc.
      // On 64bit platforms LchNewArray will also match these blocks.
      LchNewArray            =3,
      // Consider interior pointer pointing at second word of a new[] array as
      // reachable. Such interior pointers are used for arrays whose elements
      // have a destructor.
      LchMultipleInheritance =4,
      // Consider interior pointer pointing just after what looks a vtable
      // as reachable.
  }
  LeakCheckHeuristic;

// Nr of heuristics, including the LchNone heuristic.
#define N_LEAK_CHECK_HEURISTICS 5

// Build mask to check or set Heuristic h membership
#define H2S(h) (1 << (h))
// Heuristic h is member of the Set s ?
#define HiS(h,s) ((s) & H2S(h))

/* Heuristics set to use for the leak search.
   Default : all heuristics. */
extern UInt MC_(clo_leak_check_heuristics);
647 
/* Assume accesses immediately below %esp are due to gcc-2.96 bugs.
 * default: NO */
extern Bool MC_(clo_workaround_gcc296_bugs);

/* Fill malloc-d/free-d client blocks with a specific value?  -1 if
   not, else 0x00 .. 0xFF indicating the fill value to use.  Can be
   useful for causing programs with bad heap corruption to fail in
   more repeatable ways.  Note that malloc-filled and free-filled
   areas are still undefined and noaccess respectively.  This merely
   causes them to contain the specified values. */
extern Int MC_(clo_malloc_fill);
extern Int MC_(clo_free_fill);

/* Which stack trace(s) to keep for malloc'd/free'd client blocks?
   For each client block, the stack traces where it was allocated
   and/or freed are optionally kept depending on MC_(clo_keep_stacktraces). */
typedef
   enum {                 // keep alloc stack trace ?  keep free stack trace ?
      KS_none,            // never                     never
      KS_alloc,           // always                    never
      KS_free,            // never                     always
      KS_alloc_then_free, // when still malloc'd       when free'd
      KS_alloc_and_free,  // always                    always
   }
   KeepStacktraces;
extern KeepStacktraces MC_(clo_keep_stacktraces);

/* Indicates the level of instrumentation/checking done by Memcheck.

   1 = No undefined value checking, Addrcheck-style behaviour only:
       only address checking is done.  This is faster but finds fewer
       errors.  Note that although Addrcheck had 1 bit per byte
       overhead vs the old Memcheck's 9 bits per byte, with this mode
       and compressed V bits, no memory is saved with this mode --
       it's still 2 bits per byte overhead.  This is a little wasteful
       -- it could be done with 1 bit per byte -- but lets us reuse
       the many shadow memory access functions.  Note that in this
       mode neither the secondary V bit table nor the origin-tag cache
       are used.

   2 = Address checking and Undefined value checking are performed,
       but origins are not tracked.  So the origin-tag cache is not
       used in this mode.  This setting is the default and corresponds
       to the "normal" Memcheck behaviour that has shipped for years.

   3 = Address checking, undefined value checking, and origins for
       undefined values are tracked.

   The default is 2.
*/
extern Int MC_(clo_mc_level);

/* Should we show mismatched frees?  Default: YES */
extern Bool MC_(clo_show_mismatched_frees);

/* Should we use expensive definedness checking for add/sub and compare
   operations? Default: NO */
extern Bool MC_(clo_expensive_definedness_checks);
706 
707 /*------------------------------------------------------------*/
708 /*--- Instrumentation                                      ---*/
709 /*------------------------------------------------------------*/
710 
/* Functions defined in mc_main.c */

/* For the fail_w_o functions, the UWord arg is actually the 32-bit
   origin tag and should really be UInt, but to be simple and safe
   considering it's called from generated code, just claim it to be a
   UWord. */
VG_REGPARM(2) void MC_(helperc_value_checkN_fail_w_o) ( HWord, UWord );
VG_REGPARM(1) void MC_(helperc_value_check8_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check4_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check1_fail_w_o) ( UWord );
VG_REGPARM(1) void MC_(helperc_value_check0_fail_w_o) ( UWord );

/* And call these ones instead to report an uninitialised value error
   but with no origin available. */
VG_REGPARM(1) void MC_(helperc_value_checkN_fail_no_o) ( HWord );
VG_REGPARM(0) void MC_(helperc_value_check8_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check4_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check1_fail_no_o) ( void );
VG_REGPARM(0) void MC_(helperc_value_check0_fail_no_o) ( void );

/* V-bits load/store helpers.
   NOTE(review): the 64-bit STOREV helpers are declared VG_REGPARM(1)
   although they take two arguments, unlike the 32/16/8-bit ones which
   use VG_REGPARM(2) -- presumably because the ULong payload cannot be
   passed in a single register on 32-bit hosts; confirm against the
   VG_REGPARM convention before "fixing" this apparent inconsistency. */
VG_REGPARM(1) void MC_(helperc_STOREV64be) ( Addr, ULong );
VG_REGPARM(1) void MC_(helperc_STOREV64le) ( Addr, ULong );
VG_REGPARM(2) void MC_(helperc_STOREV32be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV32le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16be) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV16le) ( Addr, UWord );
VG_REGPARM(2) void MC_(helperc_STOREV8)    ( Addr, UWord );

VG_REGPARM(2) void  MC_(helperc_LOADV256be) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV256le) ( /*OUT*/V256*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128be) ( /*OUT*/V128*, Addr );
VG_REGPARM(2) void  MC_(helperc_LOADV128le) ( /*OUT*/V128*, Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64be)  ( Addr );
VG_REGPARM(1) ULong MC_(helperc_LOADV64le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV32le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16be)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV16le)  ( Addr );
VG_REGPARM(1) UWord MC_(helperc_LOADV8)     ( Addr );

void MC_(helperc_MAKE_STACK_UNINIT) ( Addr base, UWord len,
                                                 Addr nia );

/* Origin tag load/store helpers */
VG_REGPARM(2) void  MC_(helperc_b_store1) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store2) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store4) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store8) ( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store16)( Addr a, UWord d32 );
VG_REGPARM(2) void  MC_(helperc_b_store32)( Addr a, UWord d32 );
VG_REGPARM(1) UWord MC_(helperc_b_load1) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load2) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load4) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load8) ( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load16)( Addr a );
VG_REGPARM(1) UWord MC_(helperc_b_load32)( Addr a );

/* Functions defined in mc_translate.c */
IRSB* MC_(instrument) ( VgCallbackClosure* closure,
                        IRSB* bb_in,
                        const VexGuestLayout* layout,
                        const VexGuestExtents* vge,
                        const VexArchInfo* archinfo_host,
                        IRType gWordTy, IRType hWordTy );

IRSB* MC_(final_tidy) ( IRSB* );
778 
779 #endif /* ndef __MC_INCLUDE_H */
780 
781 /*--------------------------------------------------------------------*/
782 /*--- end                                                          ---*/
783 /*--------------------------------------------------------------------*/
784