1 
2 /*--------------------------------------------------------------------*/
3 /*--- Take snapshots of client stacks.              m_stacktrace.c ---*/
4 /*--------------------------------------------------------------------*/
5 
6 /*
7    This file is part of Valgrind, a dynamic binary instrumentation
8    framework.
9 
10    Copyright (C) 2000-2013 Julian Seward
11       jseward@acm.org
12 
13    This program is free software; you can redistribute it and/or
14    modify it under the terms of the GNU General Public License as
15    published by the Free Software Foundation; either version 2 of the
16    License, or (at your option) any later version.
17 
18    This program is distributed in the hope that it will be useful, but
19    WITHOUT ANY WARRANTY; without even the implied warranty of
20    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
21    General Public License for more details.
22 
23    You should have received a copy of the GNU General Public License
24    along with this program; if not, write to the Free Software
25    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
26    02111-1307, USA.
27 
28    The GNU General Public License is contained in the file COPYING.
29 */
30 
31 #include "pub_core_basics.h"
32 #include "pub_core_vki.h"
33 #include "pub_core_threadstate.h"
34 #include "pub_core_debuginfo.h"     // XXX: circular dependency
35 #include "pub_core_aspacemgr.h"     // For VG_(is_addressable)()
36 #include "pub_core_libcbase.h"
37 #include "pub_core_libcassert.h"
38 #include "pub_core_libcprint.h"
39 #include "pub_core_machine.h"
40 #include "pub_core_options.h"
41 #include "pub_core_stacks.h"        // VG_(stack_limits)
42 #include "pub_core_stacktrace.h"
43 #include "pub_core_xarray.h"
44 #include "pub_core_clientstate.h"   // VG_(client__dl_sysinfo_int80)
45 #include "pub_core_trampoline.h"
46 
47 
48 /*------------------------------------------------------------*/
49 /*---                                                      ---*/
50 /*--- BEGIN platform-dependent unwinder worker functions   ---*/
51 /*---                                                      ---*/
52 /*------------------------------------------------------------*/
53 
54 /* Take a snapshot of the client's stack, putting up to 'max_n_ips'
55    IPs into 'ips'.  In order to be thread-safe, we pass in the
   thread's IP, SP, FP if that's meaningful, and LR if that's
57    meaningful.  Returns number of IPs put in 'ips'.
58 
59    If you know what the thread ID for this stack is, send that as the
60    first parameter, else send zero.  This helps generate better stack
61    traces on ppc64-linux and has no effect on other platforms.
62 */
63 
64 /* Do frame merging in the _i frames in _ips array of recursive cycles
65    of up to _nframes.  The merge is done during stack unwinding
66    (i.e. in platform specific unwinders) to collect as many
67    "interesting" stack traces as possible. */
/* Do frame merging in the _i frames in _ips array of recursive cycles
   of up to _nframes.  The merge is done during stack unwinding
   (i.e. in platform specific unwinders) to collect as many
   "interesting" stack traces as possible.
   Note: _i is read and written; after a merge it indexes the slot
   following the retained frames.
   Wrapped in do { } while (0) so the macro expands to a single
   statement and stays safe at un-braced if/else call sites. */
#define RECURSIVE_MERGE(_nframes,_ips,_i) do {                  \
   Int dist;                                                    \
   for (dist = 1; dist <= _nframes && dist < (Int)_i; dist++) { \
      if (_ips[_i-1] == _ips[_i-1-dist]) {                      \
         _i = _i - dist;                                        \
         break;                                                 \
      }                                                         \
   }                                                            \
} while (0)
77 
78 
79 /* ------------------------ x86 ------------------------- */
80 
81 #if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
82 
#define N_FP_CF_VERIF 1021
// Prime number, so that the size of fp_CF_verif_cache stays just below
// 4K or 8K (depending on sizeof(Addr)).
// Note that this prime nr differs from the one chosen in
// m_debuginfo/debuginfo.c for the cfsi cache : in case we have
// a collision here between two IPs, we expect to not (often) have the
// same collision in the cfsi cache (and vice-versa).

// Verdicts stored (xor-ed with the IP) in fp_CF_verif_cache:
// unwinding with fp chain is ok:
#define FPUNWIND 0
// there is no CFI info for this IP:
#define NOINFO   1
// Unwind with FP is not ok, must use CF unwind:
#define CFUNWIND 2
96 
/* Cache of fp-unwind vs CF-unwind verification verdicts, indexed by
   IP % N_FP_CF_VERIF.  Each entry holds IP ^ FPUNWIND/NOINFO/CFUNWIND;
   0 means "empty".  See the big explanatory comment below. */
static Addr fp_CF_verif_cache [N_FP_CF_VERIF];
98 
99 /* An unwind done by following the fp chain technique can be incorrect
100    as not all frames are respecting the standard bp/sp ABI.
101    The CF information is now generated by default by gcc
102    (as part of the dwarf info). However, unwinding using CF information
103    is significantly slower : a slowdown of 20% has been observed
   on a helgrind test case.
105    So, by default, the unwinding will be done using the fp chain.
106    But before accepting to unwind an IP with fp_chain, the result
107    of the unwind will be checked with the CF information.
108    This check can give 3 results:
109      FPUNWIND (0): there is CF info, and it gives the same result as fp unwind.
110        => it is assumed that future unwind for this IP can be done
111           with the fast fp chain, without further CF checking
112      NOINFO   (1): there is no CF info (so, fp unwind is the only do-able thing)
113      CFUNWIND (2): there is CF info, but unwind result differs.
114        => it is assumed that future unwind for this IP must be done
115        with the CF info.
116    Of course, if each fp unwind implies a check done with a CF unwind,
117    it would just be slower => we cache the check result in an
118    array of checked Addr.
119    The check for an IP will be stored at
120     fp_CF_verif_cache[IP % N_FP_CF_VERIF] as one of:
121                      IP ^ FPUNWIND
122                      IP ^ NOINFO
123                      IP ^ CFUNWIND
124 
125    Note: we can re-use the last (ROUNDDOWN (log (N_FP_CF_VERIF))) bits
126    to store the check result, as they are guaranteed to be non significant
127    in the comparison between 2 IPs stored in fp_CF_verif_cache).
128    In other words, if two IPs are only differing on the last 2 bits,
129    then they will not land in the same cache bucket.
130 */
131 
/* Generation number of the CFI cache that fp_CF_verif_cache was last
   synchronised with. */
static UInt fp_CF_verif_generation = 0;
// Our cache has to be maintained in sync with the CFI cache.
// Each time the CFI cache is changed, its generation will be incremented.
// We will clear our cache when our saved generation differs from
// the CFI cache generation.
137 
/* x86 unwinder.  Takes a snapshot of the client stack described by
   *startRegs, storing up to max_n_ips return addresses in ips[] (and,
   when non-NULL, the matching SP/FP values in sps[]/fps[]).  Returns
   the number of frames recorded (always >= 1: the starting IP).
   Strategy: unwind primarily by following the %ebp chain (fast), but
   the first time an IP is seen, verify the fp-unwind result against
   the CFI unwinder and cache the verdict in fp_CF_verif_cache (see the
   big comment above for the FPUNWIND/NOINFO/CFUNWIND scheme). */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   const Bool do_stats = False; // compute and output some stats regularly.
   static struct {
      UInt nr; // nr of stacktraces computed
      UInt nf; // nr of frames computed
      UInt Ca; // unwind for which cache indicates CFUnwind must be used.
      UInt FF; // unwind for which cache indicates FPUnwind can be used.
      UInt Cf; // unwind at end of stack+store CFUNWIND (xip not end of stack).
      UInt Fw; // unwind at end of stack+store FPUNWIND
      UInt FO; // unwind + store FPUNWIND
      UInt CF; // unwind + store CFUNWIND. Details below.
      UInt xi; UInt xs; UInt xb; // register(s) which caused a 'store CFUNWIND'.
      UInt Ck; // unwind fp invalid+store FPUNWIND
      UInt MS; // microsoft unwind
   } stats;

   const Bool   debug = False;
   //                 = VG_(debugLog_getLevel) () > 3;
   //                 = True;
   //                 = stats.nr >= 123456;
   const HChar* unwind_case; // used when debug is True.
   // Debugging this function is not straightforward.
   // Here is the easiest way I have found:
   // 1. Change the above to True.
   // 2. Start your program under Valgrind with --tool=none --vgdb-error=0
   // 3. Use GDB/vgdb to put a breakpoint where you want to debug the stacktrace
   // 4. Continue till breakpoint is encountered
   // 5. From GDB, use 'monitor v.info scheduler' and examine the unwind traces.
   //    You might have to do twice 'monitor v.info scheduler' to see
   //    the effect of caching the results of the verification.
   //    You can also modify the debug dynamically using by using
   //    'monitor v.set debuglog 4.

   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs fpverif_uregs; // result of CF unwind for a check reason.
   Addr xip_verified = 0; // xip for which we have calculated fpverif_uregs
   // 0 assigned to silence false positive -Wuninitialized warning
   // This is a false positive as xip_verified is assigned when
   // xip_verif > CFUNWIND and only used if xip_verif > CFUNWIND.

   D3UnwindRegs uregs;
   uregs.xip = (Addr)startRegs->r_pc;
   uregs.xsp = (Addr)startRegs->r_sp;
   uregs.xbp = startRegs->misc.X86.r_ebp;
   Addr fp_min = uregs.xsp;

   /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
      stopping when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */

   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
   // current page, at least.  Dunno if it helps.
   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      // NOTE(review): "0x08%lx" below looks like a typo for "0x%08lx";
      // debug-only output, left as-is here.
      VG_(printf)("max_n_ips=%d fp_min=0x%08lx fp_max_orig=0x08%lx, "
                  "fp_max=0x%08lx ip=0x%08lx fp=0x%08lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.xip, uregs.xbp);

   /* Assertion broken before main() is reached in pthreaded programs;  the
    * offending stack traces only have one item.  --njn, 2002-aug-16 */
   /* vg_assert(fp_min <= fp_max);*/
   // On Darwin, this kicks in for pthread-related stack traces, so they're
   // only 1 entry long which is wrong.
#  if !defined(VGO_darwin)
   if (fp_min + 512 >= fp_max) {
      /* If the stack limits look bogus, don't poke around ... but
         don't bomb out either. */
      if (sps) sps[0] = uregs.xsp;
      if (fps) fps[0] = uregs.xbp;
      ips[0] = uregs.xip;
      return 1;
   }
#  endif

   // Resync our verification cache with the CFI cache: if the debug
   // info changed, all cached verdicts are stale and must be dropped.
   if (UNLIKELY (fp_CF_verif_generation != VG_(CF_info_generation)())) {
      fp_CF_verif_generation = VG_(CF_info_generation)();
      VG_(memset)(&fp_CF_verif_cache, 0, sizeof(fp_CF_verif_cache));
   }


   /* Loop unwinding the stack. Note that the IP value we get on
    * each pass (whether from CFI info or a stack frame) is a
    * return address so is actually after the calling instruction
    * in the calling function.
    *
    * Because of this we subtract one from the IP after each pass
    * of the loop so that we find the right CFI block on the next
    * pass - otherwise we can find the wrong CFI info if it happens
    * to change after the calling instruction and that will mean
    * that we will fail to unwind the next step.
    *
    * This most frequently happens at the end of a function when
    * a tail call occurs and we wind up using the CFI info for the
    * next function which is completely wrong.
    */
   if (sps) sps[0] = uregs.xsp;
   if (fps) fps[0] = uregs.xbp;
   ips[0] = uregs.xip;
   i = 1;
   if (do_stats) stats.nr++;

   while (True) {

      if (i >= max_n_ips)
         break;

      UWord hash = uregs.xip % N_FP_CF_VERIF;
      Addr xip_verif = uregs.xip ^ fp_CF_verif_cache [hash];
      if (debug)
         VG_(printf)("     uregs.xip 0x%08lx xip_verif[0x%08lx]"
                     " xbp 0x%08lx xsp 0x%08lx\n",
                     uregs.xip, xip_verif,
                     uregs.xbp, uregs.xsp);
      // If xip is in cache, then xip_verif will be <= CFUNWIND.
      // Otherwise, if not in cache, xip_verif will be > CFUNWIND.

      /* Try to derive a new (ip,sp,fp) triple from the current set. */

      /* Do we have to do CFI unwinding ?
         We do CFI unwinding if one of the following condition holds:
         a. fp_CF_verif_cache contains xip but indicates CFUNWIND must
            be done (i.e. fp unwind check failed when we did the first
            unwind for this IP).
         b. fp_CF_verif_cache does not contain xip.
            We will try CFI unwinding in fpverif_uregs and compare with
            FP unwind result to insert xip in the cache with the correct
            indicator. */
      if (UNLIKELY(xip_verif >= CFUNWIND)) {
         if (xip_verif == CFUNWIND) {
            /* case a : do "real" cfi unwind */
            if ( VG_(use_CF_info)( &uregs, fp_min, fp_max ) ) {
               if (debug) unwind_case = "Ca";
               if (do_stats) stats.Ca++;
               goto unwind_done;
            }
            /* ??? cache indicates we have to do CFI unwind (so, we
             previously found CFI info, and failed the fp unwind
             check). Now, we just failed with CFI.  So, once we
             succeed, once we fail.  No idea what is going on =>
             cleanup the cache entry and fallover to fp unwind (this
             time). */
            fp_CF_verif_cache [hash] = 0;
            if (debug) VG_(printf)("     cache reset as CFI ok then nok\n");
            //??? stats
            xip_verif = NOINFO;
         } else {
            /* case b : do "verif" cfi unwind in fpverif_uregs */
            fpverif_uregs = uregs;
            xip_verified = uregs.xip;
            if ( !VG_(use_CF_info)( &fpverif_uregs, fp_min, fp_max ) ) {
               fp_CF_verif_cache [hash] = uregs.xip ^ NOINFO;
               if (debug) VG_(printf)("     cache NOINFO fpverif_uregs\n");
               xip_verif = NOINFO;
            }
         }
      }

      /* On x86, try the old-fashioned method of following the
         %ebp-chain.  This can be done if the fp_CF_verif_cache for xip
         indicate fp unwind is ok. This must be done if the cache indicates
         there is no info. This is also done to confirm what to put in the cache
         if xip was not in the cache. */
      /* This deals with frames resulting from functions which begin
         "pushl %ebp ; movl %esp, %ebp" which is the ABI-mandated preamble. */
      if (fp_min <= uregs.xbp &&
          uregs.xbp <= fp_max - 1 * sizeof(UWord)/*see comment below*/)
      {
         /* fp looks sane, so use it. */
         uregs.xip = (((UWord*)uregs.xbp)[1]);
         // We stop if we hit a zero (the traditional end-of-stack
         // marker) or a one -- these correspond to recorded IPs of 0 or -1.
         // The latter because r8818 (in this file) changes the meaning of
         // entries [1] and above in a stack trace, by subtracting 1 from
         // them.  Hence stacks that used to end with a zero value now end in
         // -1 and so we must detect that too.
         if (0 == uregs.xip || 1 == uregs.xip) {
            if (xip_verif > CFUNWIND) {
               // Check if we obtain the same result with fp unwind.
               // If same result, then mark xip as fp unwindable
               if (uregs.xip == fpverif_uregs.xip) {
                  fp_CF_verif_cache [hash] = xip_verified ^ FPUNWIND;
                  if (debug) VG_(printf)("     cache FPUNWIND 0\n");
                  unwind_case = "Fw";
                  if (do_stats) stats.Fw++;
                  break;
               } else {
                  fp_CF_verif_cache [hash] = xip_verified ^ CFUNWIND;
                  uregs = fpverif_uregs;
                  if (debug) VG_(printf)("     cache CFUNWIND 0\n");
                  unwind_case = "Cf";
                  if (do_stats) stats.Cf++;
                  goto unwind_done;
               }
            } else {
               // end of stack => out of the loop.
               break;
            }
         }

         uregs.xsp = uregs.xbp + sizeof(Addr) /*saved %ebp*/
                               + sizeof(Addr) /*ra*/;
         uregs.xbp = (((UWord*)uregs.xbp)[0]);
         if (xip_verif > CFUNWIND) {
            // First time we see this IP: compare the fp unwind with the
            // CF unwind computed earlier and cache the verdict.
            if (uregs.xip == fpverif_uregs.xip
                && uregs.xsp == fpverif_uregs.xsp
                && uregs.xbp == fpverif_uregs.xbp) {
               fp_CF_verif_cache [hash] = xip_verified ^ FPUNWIND;
               if (debug) VG_(printf)("     cache FPUNWIND >2\n");
               if (debug) unwind_case = "FO";
               if (do_stats) stats.FO++;
            } else {
               fp_CF_verif_cache [hash] = xip_verified ^ CFUNWIND;
               if (debug) VG_(printf)("     cache CFUNWIND >2\n");
               if (do_stats && uregs.xip != fpverif_uregs.xip) stats.xi++;
               if (do_stats && uregs.xsp != fpverif_uregs.xsp) stats.xs++;
               if (do_stats && uregs.xbp != fpverif_uregs.xbp) stats.xb++;
               uregs = fpverif_uregs;
               if (debug) unwind_case = "CF";
               if (do_stats) stats.CF++;
            }
         } else {
            if (debug) unwind_case = "FF";
            if (do_stats) stats.FF++;
         }
         goto unwind_done;
      } else {
         // fp unwind has failed.
         // If we were checking the validity of the cfi unwinding,
         // we mark in the cache that the fp unwind cannot be done, and that
         // cfi unwind is desired.
         if (xip_verif > CFUNWIND) {
            // We know that fpverif_uregs contains valid information,
            // as a failed cf unwind would have put NOINFO in xip_verif.
            fp_CF_verif_cache [hash] = xip_verified ^ CFUNWIND;
            if (debug) VG_(printf)("     cache CFUNWIND as fp failed\n");
            uregs = fpverif_uregs;
            if (debug) unwind_case = "Ck";
            if (do_stats) stats.Ck++;
            goto unwind_done;
         }
         // xip_verif is FPUNWIND or NOINFO.
         // We failed the cfi unwind and/or the fp unwind.
         // => fallback to FPO info.
      }

      /* And, similarly, try for MSVC FPO unwind info. */
      if ( VG_(use_FPO_info)( &uregs.xip, &uregs.xsp, &uregs.xbp,
                              fp_min, fp_max ) ) {
         if (debug) unwind_case = "MS";
         if (do_stats) stats.MS++;
         goto unwind_done;
      }

      /* No luck.  We have to give up. */
      break;

   unwind_done:
      /* Add a frame in ips/sps/fps */
      /* fp is %ebp.  sp is %esp.  ip is %eip. */
      if (0 == uregs.xip || 1 == uregs.xip) break;
      if (sps) sps[i] = uregs.xsp;
      if (fps) fps[i] = uregs.xbp;
      ips[i++] = uregs.xip - 1;
      /* -1: refer to calling insn, not the RA */
      if (debug)
         VG_(printf)("     ips%s[%d]=0x%08lx\n", unwind_case, i-1, ips[i-1]);
      uregs.xip = uregs.xip - 1;
      /* as per comment at the head of this loop */
      if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
   }

   if (do_stats) stats.nf += i;
   if (do_stats && stats.nr % 10000 == 0) {
     VG_(printf)("nr %u nf %u "
                 "Ca %u FF %u "
                 "Cf %u "
                 "Fw %u FO %u "
                 "CF %u (xi %u xs %u xb %u) "
                 "Ck %u MS %u\n",
                 stats.nr, stats.nf,
                 stats.Ca, stats.FF,
                 stats.Cf,
                 stats.Fw, stats.FO,
                 stats.CF, stats.xi, stats.xs, stats.xb,
                 stats.Ck, stats.MS);
   }
   n_found = i;
   return n_found;
}
444 
445 #undef N_FP_CF_VERIF
446 #undef FPUNWIND
447 #undef NOINFO
448 #undef CFUNWIND
449 
450 #endif
451 
452 /* ----------------------- amd64 ------------------------ */
453 
454 #if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
455 
VG_(get_StackTrace_wrk)456 UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
457                                /*OUT*/Addr* ips, UInt max_n_ips,
458                                /*OUT*/Addr* sps, /*OUT*/Addr* fps,
459                                const UnwindStartRegs* startRegs,
460                                Addr fp_max_orig )
461 {
462    Bool  debug = False;
463    Int   i;
464    Addr  fp_max;
465    UInt  n_found = 0;
466    const Int cmrf = VG_(clo_merge_recursive_frames);
467 
468    vg_assert(sizeof(Addr) == sizeof(UWord));
469    vg_assert(sizeof(Addr) == sizeof(void*));
470 
471    D3UnwindRegs uregs;
472    uregs.xip = startRegs->r_pc;
473    uregs.xsp = startRegs->r_sp;
474    uregs.xbp = startRegs->misc.AMD64.r_rbp;
475    Addr fp_min = uregs.xsp;
476 
477    /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
478       stopping when the trail goes cold, which we guess to be
479       when FP is not a reasonable stack location. */
480 
481    // JRS 2002-sep-17: hack, to round up fp_max to the end of the
482    // current page, at least.  Dunno if it helps.
483    // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
484    fp_max = VG_PGROUNDUP(fp_max_orig);
485    if (fp_max >= sizeof(Addr))
486       fp_max -= sizeof(Addr);
487 
488    if (debug)
489       VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
490                   "fp_max=0x%lx ip=0x%lx fp=0x%lx\n",
491                   max_n_ips, fp_min, fp_max_orig, fp_max,
492                   uregs.xip, uregs.xbp);
493 
494    /* Assertion broken before main() is reached in pthreaded programs;  the
495     * offending stack traces only have one item.  --njn, 2002-aug-16 */
496    /* vg_assert(fp_min <= fp_max);*/
497    // On Darwin, this kicks in for pthread-related stack traces, so they're
498    // only 1 entry long which is wrong.
499 #  if !defined(VGO_darwin)
500    if (fp_min + 256 >= fp_max) {
501       /* If the stack limits look bogus, don't poke around ... but
502          don't bomb out either. */
503       if (sps) sps[0] = uregs.xsp;
504       if (fps) fps[0] = uregs.xbp;
505       ips[0] = uregs.xip;
506       return 1;
507    }
508 #  endif
509 
510    /* fp is %rbp.  sp is %rsp.  ip is %rip. */
511 
512    ips[0] = uregs.xip;
513    if (sps) sps[0] = uregs.xsp;
514    if (fps) fps[0] = uregs.xbp;
515    i = 1;
516 
517 #  if defined(VGO_darwin)
518    if (VG_(is_valid_tid)(tid_if_known) &&
519       VG_(is_in_syscall)(tid_if_known) &&
520       i < max_n_ips) {
521       /* On Darwin, all the system call stubs have no function
522        * prolog.  So instead of top of the stack being a new
523        * frame comprising a saved BP and a return address, we
524        * just have the return address in the caller's frame.
525        * Adjust for this by recording the return address.
526        */
527       ips[i] = *(Addr *)uregs.xsp - 1;
528       if (sps) sps[i] = uregs.xsp;
529       if (fps) fps[i] = uregs.xbp;
530       i++;
531    }
532 #  endif
533 
534    /* Loop unwinding the stack. Note that the IP value we get on
535     * each pass (whether from CFI info or a stack frame) is a
536     * return address so is actually after the calling instruction
537     * in the calling function.
538     *
539     * Because of this we subtract one from the IP after each pass
540     * of the loop so that we find the right CFI block on the next
541     * pass - otherwise we can find the wrong CFI info if it happens
542     * to change after the calling instruction and that will mean
543     * that we will fail to unwind the next step.
544     *
545     * This most frequently happens at the end of a function when
546     * a tail call occurs and we wind up using the CFI info for the
547     * next function which is completely wrong.
548     */
549    while (True) {
550 
551       if (i >= max_n_ips)
552          break;
553 
554       /* Try to derive a new (ip,sp,fp) triple from the current set. */
555 
556       /* First off, see if there is any CFI info to hand which can
557          be used. */
558       if ( VG_(use_CF_info)( &uregs, fp_min, fp_max ) ) {
559          if (0 == uregs.xip || 1 == uregs.xip) break;
560          if (sps) sps[i] = uregs.xsp;
561          if (fps) fps[i] = uregs.xbp;
562          ips[i++] = uregs.xip - 1; /* -1: refer to calling insn, not the RA */
563          if (debug)
564             VG_(printf)("     ipsC[%d]=%#08lx\n", i-1, ips[i-1]);
565          uregs.xip = uregs.xip - 1; /* as per comment at the head of this loop */
566          if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
567          continue;
568       }
569 
570       /* If VG_(use_CF_info) fails, it won't modify ip/sp/fp, so
571          we can safely try the old-fashioned method. */
572       /* This bit is supposed to deal with frames resulting from
573          functions which begin "pushq %rbp ; movq %rsp, %rbp".
574          Unfortunately, since we can't (easily) look at the insns at
575          the start of the fn, like GDB does, there's no reliable way
576          to tell.  Hence the hack of first trying out CFI, and if that
577          fails, then use this as a fallback. */
578       /* Note: re "- 1 * sizeof(UWord)", need to take account of the
579          fact that we are prodding at & ((UWord*)fp)[1] and so need to
580          adjust the limit check accordingly.  Omitting this has been
581          observed to cause segfaults on rare occasions. */
582       if (fp_min <= uregs.xbp && uregs.xbp <= fp_max - 1 * sizeof(UWord)) {
583          /* fp looks sane, so use it. */
584          uregs.xip = (((UWord*)uregs.xbp)[1]);
585          if (0 == uregs.xip || 1 == uregs.xip) break;
586          uregs.xsp = uregs.xbp + sizeof(Addr) /*saved %rbp*/
587                                + sizeof(Addr) /*ra*/;
588          uregs.xbp = (((UWord*)uregs.xbp)[0]);
589          if (sps) sps[i] = uregs.xsp;
590          if (fps) fps[i] = uregs.xbp;
591          ips[i++] = uregs.xip - 1; /* -1: refer to calling insn, not the RA */
592          if (debug)
593             VG_(printf)("     ipsF[%d]=%#08lx\n", i-1, ips[i-1]);
594          uregs.xip = uregs.xip - 1; /* as per comment at the head of this loop */
595          if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
596          continue;
597       }
598 
599       /* Last-ditch hack (evidently GDB does something similar).  We
600          are in the middle of nowhere and we have a nonsense value for
601          the frame pointer.  If the stack pointer is still valid,
602          assume that what it points at is a return address.  Yes,
603          desperate measures.  Could do better here:
604          - check that the supposed return address is in
605            an executable page
606          - check that the supposed return address is just after a call insn
607          - given those two checks, don't just consider *sp as the return
608            address; instead scan a likely section of stack (eg sp .. sp+256)
609            and use suitable values found there.
610       */
611       if (fp_min <= uregs.xsp && uregs.xsp < fp_max) {
612          uregs.xip = ((UWord*)uregs.xsp)[0];
613          if (0 == uregs.xip || 1 == uregs.xip) break;
614          if (sps) sps[i] = uregs.xsp;
615          if (fps) fps[i] = uregs.xbp;
616          ips[i++] = uregs.xip == 0
617                     ? 0 /* sp[0] == 0 ==> stuck at the bottom of a
618                            thread stack */
619                     : uregs.xip - 1;
620                         /* -1: refer to calling insn, not the RA */
621          if (debug)
622             VG_(printf)("     ipsH[%d]=%#08lx\n", i-1, ips[i-1]);
623          uregs.xip = uregs.xip - 1; /* as per comment at the head of this loop */
624          uregs.xsp += 8;
625          if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
626          continue;
627       }
628 
629       /* No luck at all.  We have to give up. */
630       break;
631    }
632 
633    n_found = i;
634    return n_found;
635 }
636 
637 #endif
638 
639 /* -----------------------ppc32/64 ---------------------- */
640 
641 #if defined(VGP_ppc32_linux) || defined(VGP_ppc64be_linux) \
642     || defined(VGP_ppc64le_linux)
643 
VG_(get_StackTrace_wrk)644 UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
645                                /*OUT*/Addr* ips, UInt max_n_ips,
646                                /*OUT*/Addr* sps, /*OUT*/Addr* fps,
647                                const UnwindStartRegs* startRegs,
648                                Addr fp_max_orig )
649 {
650    Bool  lr_is_first_RA = False;
651 #  if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
652    Word redir_stack_size = 0;
653    Word redirs_used      = 0;
654 #  endif
655    const Int cmrf = VG_(clo_merge_recursive_frames);
656 
657    Bool  debug = False;
658    Int   i;
659    Addr  fp_max;
660    UInt  n_found = 0;
661 
662    vg_assert(sizeof(Addr) == sizeof(UWord));
663    vg_assert(sizeof(Addr) == sizeof(void*));
664 
665    Addr ip = (Addr)startRegs->r_pc;
666    Addr sp = (Addr)startRegs->r_sp;
667    Addr fp = sp;
668 #  if defined(VGP_ppc32_linux)
669    Addr lr = startRegs->misc.PPC32.r_lr;
670 #  elif defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
671    Addr lr = startRegs->misc.PPC64.r_lr;
672 #  endif
673    Addr fp_min = sp;
674 
675    /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
676       stopping when the trail goes cold, which we guess to be
677       when FP is not a reasonable stack location. */
678 
679    // JRS 2002-sep-17: hack, to round up fp_max to the end of the
680    // current page, at least.  Dunno if it helps.
681    // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
682    fp_max = VG_PGROUNDUP(fp_max_orig);
683    if (fp_max >= sizeof(Addr))
684       fp_max -= sizeof(Addr);
685 
686    if (debug)
687       VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
688                   "fp_max=0x%lx ip=0x%lx fp=0x%lx\n",
689 		  max_n_ips, fp_min, fp_max_orig, fp_max, ip, fp);
690 
691    /* Assertion broken before main() is reached in pthreaded programs;  the
692     * offending stack traces only have one item.  --njn, 2002-aug-16 */
693    /* vg_assert(fp_min <= fp_max);*/
694    if (fp_min + 512 >= fp_max) {
695       /* If the stack limits look bogus, don't poke around ... but
696          don't bomb out either. */
697       if (sps) sps[0] = sp;
698       if (fps) fps[0] = fp;
699       ips[0] = ip;
700       return 1;
701    }
702 
703    /* fp is %r1.  ip is %cia.  Note, ppc uses r1 as both the stack and
704       frame pointers. */
705 
706 #  if defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)
707    redir_stack_size = VEX_GUEST_PPC64_REDIR_STACK_SIZE;
708    redirs_used      = 0;
709 #  endif
710 
711 #  if defined(VG_PLAT_USES_PPCTOC) || defined (VGP_ppc64le_linux)
712    /* Deal with bogus LR values caused by function
713       interception/wrapping on ppc-TOC platforms; see comment on
714       similar code a few lines further down. */
715    if (lr == (Addr)&VG_(ppctoc_magic_redirect_return_stub)
716        && VG_(is_valid_tid)(tid_if_known)) {
717       Word hsp = VG_(threads)[tid_if_known].arch.vex.guest_REDIR_SP;
718       redirs_used++;
719       if (hsp >= 1 && hsp < redir_stack_size)
720          lr = VG_(threads)[tid_if_known]
721                  .arch.vex.guest_REDIR_STACK[hsp-1];
722    }
723 #  endif
724 
725    /* We have to determine whether or not LR currently holds this fn
726       (call it F)'s return address.  It might not if F has previously
727       called some other function, hence overwriting LR with a pointer
728       to some part of F.  Hence if LR and IP point to the same
729       function then we conclude LR does not hold this function's
730       return address; instead the LR at entry must have been saved in
731       the stack by F's prologue and so we must get it from there
732       instead.  Note all this guff only applies to the innermost
733       frame. */
734    lr_is_first_RA = False;
735    {
736       const HChar *buf_lr, *buf_ip;
737       /* The following conditional looks grossly inefficient and
738          surely could be majorly improved, with not much effort. */
739       if (VG_(get_fnname_raw) (lr, &buf_lr)) {
740          HChar buf_lr_copy[VG_(strlen)(buf_lr) + 1];
741          VG_(strcpy)(buf_lr_copy, buf_lr);
742          if (VG_(get_fnname_raw) (ip, &buf_ip))
743             if (VG_(strcmp)(buf_lr_copy, buf_ip))
744                lr_is_first_RA = True;
745       }
746    }
747 
748    if (sps) sps[0] = fp; /* NB. not sp */
749    if (fps) fps[0] = fp;
750    ips[0] = ip;
751    i = 1;
752 
753    if (fp_min <= fp && fp < fp_max-VG_WORDSIZE+1) {
754 
755       /* initial FP is sane; keep going */
756       fp = (((UWord*)fp)[0]);
757 
758       while (True) {
759 
760         /* On ppc64-linux (ppc64-elf, really), the lr save
761            slot is 2 words back from sp, whereas on ppc32-elf(?) it's
762            only one word back. */
763 #        if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
764          const Int lr_offset = 2;
765 #        else
766          const Int lr_offset = 1;
767 #        endif
768 
769          if (i >= max_n_ips)
770             break;
771 
772          /* Try to derive a new (ip,fp) pair from the current set. */
773 
774          if (fp_min <= fp && fp <= fp_max - lr_offset * sizeof(UWord)) {
775             /* fp looks sane, so use it. */
776 
777             if (i == 1 && lr_is_first_RA)
778                ip = lr;
779             else
780                ip = (((UWord*)fp)[lr_offset]);
781 
782 #           if defined(VG_PLAT_USES_PPCTOC) || defined(VGP_ppc64le_linux)
783             /* Nasty hack to do with function replacement/wrapping on
784                ppc64-linux.  If LR points to our magic return stub,
785                then we are in a wrapped or intercepted function, in
786                which LR has been messed with.  The original LR will
787                have been pushed onto the thread's hidden REDIR stack
788                one down from the top (top element is the saved R2) and
789                so we should restore the value from there instead.
790                Since nested redirections can and do happen, we keep
791                track of the number of nested LRs used by the unwinding
792                so far with 'redirs_used'. */
793             if (ip == (Addr)&VG_(ppctoc_magic_redirect_return_stub)
794                 && VG_(is_valid_tid)(tid_if_known)) {
795                Word hsp = VG_(threads)[tid_if_known]
796                              .arch.vex.guest_REDIR_SP;
797                hsp -= 2 * redirs_used;
798                redirs_used ++;
799                if (hsp >= 1 && hsp < redir_stack_size)
800                   ip = VG_(threads)[tid_if_known]
801                           .arch.vex.guest_REDIR_STACK[hsp-1];
802             }
803 #           endif
804 
805             if (0 == ip || 1 == ip) break;
806             if (sps) sps[i] = fp; /* NB. not sp */
807             if (fps) fps[i] = fp;
808             fp = (((UWord*)fp)[0]);
809             ips[i++] = ip - 1; /* -1: refer to calling insn, not the RA */
810             if (debug)
811                VG_(printf)("     ipsF[%d]=%#08lx\n", i-1, ips[i-1]);
812             ip = ip - 1; /* ip is probably dead at this point, but
813                             play safe, a la x86/amd64 above.  See
814                             extensive comments above. */
815             if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
816             continue;
817          }
818 
819          /* No luck there.  We have to give up. */
820          break;
821       }
822    }
823 
824    n_found = i;
825    return n_found;
826 }
827 
828 #endif
829 
830 /* ------------------------ arm ------------------------- */
831 
832 #if defined(VGP_arm_linux)
833 
in_same_fn(Addr a1,Addr a2)834 static Bool in_same_fn ( Addr a1, Addr a2 )
835 {
836    const HChar *buf_a1, *buf_a2;
837    /* The following conditional looks grossly inefficient and
838       surely could be majorly improved, with not much effort. */
839    if (VG_(get_fnname_raw) (a1, &buf_a1)) {
840       HChar buf_a1_copy[VG_(strlen)(buf_a1) + 1];
841       VG_(strcpy)(buf_a1_copy, buf_a1);
842       if (VG_(get_fnname_raw) (a2, &buf_a2))
843          if (VG_(strcmp)(buf_a1_copy, buf_a2))
844             return True;
845    }
846    return False;
847 }
848 
in_same_page(Addr a1,Addr a2)849 static Bool in_same_page ( Addr a1, Addr a2 ) {
850    return (a1 & ~0xFFF) == (a2 & ~0xFFF);
851 }
852 
/* Absolute difference of two addresses. */
static Addr abs_diff ( Addr a1, Addr a2 ) {
   if (a1 > a2)
      return a1 - a2;
   return a2 - a1;
}
856 
has_XT_perms(Addr a)857 static Bool has_XT_perms ( Addr a )
858 {
859    NSegment const* seg = VG_(am_find_nsegment)(a);
860    return seg && seg->hasX && seg->hasT;
861 }
862 
looks_like_Thumb_call32(UShort w0,UShort w1)863 static Bool looks_like_Thumb_call32 ( UShort w0, UShort w1 )
864 {
865    if (0)
866       VG_(printf)("isT32call %04x %04x\n", (UInt)w0, (UInt)w1);
867    // BL  simm26
868    if ((w0 & 0xF800) == 0xF000 && (w1 & 0xC000) == 0xC000) return True;
869    // BLX simm26
870    if ((w0 & 0xF800) == 0xF000 && (w1 & 0xC000) == 0xC000) return True;
871    return False;
872 }
873 
/* Heuristic: does the 16-bit Thumb insn 'w0' look like a call?
   Currently no 16-bit encodings are recognised, so this always
   answers False; it exists so looks_like_RA can treat 16- and 32-bit
   candidates uniformly.  NOTE(review): BLX-register (a 16-bit Thumb
   call) is presumably the candidate to add here — confirm before
   extending. */
static Bool looks_like_Thumb_call16 ( UShort w0 )
{
   return False;
}
878 
looks_like_ARM_call(UInt a0)879 static Bool looks_like_ARM_call ( UInt a0 )
880 {
881    if (0)
882       VG_(printf)("isA32call %08x\n", a0);
883    // Leading E forces unconditional only -- fix
884    if ((a0 & 0xFF000000) == 0xEB000000) return True;
885    return False;
886 }
887 
looks_like_RA(Addr ra)888 static Bool looks_like_RA ( Addr ra )
889 {
890    /* 'ra' is a plausible return address if it points to
891        an instruction after a call insn. */
892    Bool isT = (ra & 1);
893    if (isT) {
894       // returning to Thumb code
895       ra &= ~1;
896       ra -= 4;
897       if (has_XT_perms(ra)) {
898          UShort w0 = *(UShort*)ra;
899          UShort w1 = in_same_page(ra, ra+2) ? *(UShort*)(ra+2) : 0;
900          if (looks_like_Thumb_call16(w1) || looks_like_Thumb_call32(w0,w1))
901             return True;
902       }
903    } else {
904       // ARM
905       ra &= ~3;
906       ra -= 4;
907       if (has_XT_perms(ra)) {
908          UInt a0 = *(UInt*)ra;
909          if (looks_like_ARM_call(a0))
910             return True;
911       }
912    }
913    return False;
914 }
915 
/* arm-linux stack unwinder.  Fills ips[0 .. max_n_ips-1] with the
   unwound program counters starting from 'startRegs', optionally
   recording the per-frame stack pointers in sps[] and (always-zero
   here) frame pointers in fps[].  'fp_max_orig' bounds the stack
   from above.  Returns the number of frames recovered (>= 1).
   Strategy: Dwarf CFI first; if that runs dry, fall back to stack
   scanning when the user has enabled it. */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   /* Max depth of recursive-frame merging; 0 disables it. */
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs uregs;
   /* Mask off the Thumb bit from the start PC. */
   uregs.r15 = startRegs->r_pc & 0xFFFFFFFE;
   uregs.r14 = startRegs->misc.ARM.r14;
   uregs.r13 = startRegs->r_sp;
   uregs.r12 = startRegs->misc.ARM.r12;
   uregs.r11 = startRegs->misc.ARM.r11;
   uregs.r7  = startRegs->misc.ARM.r7;
   Addr fp_min = uregs.r13;

   /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
      stopping when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */

   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
   // current page, at least.  Dunno if it helps.
   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("\nmax_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
                  "fp_max=0x%lx r15=0x%lx r13=0x%lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.r15, uregs.r13);

   /* Assertion broken before main() is reached in pthreaded programs;  the
    * offending stack traces only have one item.  --njn, 2002-aug-16 */
   /* vg_assert(fp_min <= fp_max);*/
   // On Darwin, this kicks in for pthread-related stack traces, so they're
   // only 1 entry long which is wrong.
   if (fp_min + 512 >= fp_max) {
      /* If the stack limits look bogus, don't poke around ... but
         don't bomb out either. */
      if (sps) sps[0] = uregs.r13;
      if (fps) fps[0] = 0;
      ips[0] = uregs.r15;
      return 1;
   }

   /* Record the innermost frame unconditionally. */

   if (sps) sps[0] = uregs.r13;
   if (fps) fps[0] = 0;
   ips[0] = uregs.r15;
   i = 1;

   /* Loop unwinding the stack. */
   Bool do_stack_scan = False;

   /* First try the Official Way, using Dwarf CFI. */
   while (True) {
      if (debug) {
         VG_(printf)("i: %d, r15: 0x%lx, r13: 0x%lx\n",
                     i, uregs.r15, uregs.r13);
      }

      if (i >= max_n_ips)
         break;

      if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
         if (sps) sps[i] = uregs.r13;
         if (fps) fps[i] = 0;
         /* Strip the Thumb bit and subtract 1 so the recorded address
            refers to the calling insn rather than the return point
            (same -1 adjustment as the other unwinders in this file). */
         ips[i++] = (uregs.r15 & 0xFFFFFFFE) - 1;
         if (debug)
            VG_(printf)("USING CFI: r15: 0x%lx, r13: 0x%lx\n",
                        uregs.r15, uregs.r13);
         uregs.r15 = (uregs.r15 & 0xFFFFFFFE) - 1;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }

      /* No luck.  We have to give up. */
      do_stack_scan = True;
      break;
   }

   /* Now try Plan B (maybe) -- stack scanning.  This often gives
      pretty bad results, so this has to be enabled explicitly by the
      user.  Only scan if CFI stalled before reaching the
      user-configured threshold depth. */
   if (do_stack_scan
       && i < max_n_ips && i < (Int)VG_(clo_unw_stack_scan_thresh)) {
      Int  nByStackScan = 0;
      Addr lr = uregs.r14;
      Addr sp = uregs.r13 & ~3;
      Addr pc = uregs.r15;
      // First see if LR contains
      // something that could be a valid return address.
      if (!in_same_fn(lr, pc) && looks_like_RA(lr)) {
         // take it only if 'cand' isn't obviously a duplicate
         // of the last found IP value
         Addr cand = (lr & 0xFFFFFFFE) - 1;
         if (abs_diff(cand, ips[i-1]) > 1) {
            if (sps) sps[i] = 0;
            if (fps) fps[i] = 0;
            ips[i++] = cand;
            if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
            nByStackScan++;
         }
      }
      /* Walk the stack a word at a time, but never off the page the
         starting SP lives on. */
      while (in_same_page(sp, uregs.r13)) {
         if (i >= max_n_ips)
            break;
         // we're in the same page; fairly safe to keep going
         UWord w = *(UWord*)(sp & ~0x3);
         if (looks_like_RA(w)) {
            Addr cand = (w & 0xFFFFFFFE) - 1;
            // take it only if 'cand' isn't obviously a duplicate
            // of the last found IP value
            if (abs_diff(cand, ips[i-1]) > 1) {
               if (sps) sps[i] = 0;
               if (fps) fps[i] = 0;
               ips[i++] = cand;
               if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
               /* Cap the number of frames recoverable by scanning. */
               if (++nByStackScan >= VG_(clo_unw_stack_scan_frames)) break;
            }
         }
         sp += 4;
      }
   }

   n_found = i;
   return n_found;
}
1055 
1056 #endif
1057 
1058 /* ------------------------ arm64 ------------------------- */
1059 
1060 #if defined(VGP_arm64_linux)
1061 
/* arm64-linux stack unwinder.  Fills ips[0 .. max_n_ips-1] with
   unwound program counters starting from 'startRegs', optionally
   recording stack pointers in sps[] and frame pointers (x29) in
   fps[].  'fp_max_orig' bounds the stack from above.  Returns the
   number of frames recovered (>= 1).  Unwinding is CFI-only; there
   is no stack-scanning fallback on this platform. */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   /* Max depth of recursive-frame merging; 0 disables it. */
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs uregs;
   uregs.pc = startRegs->r_pc;
   uregs.sp = startRegs->r_sp;
   uregs.x30 = startRegs->misc.ARM64.x30;   /* link register */
   uregs.x29 = startRegs->misc.ARM64.x29;   /* frame pointer */
   Addr fp_min = uregs.sp;

   /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
      stopping when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */

   // JRS 2002-sep-17: hack, to round up fp_max to the end of the
   // current page, at least.  Dunno if it helps.
   // NJN 2002-sep-17: seems to -- stack traces look like 1.0.X again
   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("\nmax_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
                  "fp_max=0x%lx PC=0x%lx SP=0x%lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.pc, uregs.sp);

   /* Assertion broken before main() is reached in pthreaded programs;  the
    * offending stack traces only have one item.  --njn, 2002-aug-16 */
   /* vg_assert(fp_min <= fp_max);*/
   // On Darwin, this kicks in for pthread-related stack traces, so they're
   // only 1 entry long which is wrong.
   if (fp_min + 512 >= fp_max) {
      /* If the stack limits look bogus, don't poke around ... but
         don't bomb out either. */
      if (sps) sps[0] = uregs.sp;
      if (fps) fps[0] = uregs.x29;
      ips[0] = uregs.pc;
      return 1;
   }

   /* Record the innermost frame unconditionally. */

   if (sps) sps[0] = uregs.sp;
   if (fps) fps[0] = uregs.x29;
   ips[0] = uregs.pc;
   i = 1;

   /* Loop unwinding the stack, using CFI. */
   while (True) {
      if (debug) {
         VG_(printf)("i: %d, pc: 0x%lx, sp: 0x%lx\n",
                     i, uregs.pc, uregs.sp);
      }

      if (i >= max_n_ips)
         break;

      if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
         if (sps) sps[i] = uregs.sp;
         if (fps) fps[i] = uregs.x29;
         /* -1 so the recorded address refers to the calling insn
            rather than the return point. */
         ips[i++] = uregs.pc - 1;
         if (debug)
            VG_(printf)("USING CFI: pc: 0x%lx, sp: 0x%lx\n",
                        uregs.pc, uregs.sp);
         uregs.pc = uregs.pc - 1;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }

      /* No luck.  We have to give up. */
      break;
   }

   n_found = i;
   return n_found;
}
1151 
1152 #endif
1153 
1154 /* ------------------------ s390x ------------------------- */
1155 
1156 #if defined(VGP_s390x_linux)
1157 
/* s390x-linux stack unwinder.  Fills ips[0 .. max_n_ips-1] with
   unwound program counters starting from 'startRegs', optionally
   recording stack pointers in sps[] and frame pointers in fps[].
   Returns the number of frames recovered (>= 1).  Unwinding relies
   entirely on CFI (eh_frame); there is no backchain walk. */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   /* Max depth of recursive-frame merging; 0 disables it. */
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs uregs;
   uregs.ia = startRegs->r_pc;    /* instruction address */
   uregs.sp = startRegs->r_sp;
   Addr fp_min = uregs.sp;
   uregs.fp = startRegs->misc.S390X.r_fp;
   uregs.lr = startRegs->misc.S390X.r_lr;

   /* Round the upper stack bound up to the end of the page, then pull
      it back one word (same hack as the other unwinders here). */
   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
                  "fp_max=0x%lx IA=0x%lx SP=0x%lx FP=0x%lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.ia, uregs.sp,uregs.fp);

   /* The first frame is pretty obvious */
   ips[0] = uregs.ia;
   if (sps) sps[0] = uregs.sp;
   if (fps) fps[0] = uregs.fp;
   i = 1;

   /* for everything else we have to rely on the eh_frame. gcc defaults to
      not create a backchain and all the other  tools (like gdb) also have
      to use the CFI. */
   while (True) {
      if (i >= max_n_ips)
         break;

      if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
         if (sps) sps[i] = uregs.sp;
         if (fps) fps[i] = uregs.fp;
         /* -1 so the recorded address refers to the calling insn
            rather than the return point. */
         ips[i++] = uregs.ia - 1;
         uregs.ia = uregs.ia - 1;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }
      /* A problem on the first frame? Lets assume it was a bad jump.
         We will use the link register and the current stack and frame
         pointers and see if we can use the CFI in the next round. */
      if (i == 1) {
         /* NOTE(review): uregs.sp/uregs.fp are only reset when the
            caller asked for sps/fps; unwind state thus depends on
            which output arrays were requested — looks unintended,
            preserved as-is. */
         if (sps) {
            sps[i] = sps[0];
            uregs.sp = sps[0];
         }
         if (fps) {
            fps[i] = fps[0];
            uregs.fp = fps[0];
         }
         uregs.ia = uregs.lr - 1;
         ips[i++] = uregs.lr - 1;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }

      /* No luck.  We have to give up. */
      break;
   }

   n_found = i;
   return n_found;
}
1236 
1237 #endif
1238 
1239 /* ------------------------ mips 32/64 ------------------------- */
1240 #if defined(VGP_mips32_linux) || defined(VGP_mips64_linux)
/* mips32/mips64-linux stack unwinder.  Fills ips[0 .. max_n_ips-1]
   with unwound program counters starting from 'startRegs'.  Tries
   CFI first; failing that, scans the function's prologue for
   stack-pointer-adjusting instructions to reconstruct the frame size,
   and finally falls back to the RA register for the first frame.
   Returns the number of frames recovered (>= 1). */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   /* Max depth of recursive-frame merging; 0 disables it. */
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs uregs;
   uregs.pc = startRegs->r_pc;
   uregs.sp = startRegs->r_sp;
   Addr fp_min = uregs.sp;

#if defined(VGP_mips32_linux)
   uregs.fp = startRegs->misc.MIPS32.r30;   /* frame pointer */
   uregs.ra = startRegs->misc.MIPS32.r31;   /* return address reg */
#elif defined(VGP_mips64_linux)
   uregs.fp = startRegs->misc.MIPS64.r30;
   uregs.ra = startRegs->misc.MIPS64.r31;
#endif

   /* Snaffle IPs from the client's stack into ips[0 .. max_n_ips-1],
      stopping when the trail goes cold, which we guess to be
      when FP is not a reasonable stack location. */

   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
                  "fp_max=0x%lx pc=0x%lx sp=0x%lx fp=0x%lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.pc, uregs.sp, uregs.fp);

   /* Record the innermost frame unconditionally. */
   if (sps) sps[0] = uregs.sp;
   if (fps) fps[0] = uregs.fp;
   ips[0] = uregs.pc;
   i = 1;

   /* Loop unwinding the stack. */

   while (True) {
      if (debug) {
         VG_(printf)("i: %d, pc: 0x%lx, sp: 0x%lx, ra: 0x%lx\n",
                     i, uregs.pc, uregs.sp, uregs.ra);
      }
      if (i >= max_n_ips)
         break;

      /* Keep a copy so CFI results can be discarded if bogus. */
      D3UnwindRegs uregs_copy = uregs;
      if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
         if (debug)
            VG_(printf)("USING CFI: pc: 0x%lx, sp: 0x%lx, ra: 0x%lx\n",
                        uregs.pc, uregs.sp, uregs.ra);
         if (0 != uregs.pc && 1 != uregs.pc) {
            if (sps) sps[i] = uregs.sp;
            if (fps) fps[i] = uregs.fp;
            /* -4: step back one insn so the address refers to the
               call site, not the return point. */
            ips[i++] = uregs.pc - 4;
            uregs.pc = uregs.pc - 4;
            if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
            continue;
         } else
            uregs = uregs_copy;
      }

      /* Plan B: scan the function prologue for sp-decrementing
         instructions and sum up the frame size. */
      int seen_sp_adjust = 0;
      long frame_offset = 0;
      PtrdiffT offset;
      if (VG_(get_inst_offset_in_function)(uregs.pc, &offset)) {
         Addr start_pc = uregs.pc - offset;   /* function entry */
         Addr limit_pc = uregs.pc;
         Addr cur_pc;
         for (cur_pc = start_pc; cur_pc < limit_pc; cur_pc += 4) {
            unsigned long inst, high_word, low_word;
            unsigned long * cur_inst;
            /* Fetch the instruction.   */
            cur_inst = (unsigned long *)cur_pc;
            inst = *((UInt *) cur_inst);
            if(debug)
               VG_(printf)("cur_pc: 0x%lx, inst: 0x%lx\n", cur_pc, inst);

            /* Save some code by pre-extracting some useful fields.  */
            high_word = (inst >> 16) & 0xffff;
            low_word = inst & 0xffff;

            if (high_word == 0x27bd        /* addiu $sp,$sp,-i */
                || high_word == 0x23bd     /* addi $sp,$sp,-i */
                || high_word == 0x67bd) {  /* daddiu $sp,$sp,-i */
               if (low_word & 0x8000)	/* negative stack adjustment? */
                  frame_offset += 0x10000 - low_word;
               else
                  /* Exit loop if a positive stack adjustment is found, which
                     usually means that the stack cleanup code in the function
                     epilogue is reached.  */
               break;
            /* NOTE(review): despite the indentation, this statement
               is inside the outer 'if' and is only reached on the
               negative-adjustment path (the 'else' above breaks). */
            seen_sp_adjust = 1;
            }
         }
         if(debug)
            VG_(printf)("offset: 0x%lx\n", frame_offset);
      }
      if (seen_sp_adjust) {
         if (0 == uregs.pc || 1 == uregs.pc) break;
         /* RA pointing right back at us means no progress is possible. */
         if (uregs.pc == uregs.ra - 8) break;
         if (sps) {
            sps[i] = uregs.sp + frame_offset;
         }
         uregs.sp = uregs.sp + frame_offset;

         /* NOTE(review): uregs.fp is only reset when fps was
            requested — unwind state depends on the output arrays
            asked for; preserved as-is. */
         if (fps) {
            fps[i] = fps[0];
            uregs.fp = fps[0];
         }
         if (0 == uregs.ra || 1 == uregs.ra) break;
         /* -8: skip the jal/jalr and its branch-delay slot. */
         uregs.pc = uregs.ra - 8;
         ips[i++] = uregs.ra - 8;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }

      /* Plan C, first frame only: assume a bad jump and retry from
         the RA register with the original sp/fp. */
      if (i == 1) {
         if (sps) {
            sps[i] = sps[0];
            uregs.sp = sps[0];
         }
         if (fps) {
            fps[i] = fps[0];
            uregs.fp = fps[0];
         }
         if (0 == uregs.ra || 1 == uregs.ra) break;
         uregs.pc = uregs.ra - 8;
         ips[i++] = uregs.ra - 8;
         if (UNLIKELY(cmrf > 0)) {RECURSIVE_MERGE(cmrf,ips,i);};
         continue;
      }
      /* No luck.  We have to give up. */
      break;
   }

   n_found = i;
   return n_found;
}
1391 
1392 #endif
1393 
1394 /* ------------------------ tilegx ------------------------- */
1395 #if defined(VGP_tilegx_linux)
/* tilegx-linux stack unwinder.  Fills ips[0 .. max_n_ips-1] with
   unwound program counters starting from 'startRegs'.  Tries CFI
   first; failing that, scans instruction bundles backwards from the
   current pc for sp-adjusting addi/addli forms to recover the frame
   size; finally falls back to the link register for the first frame.
   Returns the number of frames recovered (>= 1). */
UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                               /*OUT*/Addr* ips, UInt max_n_ips,
                               /*OUT*/Addr* sps, /*OUT*/Addr* fps,
                               const UnwindStartRegs* startRegs,
                               Addr fp_max_orig )
{
   Bool  debug = False;
   Int   i;
   Addr  fp_max;
   UInt  n_found = 0;
   /* Max depth of recursive-frame merging; 0 disables it. */
   const Int cmrf = VG_(clo_merge_recursive_frames);

   vg_assert(sizeof(Addr) == sizeof(UWord));
   vg_assert(sizeof(Addr) == sizeof(void*));

   D3UnwindRegs uregs;
   uregs.pc = startRegs->r_pc;
   uregs.sp = startRegs->r_sp;
   Addr fp_min = uregs.sp;

   uregs.fp = startRegs->misc.TILEGX.r52;   /* frame pointer */
   uregs.lr = startRegs->misc.TILEGX.r55;   /* link register */

   /* Round the upper stack bound up to the end of the page, then pull
      it back one word (same hack as the other unwinders here). */
   fp_max = VG_PGROUNDUP(fp_max_orig);
   if (fp_max >= sizeof(Addr))
      fp_max -= sizeof(Addr);

   if (debug)
      VG_(printf)("max_n_ips=%d fp_min=0x%lx fp_max_orig=0x%lx, "
                  "fp_max=0x%lx pc=0x%lx sp=0x%lx fp=0x%lx\n",
                  max_n_ips, fp_min, fp_max_orig, fp_max,
                  uregs.pc, uregs.sp, uregs.fp);

   /* Record the innermost frame unconditionally. */
   if (sps) sps[0] = uregs.sp;
   if (fps) fps[0] = uregs.fp;
   ips[0] = uregs.pc;
   i = 1;

   /* Loop unwinding the stack. */
   while (True) {
      if (debug) {
         VG_(printf)("i: %d, pc: 0x%lx, sp: 0x%lx, lr: 0x%lx\n",
                     i, uregs.pc, uregs.sp, uregs.lr);
     }
     if (i >= max_n_ips)
        break;

     /* Keep a copy so CFI results can be discarded if bogus. */
     D3UnwindRegs uregs_copy = uregs;
     if (VG_(use_CF_info)( &uregs, fp_min, fp_max )) {
        if (debug)
           VG_(printf)("USING CFI: pc: 0x%lx, sp: 0x%lx, fp: 0x%lx, lr: 0x%lx\n",
                       uregs.pc, uregs.sp, uregs.fp, uregs.lr);
        /* Reject a pc of 0/1 or one pointing into the stack itself. */
        if (0 != uregs.pc && 1 != uregs.pc &&
            (uregs.pc < fp_min || uregs.pc > fp_max)) {
           if (sps) sps[i] = uregs.sp;
           if (fps) fps[i] = uregs.fp;
           /* Only record if CFI actually made progress; -8 steps back
              one instruction bundle to the call site. */
           if (uregs.pc != uregs_copy.pc && uregs.sp != uregs_copy.sp)
              ips[i++] = uregs.pc - 8;
           uregs.pc = uregs.pc - 8;
           if (UNLIKELY(cmrf > 0)) { RECURSIVE_MERGE(cmrf,ips,i); };
           continue;
        } else
           uregs = uregs_copy;
     }

     /* Plan B: decode bundles backwards looking for sp adjustments. */
     Long frame_offset = 0;
     PtrdiffT offset;
     if (VG_(get_inst_offset_in_function)(uregs.pc, &offset)) {
        Addr start_pc = uregs.pc;
        Addr limit_pc = uregs.pc - offset;   /* function entry */
        Addr cur_pc;
        /* Try to find any stack adjustment from current instruction
           bundles downward. */
        for (cur_pc = start_pc; cur_pc > limit_pc; cur_pc -= 8) {
           ULong inst;
           Long off = 0;
           ULong* cur_inst;
           /* Fetch the instruction.   */
           cur_inst = (ULong *)cur_pc;
           inst = *cur_inst;
           if(debug)
              VG_(printf)("cur_pc: 0x%lx, inst: 0x%lx\n", cur_pc, inst);

           if ((inst & 0xC000000000000000ULL) == 0) {
              /* Bundle is X type. */
             if ((inst & 0xC000000070000fffULL) ==
                 (0x0000000010000db6ULL)) {
                /* addli at X0 */
                off = (short)(0xFFFF & (inst >> 12));
             } else if ((inst & 0xF80007ff80000000ULL) ==
                        (0x000006db00000000ULL)) {
                /* addli at X1 addli*/
                off = (short)(0xFFFF & (inst >> 43));
             } else if ((inst & 0xC00000007FF00FFFULL) ==
                        (0x0000000040100db6ULL)) {
                /* addi at X0 */
                off = (char)(0xFF & (inst >> 12));
             } else if ((inst & 0xFFF807ff80000000ULL) ==
                        (0x180806db00000000ULL)) {
                /* addi at X1 */
                off = (char)(0xFF & (inst >> 43));
             }
           } else {
              /* Bundle is Y type. */
              if ((inst & 0x0000000078000FFFULL) ==
                  (0x0000000000000db6ULL)) {
                 /* addi at Y0 */
                 off = (char)(0xFF & (inst >> 12));
              } else if ((inst & 0x3C0007FF80000000ULL) ==
                         (0x040006db00000000ULL)) {
                 /* addi at Y1 */
                 off = (char)(0xFF & (inst >> 43));
              }
           }

           if(debug && off)
              VG_(printf)("offset: -0x%lx\n", -off);

           if (off < 0) {
              /* frame offset should be modular of 8 */
              vg_assert((off & 7) == 0);
              frame_offset += off;
           } else if (off > 0)
              /* Exit loop if a positive stack adjustment is found, which
                 usually means that the stack cleanup code in the function
                 epilogue is reached.  */
             break;
        }
     }

     if (frame_offset < 0) {
        if (0 == uregs.pc || 1 == uregs.pc) break;

        /* Subtract the offset from the current stack. */
        uregs.sp = uregs.sp + (ULong)(-frame_offset);

        if (debug)
           VG_(printf)("offset: i: %d, pc: 0x%lx, sp: 0x%lx, lr: 0x%lx\n",
                       i, uregs.pc, uregs.sp, uregs.lr);

        /* NOTE(review): '&&' binds tighter than '||', so this reads
           pc == lr-8  OR  (lr-8 within [fp_min, fp_max]); i.e. reload
           LR from the stack if the current LR is useless (self) or
           points into the stack. */
        if (uregs.pc == uregs.lr - 8 ||
            uregs.lr - 8 >= fp_min && uregs.lr - 8 <= fp_max) {
           if (debug)
              VG_(printf)("new lr = 0x%lx\n", *(ULong*)uregs.sp);
           uregs.lr = *(ULong*)uregs.sp;
        }

        /* -8: step back one bundle so the address refers to the
           call site, not the return point. */
        uregs.pc = uregs.lr - 8;

        if (uregs.lr != 0) {
           /* Avoid the invalid pc = 0xffff...ff8 */
           if (sps)
              sps[i] = uregs.sp;

           if (fps)
              fps[i] = fps[0];

           ips[i++] = uregs.pc;

           if (UNLIKELY(cmrf > 0)) { RECURSIVE_MERGE(cmrf,ips,i); };
        }
        continue;
     }

     /* A special case for the 1st frame. Assume it was a bad jump.
        Use the link register "lr" and current stack and frame to
        try again. */
     if (i == 1) {
        /* NOTE(review): uregs.sp/uregs.fp are only reset when the
           caller asked for sps/fps — preserved as-is. */
        if (sps) {
           sps[1] = sps[0];
           uregs.sp = sps[0];
        }
        if (fps) {
           fps[1] = fps[0];
           uregs.fp = fps[0];
        }
        if (0 == uregs.lr || 1 == uregs.lr)
           break;

        uregs.pc = uregs.lr - 8;
        ips[i++] = uregs.lr - 8;
        if (UNLIKELY(cmrf > 0)) { RECURSIVE_MERGE(cmrf,ips,i); };
        continue;
     }
     /* No luck.  We have to give up. */
     break;
   }

   if (debug) {
      /* Display the back trace. */
      Int ii ;
      for ( ii = 0; ii < i; ii++) {
         if (sps) {
            VG_(printf)("%d: pc=%lx  ", ii, ips[ii]);
            VG_(printf)("sp=%lx\n", sps[ii]);
         } else {
            VG_(printf)("%d: pc=%lx\n", ii, ips[ii]);
         }
      }
   }

   n_found = i;
   return n_found;
}
1600 #endif
1601 
1602 /*------------------------------------------------------------*/
1603 /*---                                                      ---*/
1604 /*--- END platform-dependent unwinder worker functions     ---*/
1605 /*---                                                      ---*/
1606 /*------------------------------------------------------------*/
1607 
1608 /*------------------------------------------------------------*/
1609 /*--- Exported functions.                                  ---*/
1610 /*------------------------------------------------------------*/
1611 
/* Take a snapshot of the client stack of thread 'tid'.  Writes up to
   'max_n_ips' instruction pointers into 'ips' and, when non-NULL,
   fills 'sps'/'fps' with the matching stack- and frame-pointer values
   for each frame.  'first_ip_delta' is added to the initial program
   counter before unwinding begins.  Returns the number of frames
   recorded.  This wrapper only gathers the unwind start registers and
   the stack limits; the platform-specific heavy lifting is done by
   VG_(get_StackTrace_wrk). */
UInt VG_(get_StackTrace) ( ThreadId tid,
                           /*OUT*/StackTrace ips, UInt max_n_ips,
                           /*OUT*/StackTrace sps,
                           /*OUT*/StackTrace fps,
                           Word first_ip_delta )
{
   /* Get the register values with which to start the unwind. */
   UnwindStartRegs startRegs;
   VG_(memset)( &startRegs, 0, sizeof(startRegs) );
   VG_(get_UnwindStartRegs)( &startRegs, tid );

   /* Initial stack bounds: top from the thread state; bottom unknown
      here, possibly refined by VG_(stack_limits) below. */
   Addr stack_highest_byte = VG_(threads)[tid].client_stack_highest_byte;
   Addr stack_lowest_byte  = 0;

#  if defined(VGP_x86_linux)
   /* Nasty little hack to deal with syscalls - if libc is using its
      _dl_sysinfo_int80 function for syscalls (the TLS version does),
      then ip will always appear to be in that function when doing a
      syscall, not the actual libc function doing the syscall.  This
      check sees if IP is within that function, and pops the return
      address off the stack so that ip is placed within the library
      function calling the syscall.  This makes stack backtraces much
      more useful.

      The function is assumed to look like this (from glibc-2.3.6 sources):
         _dl_sysinfo_int80:
            int $0x80
            ret
      That is 3 (2+1) bytes long.  We could be more thorough and check
      the 3 bytes of the function are as expected, but I can't be
      bothered.
   */
   if (VG_(client__dl_sysinfo_int80) != 0 /* we know its address */
       && startRegs.r_pc >= VG_(client__dl_sysinfo_int80)
       && startRegs.r_pc < VG_(client__dl_sysinfo_int80)+3
       && VG_(am_is_valid_for_client)(startRegs.r_pc, sizeof(Addr),
                                      VKI_PROT_READ)) {
      /* Pop the return address off the client stack: PC becomes the
         saved return address, SP is advanced past it. */
      startRegs.r_pc  = (ULong) *(Addr*)(UWord)startRegs.r_sp;
      startRegs.r_sp += (ULong) sizeof(Addr);
   }
#  endif

   /* See if we can get a better idea of the stack limits */
   VG_(stack_limits)( (Addr)startRegs.r_sp,
                      &stack_lowest_byte, &stack_highest_byte );

   /* Take into account the first_ip_delta. */
   startRegs.r_pc += (Long)(Word)first_ip_delta;

   /* Disabled debug printout of the unwind start state. */
   if (0)
      VG_(printf)("tid %d: stack_highest=0x%08lx ip=0x%010llx "
                  "sp=0x%010llx\n",
		  tid, stack_highest_byte,
                  startRegs.r_pc, startRegs.r_sp);

   /* Delegate the actual unwinding to the per-platform worker. */
   return VG_(get_StackTrace_wrk)(tid, ips, max_n_ips,
                                       sps, fps,
                                       &startRegs,
                                       stack_highest_byte);
}
1672 
/* Callback for VG_(apply_StackTrace): print the description of one
   code address, producing one output line per inlined-call level that
   the InlIPCursor exposes for this address. */
static void printIpDesc(UInt n, Addr ip, void* uu_opaque)
{
   InlIPCursor *iipc = VG_(new_IIPC)(ip);
   Bool more = True;

   while (more) {
      const HChar *desc = VG_(describe_IP)(ip, iipc);
      if (VG_(clo_xml)) {
         VG_(printf_xml)("    %s\n", desc);
      } else {
         VG_(message)(Vg_UserMsg, "   %s %s\n",
                      n == 0 ? "at" : "by", desc);
      }
      /* Bump n so that only the very first line is labelled "at";
         all subsequent (including inlined) levels get "by". */
      n++;
      more = VG_(next_IIPC)(iipc);
   }
   VG_(delete_IIPC)(iipc);
}
1690 
1691 /* Print a StackTrace. */
VG_(pp_StackTrace)1692 void VG_(pp_StackTrace) ( StackTrace ips, UInt n_ips )
1693 {
1694    vg_assert( n_ips > 0 );
1695 
1696    if (VG_(clo_xml))
1697       VG_(printf_xml)("  <stack>\n");
1698 
1699    VG_(apply_StackTrace)( printIpDesc, NULL, ips, n_ips );
1700 
1701    if (VG_(clo_xml))
1702       VG_(printf_xml)("  </stack>\n");
1703 }
1704 
1705 /* Get and immediately print a StackTrace. */
VG_(get_and_pp_StackTrace)1706 void VG_(get_and_pp_StackTrace) ( ThreadId tid, UInt max_n_ips )
1707 {
1708    Addr ips[max_n_ips];
1709    UInt n_ips
1710       = VG_(get_StackTrace)(tid, ips, max_n_ips,
1711                             NULL/*array to dump SP values in*/,
1712                             NULL/*array to dump FP values in*/,
1713                             0/*first_ip_delta*/);
1714    VG_(pp_StackTrace)(ips, n_ips);
1715 }
1716 
VG_(apply_StackTrace)1717 void VG_(apply_StackTrace)(
1718         void(*action)(UInt n, Addr ip, void* opaque),
1719         void* opaque,
1720         StackTrace ips, UInt n_ips
1721      )
1722 {
1723    Bool main_done = False;
1724    Int i = 0;
1725 
1726    vg_assert(n_ips > 0);
1727    do {
1728       Addr ip = ips[i];
1729 
1730       // Stop after the first appearance of "main" or one of the other names
1731       // (the appearance of which is a pretty good sign that we've gone past
1732       // main without seeing it, for whatever reason)
1733       if ( ! VG_(clo_show_below_main) ) {
1734          Vg_FnNameKind kind = VG_(get_fnname_kind_from_IP)(ip);
1735          if (Vg_FnNameMain == kind || Vg_FnNameBelowMain == kind) {
1736             main_done = True;
1737          }
1738       }
1739 
1740       // Act on the ip
1741       action(i, ip, opaque);
1742 
1743       i++;
1744    } while (i < n_ips && !main_done);
1745 }
1746 
1747 
1748 /*--------------------------------------------------------------------*/
1749 /*--- end                                                          ---*/
1750 /*--------------------------------------------------------------------*/
1751