Searched refs:VG_IS_16_ALIGNED (Results 1 – 12 of 12) sorted by relevance
138 CHECK( VG_IS_16_ALIGNED(0x0) ); in test_VG_IS_XYZ_ALIGNED()
139 CHECK( ! VG_IS_16_ALIGNED(0x1) ); in test_VG_IS_XYZ_ALIGNED()
140 CHECK( ! VG_IS_16_ALIGNED(0x2) ); in test_VG_IS_XYZ_ALIGNED()
141 CHECK( ! VG_IS_16_ALIGNED(0x3) ); in test_VG_IS_XYZ_ALIGNED()
142 CHECK( ! VG_IS_16_ALIGNED(0x4) ); in test_VG_IS_XYZ_ALIGNED()
143 CHECK( ! VG_IS_16_ALIGNED(0x5) ); in test_VG_IS_XYZ_ALIGNED()
144 CHECK( ! VG_IS_16_ALIGNED(0x6) ); in test_VG_IS_XYZ_ALIGNED()
145 CHECK( ! VG_IS_16_ALIGNED(0x7) ); in test_VG_IS_XYZ_ALIGNED()
146 CHECK( ! VG_IS_16_ALIGNED(0x8) ); in test_VG_IS_XYZ_ALIGNED()
147 CHECK( ! VG_IS_16_ALIGNED(0x9) ); in test_VG_IS_XYZ_ALIGNED()
[all …]
154 vg_assert(VG_IS_16_ALIGNED(sizeof(struct vg_sig_private))); in VG_()
155 vg_assert(VG_IS_16_ALIGNED(sizeof(struct rt_sigframe))); in VG_()
164 vg_assert(VG_IS_16_ALIGNED(sp)); in VG_()
306 vg_assert(VG_IS_16_ALIGNED(sp)); in VG_()
150 vg_assert(VG_IS_16_ALIGNED(sizeof(struct hacky_sigframe))); in VG_()
161 vg_assert(VG_IS_16_ALIGNED(esp+4)); in VG_()
241 vg_assert(VG_IS_16_ALIGNED((Addr)frame + 4)); in VG_()
162 vg_assert(VG_IS_16_ALIGNED(sizeof(struct hacky_sigframe))); in VG_()
173 vg_assert(VG_IS_16_ALIGNED(rsp+8)); in VG_()
254 vg_assert(VG_IS_16_ALIGNED((Addr)frame + 8)); in VG_()
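The two signal-frame-builder results above encode the at-entry stack rule: the x86 asserts check esp+4 and the amd64 asserts check rsp+8, because on each ABI the stack pointer is 16-byte aligned just before a call, so after the return address (4 or 8 bytes) is pushed it is the biased value that lands on a 16-byte boundary. Below is a minimal sketch of how a frame builder could restore that invariant on amd64; the helper name and arithmetic are illustrative, not Valgrind's actual frame code.

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t Addr;   /* stand-in for Valgrind's Addr */
    #define VG_IS_16_ALIGNED(a) (0 == (((Addr)(a)) & (Addr)0xf))

    /* Illustrative only: round sp down so that sp % 16 == 8, i.e.
       (sp + 8) is 16-aligned, matching what the rsp+8 asserts above
       require of an amd64 frame at function entry. */
    static Addr align_for_amd64_entry(Addr sp)
    {
       /* Round (sp - 8) down to a 16-byte boundary, then undo the bias. */
       return ((sp - 8) & ~(Addr)0xf) + 8;
    }

    int main(void)
    {
       Addr sp = align_for_amd64_entry(0x7ffce3a1); /* arbitrary unaligned value */
       assert( VG_IS_16_ALIGNED(sp + 8) );  /* the invariant checked above */
       assert( sp <= 0x7ffce3a1 );          /* the stack only ever moves down */
       return 0;
    }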
656 vg_assert(VG_IS_16_ALIGNED(sp)); in VG_()
851 vg_assert(VG_IS_16_ALIGNED(sp)); in VG_()
172 vg_assert(VG_IS_16_ALIGNED(signo_addr)); in VG_()
765 vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestX86State,guest_XMM0))); in do_pre_run_checks()
780 vg_assert(VG_IS_16_ALIGNED(offsetof(VexGuestAMD64State,guest_YMM0))); in do_pre_run_checks()
790 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR0)); in do_pre_run_checks()
791 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR0)); in do_pre_run_checks()
792 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR0)); in do_pre_run_checks()
794 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_VSR1)); in do_pre_run_checks()
795 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_VSR1)); in do_pre_run_checks()
796 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow2.guest_VSR1)); in do_pre_run_checks()
802 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex.guest_D0)); in do_pre_run_checks()
803 vg_assert(VG_IS_16_ALIGNED(& tst->arch.vex_shadow1.guest_D0)); in do_pre_run_checks()
[all …]
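do_pre_run_checks verifies both layers of the alignment requirement for SIMD guest registers: the offsetof asserts check the struct layout, and the &tst->arch.vex... asserts check the registers' in-memory addresses, since a 16-aligned offset only yields a 16-aligned address when the enclosing state is itself 16-aligned. A self-contained sketch of the same two-level check follows; GuestState and its fields are invented for illustration, not the real VEX guest state.

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define IS_16_ALIGNED(x) (0 == ((uintptr_t)(x) & 0xf))

    /* Hypothetical guest-state layout: the header is padded so the
       SIMD register file starts on a 16-byte boundary, as the
       offsetof asserts above require of guest_XMM0 / guest_YMM0. */
    typedef struct {
       uint64_t guest_RIP;
       uint64_t pad;              /* keeps xmm[] at offset 16 */
       uint8_t  xmm[16][16];      /* 16 x 128-bit registers   */
    } GuestState;

    int main(void)
    {
       /* Layout-level check, mirroring the offsetof asserts. */
       assert( IS_16_ALIGNED(offsetof(GuestState, xmm)) );

       /* Address-level check, mirroring the &tst->arch.vex... asserts:
          the containing object must be 16-aligned too, hence _Alignas. */
       static _Alignas(16) GuestState gs;
       assert( IS_16_ALIGNED(&gs.xmm[0]) );
       return 0;
    }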
188 #define VG_IS_16_ALIGNED(aaa_p) (0 == (((Addr)(aaa_p)) & ((Addr)0xf))) macro
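The definition above is the whole mechanism: an address is 16-byte aligned exactly when its low four bits are zero, so the macro masks with 0xf and compares against 0. A minimal standalone example, assuming uintptr_t as a stand-in for Valgrind's Addr type (the asserts mirror the unit-test result earlier in this listing):

    #include <assert.h>
    #include <stdint.h>

    typedef uintptr_t Addr;   /* stand-in for Valgrind's Addr */

    /* Same test as the macro above: 16-byte aligned iff the low
       four address bits are all zero. */
    #define VG_IS_16_ALIGNED(aaa_p) (0 == (((Addr)(aaa_p)) & ((Addr)0xf)))

    int main(void)
    {
       assert(   VG_IS_16_ALIGNED(0x0)  );
       assert( ! VG_IS_16_ALIGNED(0x1)  );
       assert( ! VG_IS_16_ALIGNED(0xf)  );
       assert(   VG_IS_16_ALIGNED(0x10) );
       assert(   VG_IS_16_ALIGNED(0xabcdef0) );
       return 0;
    }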
99 vg_assert(VG_IS_16_ALIGNED(stack)); in ML_()
106 vg_assert(VG_IS_16_ALIGNED(stack)); in ML_()
2334 vg_assert(VG_IS_16_ALIGNED( ((Addr) & VG_(tt_fast)[0]) )); in VG_()
2343 vg_assert(VG_IS_16_ALIGNED(signo_addr)); in async_signalhandler_solaris_preprocess()