
Searched for refs:kmp_int64 (results 1 – 24 of 24, sorted by relevance)

/external/llvm-project/openmp/runtime/src/
kmp_os.h
126 typedef __int64 kmp_int64; typedef
134 typedef struct kmp_struct64 kmp_int64; typedef
159 typedef long long kmp_int64; typedef
195 typedef kmp_int64 kmp_int;
459 InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))
473 extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
483 extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
484 kmp_int64 sv);
491 extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
492 kmp_int64 cv, kmp_int64 sv);
[all …]
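The three typedef hits above are alternative platform-specific definitions of the same 64-bit type: __int64 under the Microsoft compiler, long long elsewhere, and a struct fallback (kmp_struct64) for compilers with no native 64-bit integer. A minimal sketch of that selection scheme; the guard macro here is illustrative, not the runtime's exact condition:

    #if defined(_MSC_VER)
    typedef __int64 kmp_int64;      // MSVC spelling of a 64-bit integer
    #else
    typedef long long kmp_int64;    // everywhere else
    #endif
    // kmp_runtime.cpp checks the same invariant at startup:
    //   KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);   (see hit below)
    static_assert(sizeof(kmp_int64) == 8, "kmp_int64 must be exactly 8 bytes");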
kmp_atomic.h
487 void __kmpc_atomic_fixed8_add(ident_t *id_ref, int gtid, kmp_int64 *lhs,
488 kmp_int64 rhs);
489 void __kmpc_atomic_fixed8_sub(ident_t *id_ref, int gtid, kmp_int64 *lhs,
490 kmp_int64 rhs);
516 void __kmpc_atomic_fixed8_andb(ident_t *id_ref, int gtid, kmp_int64 *lhs,
517 kmp_int64 rhs);
518 void __kmpc_atomic_fixed8_div(ident_t *id_ref, int gtid, kmp_int64 *lhs,
519 kmp_int64 rhs);
522 void __kmpc_atomic_fixed8_mul(ident_t *id_ref, int gtid, kmp_int64 *lhs,
523 kmp_int64 rhs);
[all …]
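These __kmpc_atomic_fixed8_* functions are the runtime entry points behind "#pragma omp atomic" on 8-byte ("fixed8") integers, one per operation (add, sub, andb, div, mul, ...). A hedged sketch of how a compiler might lower such a pragma; exact lowering and the loc/gtid plumbing vary by compiler:

    typedef long long kmp_int64;
    struct ident_t;   // opaque source-location descriptor

    extern "C" void __kmpc_atomic_fixed8_add(ident_t *id_ref, int gtid,
                                             kmp_int64 *lhs, kmp_int64 rhs);

    // User code:
    //   #pragma omp atomic
    //   counter += inc;
    // may become a call like this:
    void lowered_update(ident_t *loc, int gtid, kmp_int64 *counter,
                        kmp_int64 inc) {
      __kmpc_atomic_fixed8_add(loc, gtid, counter, inc);
    }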
kmp_sched.cpp
44 kmp_int64 t; \
45 kmp_int64 u = (kmp_int64)(*pupper); \
46 kmp_int64 l = (kmp_int64)(*plower); \
47 kmp_int64 i = (kmp_int64)incr; \
838 kmp_int32 *plastiter, kmp_int64 *plower, in __kmpc_for_static_init_8()
839 kmp_int64 *pupper, kmp_int64 *pstride, in __kmpc_for_static_init_8()
840 kmp_int64 incr, kmp_int64 chunk) { in __kmpc_for_static_init_8()
841 __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower, in __kmpc_for_static_init_8()
856 kmp_int64 *pstride, kmp_int64 incr, in __kmpc_for_static_init_8u()
857 kmp_int64 chunk) { in __kmpc_for_static_init_8u()
[all …]
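The __kmpc_for_static_init_8 hits show the 64-bit instantiation of the static-schedule bound computation: the runtime rewrites *plower and *pupper in place to this thread's sub-range. A hedged usage sketch in the style of the runtime's own tests; the schedule constant 34 (kmp_sch_static) is an assumption taken from kmp.h and worth re-checking:

    typedef int kmp_int32;
    typedef long long kmp_int64;
    struct ident_t;

    extern "C" void __kmpc_for_static_init_8(
        ident_t *loc, kmp_int32 gtid, kmp_int32 schedtype, kmp_int32 *plastiter,
        kmp_int64 *plower, kmp_int64 *pupper, kmp_int64 *pstride,
        kmp_int64 incr, kmp_int64 chunk);

    void run_my_share(ident_t *loc, kmp_int32 gtid) {
      kmp_int32 last = 0;
      kmp_int64 lower = 0, upper = 999, stride = 1;
      __kmpc_for_static_init_8(loc, gtid, /*kmp_sch_static=*/34, &last,
                               &lower, &upper, &stride, /*incr=*/1, /*chunk=*/0);
      for (kmp_int64 i = lower; i <= upper; ++i) {
        // this thread's slice of iterations 0..999
      }
    }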
kmp_dispatch.h
205 __forceinline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
206 kmp_int64 d) {
207 kmp_int64 r;
223 __forceinline kmp_int64 test_then_inc_acq<kmp_int64>(volatile kmp_int64 *p) {
224 kmp_int64 r;
240 __forceinline kmp_int64 test_then_inc<kmp_int64>(volatile kmp_int64 *p) {
241 kmp_int64 r;
257 __forceinline kmp_int32 compare_and_swap<kmp_int64>(volatile kmp_int64 *p,
258 kmp_int64 c, kmp_int64 s) {
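kmp_dispatch.h wraps the raw primitives in function-template specializations so the dispatch logic can be written once over the iteration type. A hedged sketch of that pattern; the signatures follow the hits, the body is illustrative:

    #include <cstdint>
    typedef std::int64_t kmp_int64;

    extern "C" kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p,
                                               kmp_int64 v);

    template <typename T> T test_then_add(volatile T *p, T d);

    template <>
    inline kmp_int64 test_then_add<kmp_int64>(volatile kmp_int64 *p,
                                              kmp_int64 d) {
      kmp_int64 r = __kmp_test_then_add64(p, d); // returns the previous value
      return r;
    }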
z_Windows_NT-586_util.cpp
78 kmp_int64 old_value, new_value; in __kmp_test_then_add8()
91 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) { in __kmp_test_then_add64()
92 kmp_int64 old_value, new_value; in __kmp_test_then_add64()
110 while (!__kmp_compare_and_store64((volatile kmp_int64 *)p, old_value, in __kmp_test_then_or64()
125 while (!__kmp_compare_and_store64((volatile kmp_int64 *)p, old_value, in __kmp_test_then_and64()
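The Windows implementations above show the standard fallback when no single-instruction primitive exists: snapshot the value, compute, then retry a 64-bit compare-and-store until it succeeds. A hedged sketch of that loop for the OR case, mirroring __kmp_test_then_or64:

    typedef int kmp_int32;
    typedef long long kmp_int64;

    extern "C" kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p,
                                                   kmp_int64 cv, kmp_int64 sv);

    kmp_int64 test_then_or64_sketch(volatile kmp_int64 *p, kmp_int64 d) {
      kmp_int64 old_value = *p;            // snapshot
      kmp_int64 new_value = old_value | d;
      // retry while another thread raced us between the read and the CAS
      while (!__kmp_compare_and_store64(p, old_value, new_value)) {
        old_value = *p;
        new_value = old_value | d;
      }
      return old_value;                    // fetch-then-op returns the old value
    }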
kmp_atomic.cpp
925 ATOMIC_FIXED_ADD(fixed8, add, kmp_int64, 64, +, 8i, 7,
927 ATOMIC_FIXED_ADD(fixed8, sub, kmp_int64, 64, -, 8i, 7,
1012 ATOMIC_CMPXCHG(fixed8, andb, kmp_int64, 64, &, 8i, 7,
1014 ATOMIC_CMPXCHG(fixed8, div, kmp_int64, 64, /, 8i, 7,
1018 ATOMIC_CMPXCHG(fixed8, mul, kmp_int64, 64, *, 8i, 7,
1020 ATOMIC_CMPXCHG(fixed8, orb, kmp_int64, 64, |, 8i, 7,
1022 ATOMIC_CMPXCHG(fixed8, shl, kmp_int64, 64, <<, 8i, 7,
1024 ATOMIC_CMPXCHG(fixed8, shr, kmp_int64, 64, >>, 8i, 7,
1028 ATOMIC_CMPXCHG(fixed8, xor, kmp_int64, 64, ^, 8i, 7,
1089 ATOMIC_CMPX_L(fixed8, andl, kmp_int64, 64, &&, 8i, 7,
[all …]
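Each ATOMIC_FIXED_ADD / ATOMIC_CMPXCHG line instantiates one atomic entry point from a macro, with the C operator (+, -, &, /, *, |, <<, >>, ^) passed as a macro argument. A hedged, heavily simplified sketch of the shape such a macro expands to; the real ones add ident_t/gtid parameters, alignment checks, and lock fallbacks, and __sync_bool_compare_and_swap is a GCC/Clang builtin used here for brevity:

    typedef long long kmp_int64;

    #define SKETCH_ATOMIC_CMPXCHG(NAME, TYPE, OP)                        \
      void sketch_atomic_##NAME(volatile TYPE *lhs, TYPE rhs) {          \
        TYPE oldv = *lhs, newv = oldv OP rhs;                            \
        while (!__sync_bool_compare_and_swap(lhs, oldv, newv)) {         \
          oldv = *lhs; /* reload, recompute, retry */                    \
          newv = oldv OP rhs;                                            \
        }                                                                \
      }

    SKETCH_ATOMIC_CMPXCHG(fixed8_div, kmp_int64, /)  // cf. the "div" hit above
    SKETCH_ATOMIC_CMPXCHG(fixed8_shl, kmp_int64, <<) // cf. the "shl" hit above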
kmp_dispatch.cpp
733 __kmp_dispatch_init_hier_runtime<kmp_int64>(ident_t *loc, kmp_int64 lb, in __kmp_dispatch_init_hier_runtime()
734 kmp_int64 ub, kmp_int64 st) { in __kmp_dispatch_init_hier_runtime()
735 __kmp_dispatch_init_hierarchy<kmp_int64>( in __kmp_dispatch_init_hier_runtime()
742 kmp_uint64 ub, kmp_int64 st) { in __kmp_dispatch_init_hier_runtime()
1296 kmp_int64 b; in __kmp_dispatch_next_algorithm()
1302 vold.b = *(volatile kmp_int64 *)(&pr->u.p.count); in __kmp_dispatch_next_algorithm()
1306 (volatile kmp_int64 *)&pr->u.p.count, in __kmp_dispatch_next_algorithm()
1307 *VOLATILE_CAST(kmp_int64 *) & vold.b, in __kmp_dispatch_next_algorithm()
1308 *VOLATILE_CAST(kmp_int64 *) & vnew.b)) { in __kmp_dispatch_next_algorithm()
1310 vold.b = *(volatile kmp_int64 *)(&pr->u.p.count); in __kmp_dispatch_next_algorithm()
[all …]
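The vold.b / vnew.b hits show a union trick used by the static-steal schedule: two adjacent fields of the per-thread descriptor are read and compare-and-swapped as one kmp_int64, so both change atomically or not at all. A hedged sketch; the field names are illustrative, not the runtime's exact layout:

    #include <cstdint>
    typedef std::int32_t kmp_int32;
    typedef std::int64_t kmp_int64;

    union view64 {
      kmp_int64 b;                          // single 64-bit view for the CAS
      struct { kmp_int32 count, ub; } p;    // the two logical 32-bit fields
    };

    bool try_update(volatile kmp_int64 *shared, view64 vold, view64 vnew) {
      // succeeds only if neither field changed since vold was snapshotted
      return __sync_bool_compare_and_swap(shared, vold.b, vnew.b);
    }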
kmp.h
271 ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
490 kmp_int64 sched;
1669 kmp_int64 count; // current chunk number for static & static-steal scheduling
1670 kmp_int64 ub; /* upper-bound */
1672 kmp_int64 lb; /* lower-bound */
1673 kmp_int64 st; /* stride */
1674 kmp_int64 tc; /* trip count (number of iterations) */
1675 kmp_int64 static_steal_counter; /* for static_steal only; maybe better to put
1687 kmp_int64 parm1;
1688 kmp_int64 parm2;
[all …]
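The kmp.h:271 hit is the body of the KMP_PACK_64 macro: it packs two 32-bit values into one kmp_int64 so that paired fields (for example the head and tail of the queuing lock, see the KMP_PACK_64(head, head) call in kmp_lock.cpp below) can be compared or stored in a single 64-bit operation. A self-contained copy plus a compile-time sanity check:

    #include <cstdint>
    typedef std::int64_t kmp_int64;
    typedef std::uint64_t kmp_uint64;

    #define KMP_PACK_64(HIGH_32, LOW_32)                                  \
      ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))

    static_assert(KMP_PACK_64(1, 2) == ((kmp_int64)1 << 32 | 2),
                  "high word lands in the upper 32 bits, low word in the lower");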
z_Windows_NT-586_asm.asm
209 ; __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
418 ; kmp_int64
419 ; __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
694 ; __kmp_test_then_add64( volatile kmp_int64 *p, kmp_int64 d );
802 ; __kmp_compare_and_store64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
903 ; kmp_int64
904 ; __kmp_xchg_fixed64( volatile kmp_int64 *p, kmp_int64 d );
1007 ; kmp_int64
1008 ; __kmp_compare_and_store_ret64( volatile kmp_int64 *p, kmp_int64 cv, kmp_int64 sv );
kmp_gsupport.cpp
873 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
911 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
938 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
1036 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
1084 (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
1778 (kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, nogroup, in __GOMP_taskloop()
1792 kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0]; in __kmp_GOMP_doacross_post()
1793 kmp_int64 *vec = in __kmp_GOMP_doacross_post()
1794 (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims); in __kmp_GOMP_doacross_post()
1795 for (kmp_int64 i = 0; i < num_dims; ++i) { in __kmp_GOMP_doacross_post()
[all …]
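The GOMP shim hits mostly cast libgomp's bounds to the kmp_int64/kmp_uint64 forms the KMP entry points expect; __kmp_GOMP_doacross_post additionally copies the incoming counts into a freshly allocated kmp_int64 vector. A hedged sketch of that widening copy; the long element type of the source array and the plain malloc are assumptions standing in for the runtime's thread allocator:

    #include <cstdlib>
    typedef long long kmp_int64;

    // widen incoming iteration counts to the kmp_int64 vector KMP expects
    kmp_int64 *widen_counts(const long *count, kmp_int64 num_dims) {
      kmp_int64 *vec =
          (kmp_int64 *)std::malloc(sizeof(kmp_int64) * num_dims);
      for (kmp_int64 i = 0; i < num_dims; ++i)
        vec[i] = (kmp_int64)count[i];
      return vec; // caller frees
    }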
kmp_alloc.cpp
32 typedef kmp_int64 bufsize;
950 gtid, (kmp_uint64)thr->totalloc, (kmp_int64)thr->numget, in bfreed()
951 (kmp_int64)thr->numrel, (kmp_int64)thr->numpblk, in bfreed()
952 (kmp_int64)thr->numpget, (kmp_int64)thr->numprel, in bfreed()
953 (kmp_int64)thr->numdget, (kmp_int64)thr->numdrel); in bfreed()
1482 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a); in __kmpc_alloc()
1485 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a); in __kmpc_alloc()
1538 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, desc.size_a); in __kmpc_alloc()
1541 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a); in __kmpc_alloc()
1693 KMP_TEST_THEN_ADD64((kmp_int64 *)&al->pool_used, -desc.size_a); in __kmpc_free()
[all …]
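The allocator hits track pool usage with the same fetch-and-add primitive; note that subtraction is simply an add of a negative delta, as in the -desc.size_a calls above. A hedged sketch of that accounting pattern:

    typedef long long kmp_int64;

    extern "C" kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p,
                                               kmp_int64 v);

    volatile kmp_int64 pool_used = 0;

    void account_alloc(kmp_int64 size) { __kmp_test_then_add64(&pool_used, size); }
    void account_free(kmp_int64 size)  { __kmp_test_then_add64(&pool_used, -size); }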
kmp_csupport.cpp
435 *(kmp_int64 *)(&this_thr->th.th_teams_size) = 0L; in __kmpc_fork_teams()
3871 kmp_int64 last, trace_count; in __kmpc_doacross_init()
3896 pr_buf->th_doacross_info = (kmp_int64 *)__kmp_thread_malloc( in __kmpc_doacross_init()
3897 th, sizeof(kmp_int64) * (4 * num_dims + 1)); in __kmpc_doacross_init()
3900 (kmp_int64)num_dims; // first element is number of dimensions in __kmpc_doacross_init()
3903 pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done; in __kmpc_doacross_init()
3909 kmp_int64 in __kmpc_doacross_init()
3961 (volatile kmp_int64 *)&sh_buf->doacross_flags, NULL, 1LL); in __kmpc_doacross_init()
3974 while (*(volatile kmp_int64 *)&sh_buf->doacross_flags == 1LL) in __kmpc_doacross_init()
3988 void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) { in __kmpc_doacross_wait()
[all …]
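The __kmpc_doacross_* hits implement OpenMP doacross loops (ordered(n) with depend(sink/source)): init allocates the th_doacross_info vector of kmp_int64 describing each dimension, and wait/post exchange iteration vectors. A hedged sketch of the per-iteration calls a compiler might emit for a 1-D loop with depend(sink: i-1); lowering details vary by compiler:

    typedef long long kmp_int64;
    struct ident_t;

    extern "C" {
    void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec);
    void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec);
    }

    void ordered_body(ident_t *loc, int gtid, kmp_int64 i) {
      kmp_int64 sink = i - 1;
      __kmpc_doacross_wait(loc, gtid, &sink); // block until i-1 has posted
      // ... work that must observe iteration i-1 ...
      __kmpc_doacross_post(loc, gtid, &i);    // publish completion of i
    }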
kmp_dispatch_hier.h
58 kmp_int64 *large_chunks;
67 large_chunks = (kmp_int64 *)__kmp_allocate(sizeof(kmp_int64) * in append()
78 large_chunks[current_size] = (kmp_int64)chunk; in append()
96 kmp_int64 temp4 = large_chunks[i]; in sort()
313 volatile kmp_int64 *val; in barrier()
323 val = RCAST(volatile kmp_int64 *, &(bdata->val[current_index])); in barrier()
kmp_tasking.cpp
1397 kmp_int64 device_id) { in __kmpc_omp_target_task_alloc()
4052 kmp_int64 retval; in get_lb()
4056 retval = *(kmp_int64 *)((char *)task + lower_offset); in get_lb()
4061 retval = (kmp_int64)*lb; in get_lb()
4063 kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds); in get_lb()
4064 retval = (kmp_int64)*lb; in get_lb()
4068 retval = *(kmp_int64 *)((char *)task + lower_offset); in get_lb()
4073 kmp_int64 retval; in get_ub()
4077 retval = *(kmp_int64 *)((char *)task + upper_offset); in get_ub()
4082 retval = (kmp_int64)*ub; in get_ub()
[all …]
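The get_lb/get_ub hits recover taskloop bounds stored inside the task structure at a byte offset, casting through char * to read a kmp_int64. A hedged sketch of that access pattern:

    #include <cstddef>
    #include <cstdint>
    typedef std::int64_t kmp_int64;

    // read the 64-bit lower bound stored lower_offset bytes into the task blob
    kmp_int64 read_lb(const void *task, std::size_t lower_offset) {
      return *(const kmp_int64 *)((const char *)task + lower_offset);
    }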
z_Windows_NT_util.cpp
140 static kmp_int64 __kmp_win32_time;
901 __kmp_win32_time = (kmp_int64)time.QuadPart; in __kmp_clear_system_time()
941 *delta = ((double)(((kmp_int64)now.QuadPart) - __kmp_win32_time)) * in __kmp_read_system_time()
ompt-specific.cpp
478 uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread); in __ompt_get_unique_id_internal()
z_Linux_util.cpp
407 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) { in __kmp_test_then_add64()
408 kmp_int64 old_value, new_value; in __kmp_test_then_add64()
kmp_lock.cpp
1147 enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p, in __kmp_acquire_queuing_lock_timed_template()
1431 RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head), in __kmp_release_queuing_lock()
2681 return ((kmp_int64)b - (kmp_int64)a) > 0; in before()
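The kmp_lock.cpp:2681 hit (the before() helper) is a wraparound-tolerant ordering test for monotonically increasing counters: the subtraction is done in signed 64-bit space and only the sign is tested, so the comparison stays correct even after the counter overflows. A self-contained copy of the idiom:

    #include <cstdint>

    // true iff a was taken before b, even across counter wraparound
    static bool before(std::uint64_t a, std::uint64_t b) {
      return ((std::int64_t)b - (std::int64_t)a) > 0;
    }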
kmp_wait_release.h
630 return KMP_TEST_THEN_ADD4_64(RCAST(volatile kmp_int64 *, f));
kmp_runtime.cpp
1430 __kmp_stkpadding += (short)((kmp_int64)dummy); in __kmp_fork_call()
6637 KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8); in __kmp_do_serial_initialize()
/external/llvm-project/openmp/runtime/test/tasking/
kmp_taskloop.c
54 typedef long long kmp_int64; typedef
61 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
62 int nogroup, int sched, kmp_int64 grainsize, void *task_dup );
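The test redeclares kmp_int64 locally and calls the taskloop entry point directly. A hedged sketch of a call matching that prototype; the leading loc/gtid/task/if_val parameters are not shown in the hits above, and the sched encoding (1 taken here to mean "grainsize specified") is an assumption drawn from the runtime's conventions, both worth re-checking against the full test source:

    typedef long long kmp_int64;
    typedef unsigned long long kmp_uint64;
    struct ident_t;
    struct kmp_task_t;

    extern "C" void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task,
                                    int if_val, kmp_uint64 *lb, kmp_uint64 *ub,
                                    kmp_int64 st, int nogroup, int sched,
                                    kmp_int64 grainsize, void *task_dup);

    void launch_taskloop(ident_t *loc, int gtid, kmp_task_t *ptask) {
      kmp_uint64 lb = 0, ub = 99;   // iterations 0..99
      __kmpc_taskloop(loc, gtid, ptask, /*if_val=*/1, &lb, &ub, /*st=*/1,
                      /*nogroup=*/0, /*sched=*/1, /*grainsize=*/10,
                      /*task_dup=*/nullptr);
    }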
kmp_detach_tasks_t2.c
13 typedef long long kmp_int64; typedef
kmp_detach_tasks_t1.c
13 typedef long long kmp_int64; typedef
kmp_detach_tasks_t3.c
15 typedef long long kmp_int64; typedef