/bionic/tests/ |
D | threads_test.cpp |
      31  #if __has_include(<threads.h>)
      67  TEST(threads, call_once) {  in TEST() argument
      81  TEST(threads, cnd_broadcast__cnd_wait) {  in TEST() argument
     117  TEST(threads, cnd_init__cnd_destroy) {  in TEST() argument
     127  TEST(threads, cnd_signal__cnd_wait) {  in TEST() argument
     177  TEST(threads, cnd_timedwait_timedout) {  in TEST() argument
     193  TEST(threads, cnd_timedwait) {  in TEST() argument
     223  TEST(threads, mtx_init) {  in TEST() argument
     237  TEST(threads, mtx_destroy) {  in TEST() argument
     247  TEST(threads, mtx_lock_plain) {  in TEST() argument
     [all …]
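The hits above cover bionic's C11 `<threads.h>` surface (call_once, condition variables, mutexes). As a rough illustration of the API shape these tests exercise -- a minimal sketch, not code taken from threads_test.cpp:

```cpp
// Minimal sketch of the C11 <threads.h> mutex/condition-variable pattern the
// tests above exercise. Illustrative only; not copied from threads_test.cpp.
#include <threads.h>

static mtx_t m;
static cnd_t c;
static bool ready = false;

static int waiter(void*) {
  mtx_lock(&m);
  while (!ready) cnd_wait(&c, &m);  // cnd_wait atomically unlocks m and sleeps
  mtx_unlock(&m);
  return 0;
}

int main() {
  mtx_init(&m, mtx_plain);
  cnd_init(&c);

  thrd_t t;
  thrd_create(&t, waiter, nullptr);

  mtx_lock(&m);
  ready = true;
  cnd_signal(&c);                   // wake the waiter (the cnd_signal__cnd_wait shape)
  mtx_unlock(&m);

  thrd_join(t, nullptr);
  cnd_destroy(&c);
  mtx_destroy(&m);
}
```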
|
D | malloc_stress_test.cpp |
      51  std::vector<std::thread*> threads;  in TEST() local
      53  threads.push_back(new std::thread([]() {  in TEST()
      65  for (auto thread : threads) {  in TEST()
      69  threads.clear();  in TEST()
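The pattern visible in these hits (heap-allocated std::thread objects collected in a vector, then joined, deleted, and cleared) looks roughly like the sketch below; the lambda body is a placeholder, not the test's actual stress workload.

```cpp
// Sketch of the thread fan-out pattern suggested by the hits above.
// The lambda body is a stand-in; the real test performs malloc stress work.
#include <stdlib.h>
#include <thread>
#include <vector>

void spawn_and_join() {
  std::vector<std::thread*> threads;
  for (int i = 0; i < 256; ++i) {
    threads.push_back(new std::thread([]() {
      void* p = malloc(4096);  // placeholder allocation work
      free(p);
    }));
  }
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
  threads.clear();
}
```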
|
D | leak_test.cpp |
     134  struct thread_data { pthread_barrier_t* barrier; pid_t* tid; } threads[thread_count];  in TEST() local
     142  threads[i] = {&barrier, &tids[i]};  in TEST()
     150  ASSERT_EQ(0, pthread_create(&thread, nullptr, thread_function, &threads[i]));  in TEST()
|
D | ifaddrs_test.cpp |
     278  std::vector<std::thread*> threads;  in TEST() local
     280  threads.push_back(new std::thread([]() {  in TEST()
     286  for (auto& t : threads) {  in TEST()
|
D | malloc_test.cpp |
     925  std::vector<std::thread*> threads;  in TEST() local
     936  threads.push_back(t);  in TEST()
     959  for (auto thread : threads) {  in TEST()
    1213  pthread_t threads[kNumThreads];  in SetAllocationLimitMultipleThreads() local
    1215  ASSERT_EQ(0, pthread_create(&threads[i], nullptr, SetAllocationLimit, &go));  in SetAllocationLimitMultipleThreads()
    1229  ASSERT_EQ(0, pthread_join(threads[i], &result));  in SetAllocationLimitMultipleThreads()
|
D | pthread_test.cpp |
    2701  std::vector<pthread_t> threads(data.thread_count);  in TEST() local
    2702  std::vector<BarrierTestHelperArg> args(threads.size());  in TEST()
    2703  for (size_t i = 0; i < threads.size(); ++i) {  in TEST()
    2706  ASSERT_EQ(0, pthread_create(&threads[i], nullptr,  in TEST()
    2709  for (size_t i = 0; i < threads.size(); ++i) {  in TEST()
    2710  ASSERT_EQ(0, pthread_join(threads[i], nullptr));  in TEST()
    2772  std::vector<pthread_t> threads(THREAD_COUNT);  in TEST() local
    2779  ASSERT_EQ(0, pthread_create(&threads[i], nullptr,  in TEST()
    2784  ASSERT_EQ(0, pthread_join(threads[i], nullptr));  in TEST()
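These hits show a std::vector<pthread_t> created and joined in a loop for the barrier tests. A sketch of that create/join shape follows; the worker function and argument struct here are illustrative stand-ins, not pthread_test.cpp's BarrierTestHelperArg.

```cpp
// Rough shape of the create/join loop seen in the pthread barrier tests.
// BarrierArg and barrier_worker are illustrative stand-ins.
#include <pthread.h>
#include <vector>

struct BarrierArg {
  pthread_barrier_t* barrier;
};

static void* barrier_worker(void* p) {
  auto* arg = static_cast<BarrierArg*>(p);
  pthread_barrier_wait(arg->barrier);  // all threads rendezvous here
  return nullptr;
}

void run_barrier_threads(size_t thread_count) {
  pthread_barrier_t barrier;
  pthread_barrier_init(&barrier, nullptr, thread_count);

  std::vector<pthread_t> threads(thread_count);
  std::vector<BarrierArg> args(threads.size(), BarrierArg{&barrier});
  for (size_t i = 0; i < threads.size(); ++i) {
    pthread_create(&threads[i], nullptr, barrier_worker, &args[i]);
  }
  for (size_t i = 0; i < threads.size(); ++i) {
    pthread_join(threads[i], nullptr);
  }
  pthread_barrier_destroy(&barrier);
}
```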
|
/bionic/libc/async_safe/ |
D | README.md |
       6  it among threads, whereas these functions connect to liblog for each log message. While it's
      10  threads. Therefore, we maintain these two separate mechanisms.
|
/bionic/tools/versioner/src/ |
D | Driver.cpp |
     214  std::vector<std::thread> threads;  in initializeTargetCC1FlagCache() local
     216  threads.emplace_back([type, &vfs, &reqs]() {  in initializeTargetCC1FlagCache()
     226  for (auto& thread : threads) {  in initializeTargetCC1FlagCache()
|
D | versioner.cpp |
     194  std::vector<std::thread> threads;  in compileHeaders() local
     241  threads.emplace_back([&jobs, &job_index, &result, vfs]() {  in compileHeaders()
     255  for (auto& thread : threads) {  in compileHeaders()
     258  threads.clear();  in compileHeaders()
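The captures in these hits ([&jobs, &job_index, &result, vfs]) suggest a worker-pool shape: threads pull compilation jobs via a shared index. The sketch below is a generic reconstruction under the assumption that job_index acts as a shared atomic counter; the real versioner code may distribute its work differently.

```cpp
// Generic worker-pool sketch of the pattern suggested by the hits above:
// a shared job index that worker threads atomically advance. Illustrative
// only; not the actual versioner job-dispatch code.
#include <atomic>
#include <functional>
#include <thread>
#include <vector>

void run_jobs(const std::vector<std::function<void()>>& jobs, size_t thread_count) {
  std::atomic<size_t> job_index{0};
  std::vector<std::thread> threads;
  for (size_t i = 0; i < thread_count; ++i) {
    threads.emplace_back([&jobs, &job_index]() {
      while (true) {
        size_t idx = job_index.fetch_add(1);  // claim the next job
        if (idx >= jobs.size()) return;
        jobs[idx]();
      }
    });
  }
  for (auto& thread : threads) {
    thread.join();
  }
  threads.clear();
}
```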
|
/bionic/tests/headers/posix/ |
D | threads_h.c |
      29  #if __has_include(<threads.h>)
|
/bionic/docs/ |
D | fdsan.md |
      12  For example, given two threads running the following code:
     111  std::vector<std::thread> threads;
     113  threads.emplace_back(function);
     115  for (auto& thread : threads) {
     121  When running the program, the threads' executions will be interleaved as follows:
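fdsan.md's example revolves around two threads racing on file-descriptor ownership. A minimal sketch of the kind of double-close race it describes -- not the document's exact code -- might look like this:

```cpp
// Sketch of the kind of fd ownership race fdsan is designed to catch.
// Both threads believe they own fd, so the second close() is a bug; if a
// third thread had opened a new file in between, that close() could tear
// down an unrelated descriptor that reused the same number.
#include <fcntl.h>
#include <unistd.h>
#include <thread>
#include <vector>

void double_close_race() {
  int fd = open("/dev/null", O_RDONLY);

  auto function = [fd]() {
    close(fd);  // racy double close across the two threads
  };

  std::vector<std::thread> threads;
  for (int i = 0; i < 2; ++i) {
    threads.emplace_back(function);
  }
  for (auto& thread : threads) {
    thread.join();
  }
}
```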
|
D | native_allocator.md |
      33  This function, when called, should pause all threads that are making a
      35  is made to `malloc_enable`, the paused threads should start running again.
     318  mechanism will simulate this by creating threads and replaying the operations
     321  in all threads since it collapses all of the allocation operations to occur
     322  one after another. This will cause a lot of threads allocating at the same
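The first two hits describe the malloc_disable/malloc_enable contract: while allocations are disabled, any thread entering the allocator blocks until malloc_enable is called. A minimal sketch of that contract, assuming the bionic-specific malloc_disable()/malloc_enable() declarations in <malloc.h>:

```cpp
// Sketch of the malloc_disable()/malloc_enable() contract described above.
// Illustrates the blocking behavior only; not a test or the allocator's code.
#include <malloc.h>
#include <stdlib.h>
#include <thread>

void pause_allocations_briefly() {
  std::thread allocator([]() {
    for (int i = 0; i < 1000; ++i) {
      free(malloc(64));  // these calls stall while allocations are disabled
    }
  });

  malloc_disable();  // threads entering the allocator are paused here
  // ... work that must not race with the allocator (and must not allocate),
  // e.g. walking heap metadata ...
  malloc_enable();   // paused threads resume

  allocator.join();
}
```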
|
D | elf-tls.md |
     221  `dlopen` can initialize the new static TLS memory in all existing threads. A thread list could be
     455  `thread_local` | - C11: a macro for `_Thread_local` via `threads.h`<br/> - C++11: a keyword, allo…
     562  On the other hand, maybe lazy allocation is a feature, because not all threads will use a dlopen'ed
     567  > up the process. It would be a waste of memory and time to allocate the storage for all threads. A
     570  > alternative to stopping all threads and allocating storage for all threads before letting them run
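The second hit notes the two spellings of thread-local storage: C11's `_Thread_local` (with `thread_local` as a macro via `<threads.h>`) versus the C++11 keyword. A small illustration of per-thread storage, written in C++ with the C11 spelling noted in a comment:

```cpp
// Each thread gets its own copy of `counter`. In C++11, thread_local is a
// keyword; in C11 the same declaration would be `_Thread_local int counter`
// (or `thread_local` after including <threads.h>).
#include <stdio.h>
#include <thread>

thread_local int counter = 0;

void bump_and_print(const char* who) {
  ++counter;  // only affects this thread's copy
  printf("%s: %d\n", who, counter);
}

int main() {
  std::thread t([] { bump_and_print("worker"); });  // prints "worker: 1"
  bump_and_print("main");                           // prints "main: 1"
  t.join();
}
```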
|
D | status.md |
      54  * Full C11 `<threads.h>` (available as inlines for older API levels).
|
/bionic/libc/ |
D | Android.bp |
    1173  "bionic/threads.cpp",
|