/external/llvm/lib/Target/ARM/ |
D | ARMScheduleA9.td | 82 // No operand cycles 203 // FIXME: If address is 64-bit aligned, AGU cycles is 1. 347 // FIXME: If address is 64-bit aligned, AGU cycles is 1. 469 // Extra latency cycles since wbck is 2 cycles 478 // Extra latency cycles since wbck is 2 cycles 488 // Extra latency cycles since wbck is 4 cycles 497 // Extra latency cycles since wbck is 4 cycles 669 // Extra 1 latency cycle since wbck is 2 cycles 678 // Extra 1 latency cycle since wbck is 2 cycles 719 // FIXME: assumes 2 doubles which requires 2 LS cycles. [all …]
|
D | ARMScheduleV6.td | 24 // No operand cycles 109 // Scaled register offset, issues over 2 cycles 122 // Scaled register offset with update, issues over 2 cycles 162 // Scaled register offset, issues over 2 cycles 175 // Scaled register offset with update, issues over 2 cycles
|
/external/antlr/antlr-3.4/tool/src/main/java/org/antlr/tool/ |
D | LeftRecursionCyclesMessage.java | 39 public Collection cycles; field in LeftRecursionCyclesMessage 41 public LeftRecursionCyclesMessage(Collection cycles) { in LeftRecursionCyclesMessage() argument 43 this.cycles = cycles; in LeftRecursionCyclesMessage() 48 st.add("listOfCycles", cycles); in toString()
|
/external/boringssl/linux-arm/crypto/sha/ |
D | sha256-armv4.S | 14 @ lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per 20 @ Cortex A8 core and ~20 cycles per processed byte. 25 @ improvement on Cortex A8 core and ~15.4 cycles per processed byte. 30 @ byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon 31 @ S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
|
D | sha512-armv4.S | 14 @ by gcc 3.4 and it spends ~72 clock cycles per byte [on single-issue 20 @ Cortex A8 core and ~40 cycles per processed byte. 25 @ improvement on Cortex A8 core and ~38 cycles per byte. 30 @ one byte in 23.3 cycles or ~60% faster than integer-only code.
|
/external/linux-tools-perf/src/tools/perf/util/ |
D | parse-events.l | 129 cpu-cycles|cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES); } 130 stalled-cycles-frontend|idle-cycles-frontend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT… 131 stalled-cycles-backend|idle-cycles-backend { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_H… 137 bus-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_BUS_CYCLES); } 138 ref-cycles { return sym(yyscanner, PERF_TYPE_HARDWARE, PERF_COUNT_HW_REF_CPU_CYCLES); }
|
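The parse-events.l rules above map the user-visible event names cycles/cpu-cycles, bus-cycles and ref-cycles onto the kernel's PERF_TYPE_HARDWARE event ids. For reference, a minimal sketch of counting CPU cycles for a stretch of code by calling the perf_event_open syscall directly; the busy loop is just a stand-in workload and error handling is kept to a minimum:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    struct perf_event_attr attr;
    memset(&attr, 0, sizeof(attr));
    attr.size = sizeof(attr);
    attr.type = PERF_TYPE_HARDWARE;           /* same type the lexer returns */
    attr.config = PERF_COUNT_HW_CPU_CYCLES;   /* "cycles" / "cpu-cycles" */
    attr.disabled = 1;
    attr.exclude_kernel = 1;

    /* pid = 0, cpu = -1: count this process on any CPU, no event group. */
    int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    if (fd < 0) {
        perror("perf_event_open");
        return 1;
    }

    ioctl(fd, PERF_EVENT_IOC_RESET, 0);
    ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
    for (volatile int i = 0; i < 1000000; i++)
        ;                                      /* stand-in workload */
    ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

    uint64_t count = 0;
    if (read(fd, &count, sizeof(count)) == sizeof(count))
        printf("cycles: %llu\n", (unsigned long long)count);
    close(fd);
    return 0;
}
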
/external/fio/ |
D | gettime.c | 261 uint64_t minc, maxc, avg, cycles[NR_TIME_ITERS]; in calibrate_cpu_clock() local 264 cycles[0] = get_cycles_per_usec(); in calibrate_cpu_clock() 267 cycles[i] = get_cycles_per_usec(); in calibrate_cpu_clock() 268 delta = cycles[i] - mean; in calibrate_cpu_clock() 271 S += delta * (cycles[i] - mean); in calibrate_cpu_clock() 279 if (!cycles[0] && !cycles[NR_TIME_ITERS - 1]) in calibrate_cpu_clock() 287 double this = cycles[i]; in calibrate_cpu_clock() 289 minc = min(cycles[i], minc); in calibrate_cpu_clock() 290 maxc = max(cycles[i], maxc); in calibrate_cpu_clock() 303 (unsigned long long) cycles[i] / 10); in calibrate_cpu_clock()
|
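fio's calibrate_cpu_clock() shown above samples get_cycles_per_usec() repeatedly, keeps a running mean and sum of squared deviations, and then averages only the samples that land near the mean. A simplified sketch of that filtered-average pattern; the helper name average_good_samples, the dummy sample values and the one-standard-deviation cutoff are illustrative choices, not fio's exact code:

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Average a set of cycle samples while ignoring outliers, in the spirit of
 * fio's calibrate_cpu_clock().  The one-sigma cutoff is an illustrative
 * choice, not fio's exact threshold. */
static uint64_t average_good_samples(const uint64_t *samples, int n)
{
    double mean = 0.0, S = 0.0;

    /* Welford's running mean and sum of squared deviations. */
    for (int i = 0; i < n; i++) {
        double delta = (double)samples[i] - mean;
        mean += delta / (i + 1);
        S += delta * ((double)samples[i] - mean);
    }
    double sigma = n > 1 ? sqrt(S / (n - 1)) : 0.0;

    /* Keep only the samples close to the mean, then average those. */
    uint64_t sum = 0;
    int kept = 0;
    for (int i = 0; i < n; i++) {
        if (fabs((double)samples[i] - mean) <= sigma) {
            sum += samples[i];
            kept++;
        }
    }
    return kept ? sum / kept : (uint64_t)(mean + 0.5);
}

int main(void)
{
    /* Dummy values standing in for repeated get_cycles_per_usec() reads;
     * the 3604 outlier is dropped by the filter. */
    uint64_t samples[] = { 3290, 3301, 3295, 3604, 3298, 3302, 3299, 3297 };
    int n = (int)(sizeof(samples) / sizeof(samples[0]));

    printf("calibrated: %llu cycles/usec\n",
           (unsigned long long)average_good_samples(samples, n));
    return 0;
}
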
/external/linux-tools-perf/src/tools/perf/tests/attr/ |
D | README | 50 perf record --group -e cycles,instructions kill (test-record-group) 51 perf record -e '{cycles,instructions}' kill (test-record-group1) 57 perf stat -e cycles kill (test-stat-basic) 62 perf stat --group -e cycles,instructions kill (test-stat-group) 63 perf stat -e '{cycles,instructions}' kill (test-stat-group1) 64 perf stat -i -e cycles kill (test-stat-no-inherit)
|
D | test-stat-basic | 3 args = -e cycles kill >/dev/null 2>&1
|
D | test-stat-no-inherit | 3 args = -i -e cycles kill >/dev/null 2>&1
|
D | test-stat-group1 | 3 args = -e '{cycles,instructions}' kill >/dev/null 2>&1
|
/external/mesa3d/src/gallium/drivers/llvmpipe/ |
D | lp_test_conv.c | 66 double cycles, in write_tsv_row() argument 71 fprintf(fp, "%.1f\t", cycles / MAX2(src_type.length, dst_type.length)); in write_tsv_row() 163 int64_t cycles[LP_TEST_NUM_SAMPLES]; in test_one() local 246 cycles[i] = end_counter - start_counter; in test_one() 299 sum += cycles[i]; in test_one() 300 sum2 += cycles[i]*cycles[i]; in test_one() 309 if(fabs(cycles[i] - avg) <= 4.0*std) { in test_one() 310 sum += cycles[i]; in test_one()
|
D | lp_test_blend.c | 85 double cycles, in write_tsv_row() argument 91 fprintf(fp, "%.1f\t", cycles / type.length); in write_tsv_row() 96 fprintf(fp, "%.1f\t", cycles / (4 * type.length)); in write_tsv_row() 470 int64_t cycles[LP_TEST_NUM_SAMPLES]; in test_one() local 523 cycles[i] = end_counter - start_counter; in test_one() 603 cycles[i] = end_counter - start_counter; in test_one() 660 sum += cycles[i]; in test_one() 661 sum2 += cycles[i]*cycles[i]; in test_one() 670 if(fabs(cycles[i] - avg) <= 4.0*std) { in test_one() 671 sum += cycles[i]; in test_one()
|
/external/mesa3d/src/mesa/math/ |
D | m_debug_xform.c | 169 int mtype, unsigned long *cycles ) in test_transform_function() argument 179 (void) cycles; in test_transform_function() 246 BEGIN_RACE( *cycles ); in test_transform_function() 248 END_RACE( *cycles ); in test_transform_function() 314 unsigned long *cycles = &(benchmark_tab[psize-1][mtype]); in _math_test_all_transform_functions() local 316 if ( test_transform_function( func, psize, mtype, cycles ) == 0 ) { in _math_test_all_transform_functions()
|
D | m_debug_norm.c | 196 static int test_norm_function( normal_func func, int mtype, long *cycles ) in test_norm_function() argument 209 (void) cycles; in test_norm_function() 285 BEGIN_RACE( *cycles ); in test_norm_function() 287 END_RACE( *cycles ); in test_norm_function() 359 long *cycles = &benchmark_tab[mtype]; in _math_test_all_normal_transform_functions() local 361 if ( test_norm_function( func, mtype, cycles ) == 0 ) { in _math_test_all_normal_transform_functions()
|
D | m_debug_clip.c | 230 int psize, long *cycles ) in test_cliptest_function() argument 241 (void) cycles; in test_cliptest_function() 282 BEGIN_RACE( *cycles ); in test_cliptest_function() 284 END_RACE( *cycles ); in test_cliptest_function() 384 long *cycles = &(benchmark_tab[np][psize-1]); in _math_test_all_cliptest_functions() local 386 if ( test_cliptest_function( func, np, psize, cycles ) == 0 ) { in _math_test_all_cliptest_functions()
|
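The m_debug_xform.c, m_debug_norm.c and m_debug_clip.c harnesses above benchmark candidate functions by bracketing them with BEGIN_RACE/END_RACE and recording the measured cycle counts through the cycles pointer into benchmark_tab. A hedged sketch of that style of cycle bracketing using the x86 time-stamp counter; run_candidate, the TSC-based timer and the keep-the-fastest-run policy are illustrative assumptions, not Mesa's actual macro definitions:

#include <stdint.h>
#include <stdio.h>
#include <x86intrin.h>   /* __rdtsc(); assumes an x86 build of this sketch */

/* Illustrative stand-in for a candidate transform function under test. */
static void run_candidate(float *out, const float *in, int n)
{
    for (int i = 0; i < n; i++)
        out[i] = in[i] * 2.0f + 1.0f;
}

int main(void)
{
    enum { N = 1024, RUNS = 16 };
    static float in[N], out[N];
    for (int i = 0; i < N; i++)
        in[i] = (float)i;

    uint64_t best = UINT64_MAX;
    for (int r = 0; r < RUNS; r++) {
        uint64_t start = __rdtsc();     /* roughly what BEGIN_RACE would do */
        run_candidate(out, in, N);
        uint64_t end = __rdtsc();       /* roughly what END_RACE would do   */
        if (end - start < best)
            best = end - start;         /* keep the fastest of several runs */
    }

    printf("best: %llu cycles (%.2f cycles/element)\n",
           (unsigned long long)best, (double)best / N);
    return 0;
}
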
/external/ceres-solver/data/nist/ |
D | ENSO.dat | 15 reveals 3 significant cycles. The annual cycle is the 16 strongest, but cycles with periods of approximately 44 17 and 26 months are also present. These cycles
|
/external/linux-tools-perf/src/tools/perf/Documentation/ |
D | perf-list.txt | 47 The precise modifier works with event types 0x76 (cpu-cycles, CPU 54 perf record -a -e cpu-cycles:p ... # use ibs op counting cycles 55 perf record -a -e r076:p ... # same as -e cpu-cycles:p 82 cycles
|
/external/llvm/include/llvm/Target/ |
D | TargetItinerary.td | 48 // cycles should elapse from the start of this stage to the start of 57 class InstrStage<int cycles, list<FuncUnit> units, 60 int Cycles = cycles; // length of stage in machine cycles 62 int TimeInc = timeinc; // cycles till start of next stage
|
D | TargetSchedule.td | 85 int HighLatency = -1; // Approximation of cycles for "high latency" ops. 86 int MispredictPenalty = -1; // Extra cycles for a mispredicted branch. 122 // clock cycles, but the scheduler does not pin them to a particular 258 // Optionally, ResourceCycles indicates the number of cycles the 294 class ProcReadAdvance<int cycles, list<SchedWrite> writes = []> { 295 int Cycles = cycles; 304 // to reduce latency of a prior write by N cycles. A negative advance 313 class ReadAdvance<SchedRead read, int cycles, list<SchedWrite> writes = []> 314 : ProcReadAdvance<cycles, writes> { 320 class SchedReadAdvance<int cycles, list<SchedWrite> writes = []> : SchedRead, [all …]
|
/external/llvm/lib/Target/AArch64/ |
D | AArch64SchedCyclone.td | 19 let MispredictPenalty = 16; // 14-19 cycles are typical. 151 // consumes a shift pipeline for two cycles. 202 // 32-bit divide takes 7-13 cycles. 10 cycles covers a 20-bit quotient. 203 // The ID pipe is consumed for 2 cycles: issue and writeback. 209 // 64-bit divide takes 7-21 cycles. 13 cycles covers a 32-bit quotient. 210 // The ID pipe is consumed for 2 cycles: issue and writeback. 221 // Integer loads take 4 cycles and use one LS unit for one cycle. 226 // Store-load forwarding is 4 cycles. 301 // Simple vector operations take 2 cycles. 310 // Simple floating-point operations take 2 cycles. [all …]
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCScheduleP7.td | 59 // The VSU XS is similar to the POWER6, but with a pipeline length of 2 cycles 60 // (instead of 3 cycles on the POWER6). VSU XS handles vector FX-style ops. 65 // The VSU PM is similar to the POWER6, but with a pipeline length of 3 cycles 66 // (instead of 4 cycles on the POWER6). vsel is handled by the PM pipeline 69 // FMA from the VSUs can forward results in 6 cycles. VS1 XS and vector FP 71 // IFU/IDU will not dispatch an XS instruction 5 cycles after a vector FP 74 // Three cycles after an L1 cache hit, a dependent VSU instruction can issue.
|
/external/llvm/lib/Target/X86/ |
D | X86SchedSandyBridge.td | 63 // Loads are 4 cycles, so ReadAfterLd registers needn't be available until 4 64 // cycles after the memory operand. 78 // Memory variant also uses a cycle on port 2/3 and adds 4 cycles to the 101 // The complex ones can only execute on port 1, and they require two cycles on 118 defm : SBWriteResPair<WriteFDiv, SBPort0, 12>; // 10-14 cycles.
|
D | X86ScheduleSLM.td | 50 // Loads are 3 cycles, so ReadAfterLd registers needn't be available until 3 51 // cycles after the memory operand. 65 // Memory variant also uses a cycle on MEC_RSV and adds 3 cycles to the 87 // The complex ones can only execute on port 1, and they require two cycles on
|
/external/zlib/src/examples/ |
D | gzappend.c | 126 unsigned cycles; in rotate() local 154 cycles = gcd(len, rot); /* number of cycles */ in rotate() 156 start = from = list + cycles; /* start index is arbitrary */ in rotate() 166 } while (--cycles); in rotate()
|
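gzappend's rotate() above rotates a byte buffer in place, using gcd(len, rot) to count the independent element cycles and walking each one exactly once. A compact sketch of the same gcd-cycle ("juggling") technique, here written as a left rotation with array indices rather than gzappend's pointer arithmetic:

#include <stdio.h>

static unsigned gcd(unsigned a, unsigned b)
{
    while (b) {
        unsigned t = a % b;
        a = b;
        b = t;
    }
    return a;
}

/* Rotate list[0..len-1] left by rot positions in place, walking each of the
 * gcd(len, rot) element cycles exactly once. */
static void rotate_left(unsigned char *list, unsigned len, unsigned rot)
{
    if (len == 0)
        return;
    rot %= len;
    if (rot == 0)
        return;

    unsigned cycles = gcd(len, rot);        /* number of independent cycles */
    for (unsigned start = 0; start < cycles; start++) {
        unsigned char tmp = list[start];
        unsigned from = start;
        for (;;) {
            unsigned next = from + rot;     /* source of the value that     */
            if (next >= len)                /* lands in position 'from'     */
                next -= len;
            if (next == start)
                break;
            list[from] = list[next];
            from = next;
        }
        list[from] = tmp;
    }
}

int main(void)
{
    unsigned char buf[] = "abcdefgh";
    rotate_left(buf, 8, 3);
    printf("%s\n", buf);                    /* prints "defghabc" */
    return 0;
}
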