/external/zstd/lib/compress/ |
D | zstd_fast.c |
      68   U32 offset_1=rep[0], offset_2=rep[1];   in ZSTD_compressBlock_fast_generic() local
      79   if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;   in ZSTD_compressBlock_fast_generic()
     101   BYTE const* repMatch = ip2 - offset_1;   in ZSTD_compressBlock_fast_generic()
     115   if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {   in ZSTD_compressBlock_fast_generic()
     141   offset_2 = offset_1;   in ZSTD_compressBlock_fast_generic()
     142   offset_1 = (U32)(ip0-match0);   in ZSTD_compressBlock_fast_generic()
     143   offcode = offset_1 + ZSTD_REP_MOVE;   in ZSTD_compressBlock_fast_generic()
     167   …{ U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; } /* swap offset_2 <=> offs…   in ZSTD_compressBlock_fast_generic()
     178   rep[0] = offset_1 ? offset_1 : offsetSaved;   in ZSTD_compressBlock_fast_generic()
     224   U32 offset_1=rep[0], offset_2=rep[1];   in ZSTD_compressBlock_fast_dictMatchState_generic() local
     [all …]
|
D | zstd_double_fast.c |
      71   U32 offset_1=rep[0], offset_2=rep[1];   in ZSTD_compressBlock_doubleFast_generic() local
     115   if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;   in ZSTD_compressBlock_doubleFast_generic()
     120   assert(offset_1 <= dictAndPrefixLength);   in ZSTD_compressBlock_doubleFast_generic()
     137   const U32 repIndex = curr + 1 - offset_1;   in ZSTD_compressBlock_doubleFast_generic()
     157   && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {   in ZSTD_compressBlock_doubleFast_generic()
     158   mLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;   in ZSTD_compressBlock_doubleFast_generic()
     250   offset_2 = offset_1;   in ZSTD_compressBlock_doubleFast_generic()
     251   offset_1 = offset;   in ZSTD_compressBlock_doubleFast_generic()
     283   …U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; /* swap offset_2 <=> offset…   in ZSTD_compressBlock_doubleFast_generic()
     300   …U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; /* swap offset_2 <=> offset_…   in ZSTD_compressBlock_doubleFast_generic()
     [all …]
|
D | zstd_lazy.c |
     901   U32 offset_1 = rep[0], offset_2 = rep[1], savedOffset=0;   in ZSTD_compressBlock_lazy_generic() local
     927   if (offset_1 > maxRep) savedOffset = offset_1, offset_1 = 0;   in ZSTD_compressBlock_lazy_generic()
     932   assert(offset_1 <= dictAndPrefixLength);   in ZSTD_compressBlock_lazy_generic()
     950   const U32 repIndex = (U32)(ip - base) + 1 - offset_1;   in ZSTD_compressBlock_lazy_generic()
     963   && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {   in ZSTD_compressBlock_lazy_generic()
     964   matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;   in ZSTD_compressBlock_lazy_generic()
     985   && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {   in ZSTD_compressBlock_lazy_generic()
     986   size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;   in ZSTD_compressBlock_lazy_generic()
     993   const U32 repIndex = (U32)(ip - base) - offset_1;   in ZSTD_compressBlock_lazy_generic()
    1020   && (offset) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {   in ZSTD_compressBlock_lazy_generic()
     [all …]
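The three zstd listings above are all pieces of the same repeat-offset ("repcode") bookkeeping: offset_1 holds the most recently used match offset and offset_2 the one before it; a match at a brand-new offset pushes offset_1 down into offset_2, while a hit against the second repcode swaps the two slots. A minimal standalone sketch of that state machine, using hypothetical helper names rather than zstd's actual API:

```c
#include <stdint.h>

typedef uint32_t U32;

/* offset_1: most recently used match offset; offset_2: the one before it.
 * (Hypothetical struct -- zstd keeps these as locals loaded from rep[].) */
typedef struct {
    U32 offset_1;
    U32 offset_2;
} RepOffsets;

/* A match found at a brand-new offset pushes offset_1 down into offset_2,
 * as on lines 141-142 of the zstd_fast.c matches. */
static void rep_push(RepOffsets *r, U32 newOffset)
{
    r->offset_2 = r->offset_1;
    r->offset_1 = newOffset;
}

/* A match against the *second* repcode promotes it by swapping the two
 * slots -- the "swap offset_2 <=> offset_1" idiom seen in all three files. */
static void rep_swap(RepOffsets *r)
{
    U32 const tmpOff = r->offset_2;
    r->offset_2 = r->offset_1;
    r->offset_1 = tmpOff;
}
```

At block entry the pair is loaded from rep[0]/rep[1] (an offset_1 larger than the valid window is parked in offsetSaved/savedOffset and zeroed, line 79/927), and at block exit the surviving values are written back (line 178).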
|
/external/flatbuffers/net/FlatBuffers/ |
D | Table.cs |
     174   public static int CompareStrings(int offset_1, int offset_2, ByteBuffer bb)   in CompareStrings() argument
     176   offset_1 += bb.GetInt(offset_1);   in CompareStrings()
     178   var len_1 = bb.GetInt(offset_1);   in CompareStrings()
     180   var startPos_1 = offset_1 + sizeof(int);   in CompareStrings()
     193   public static int CompareStrings(int offset_1, byte[] key, ByteBuffer bb)   in CompareStrings() argument
     195   offset_1 += bb.GetInt(offset_1);   in CompareStrings()
     196   var len_1 = bb.GetInt(offset_1);   in CompareStrings()
     198   var startPos_1 = offset_1 + sizeof(int);   in CompareStrings()
|
/external/flatbuffers/java/com/google/flatbuffers/ |
D | Table.java |
     256   protected static int compareStrings(int offset_1, int offset_2, ByteBuffer bb) {   in compareStrings() argument
     257   offset_1 += bb.getInt(offset_1);   in compareStrings()
     259   int len_1 = bb.getInt(offset_1);   in compareStrings()
     261   int startPos_1 = offset_1 + SIZEOF_INT;   in compareStrings()
     278   protected static int compareStrings(int offset_1, byte[] key, ByteBuffer bb) {   in compareStrings() argument
     279   offset_1 += bb.getInt(offset_1);   in compareStrings()
     280   int len_1 = bb.getInt(offset_1);   in compareStrings()
     282   int startPos_1 = offset_1 + Constants.SIZEOF_INT;   in compareStrings()
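Both Table implementations (C# and Java) walk the same serialized layout: the field at offset_1 stores a 32-bit relative offset to the string, and the string itself is a 32-bit length prefix followed by the raw bytes. A rough C sketch of that walk, assuming a little-endian buffer; the real code goes through ByteBuffer and compares byte by byte, so this is an illustration of the layout, not the FlatBuffers API:

```c
#include <stdint.h>
#include <string.h>

/* Read a little-endian 32-bit int at pos (sketch only). */
static int32_t read_i32_le(const uint8_t *buf, int32_t pos)
{
    int32_t v;
    memcpy(&v, buf + pos, sizeof v);   /* assumes a little-endian host */
    return v;
}

/* Compare two serialized strings given the positions of their offset
 * fields, mirroring CompareStrings(offset_1, offset_2, bb) above. */
static int compare_strings(const uint8_t *buf, int32_t offset_1, int32_t offset_2)
{
    offset_1 += read_i32_le(buf, offset_1);        /* follow the indirection */
    offset_2 += read_i32_le(buf, offset_2);
    int32_t len_1 = read_i32_le(buf, offset_1);    /* length prefix */
    int32_t len_2 = read_i32_le(buf, offset_2);
    int32_t startPos_1 = offset_1 + (int32_t)sizeof(int32_t);
    int32_t startPos_2 = offset_2 + (int32_t)sizeof(int32_t);
    int32_t len = len_1 < len_2 ? len_1 : len_2;
    int cmp = memcmp(buf + startPos_1, buf + startPos_2, (size_t)len);
    return cmp != 0 ? cmp : (int)(len_1 - len_2);  /* shorter string sorts first */
}
```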
|
/external/tensorflow/tensorflow/lite/kernels/internal/reference/ |
D | strided_slice.h |
      70   for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2),   in StridedSlice() local
      73   !LoopCondition(offset_1, end_1, params_copy.strides[1]);   in StridedSlice()
      74   offset_1 += step_1) {   in StridedSlice()
      75   for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3),   in StridedSlice()
      76   end_2 = (offset_1 + stop_2) * input_shape.Dims(3),   in StridedSlice()
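The reference kernel here and the optimized kernel in optimized_ops.h further down carry a running flat offset through the loop nest: each level scales the parent offset into the next dimension, so the innermost loop variable is already a direct index into the flattened tensor. A 2-D sketch of the pattern, assuming positive strides and start < stop (the real LoopCondition also handles negative strides):

```c
/* 2-D analogue of the loop nest above: offset_0 is the dim-0 index
 * pre-scaled by dim1, so offset_1 is already a flat index into input[]. */
static void strided_slice_2d(const float *input, int dim1,
                             int start0, int stop0, int stride0,
                             int start1, int stop1, int stride1,
                             float *output)
{
    int out = 0;
    for (int offset_0 = start0 * dim1, end_0 = stop0 * dim1,
             step_0 = stride0 * dim1;
         offset_0 < end_0; offset_0 += step_0) {
        for (int offset_1 = offset_0 + start1, end_1 = offset_0 + stop1;
             offset_1 < end_1; offset_1 += stride1) {
            output[out++] = input[offset_1];   /* copy one sliced element */
        }
    }
}
```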
|
/external/python/cpython3/Lib/zoneinfo/ |
D | _zoneinfo.py |
     374   offset_1 = utcoffsets[trans_idx[0]]
     375   if offset_1 > offset_0:
     376   offset_1, offset_0 = offset_0, offset_1
     378   offset_0 = offset_1 = utcoffsets[0]
     381   trans_list_wall[1][0] += offset_1
     385   offset_1 = utcoffsets[trans_idx[i]]
     387   if offset_1 > offset_0:
     388   offset_1, offset_0 = offset_0, offset_1
     391   trans_list_wall[1][i] += offset_1
|
/external/python/cpython3/Modules/ |
D | _zoneinfo.c |
    2071   int64_t offset_0, offset_1, buff;   in ts_to_local() local
    2074   offset_1 = utcoff[trans_idx[0]];   in ts_to_local()
    2076   if (offset_1 > offset_0) {   in ts_to_local()
    2077   _swap(offset_0, offset_1, buff);   in ts_to_local()
    2082   offset_1 = utcoff[0];   in ts_to_local()
    2086   trans_local[1][0] += offset_1;   in ts_to_local()
    2090   offset_1 = utcoff[trans_idx[i]];   in ts_to_local()
    2092   if (offset_1 > offset_0) {   in ts_to_local()
    2093   _swap(offset_1, offset_0, buff);   in ts_to_local()
    2097   trans_local[1][i] += offset_1;   in ts_to_local()
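The Python and C zoneinfo listings implement the same loading step: each UTC transition is converted into two wall-clock instants, one using the UTC offset in effect before the transition and one using the offset after it, swapped so that offset_0 holds the larger of the two. The pair brackets the ambiguous (fold) or skipped (gap) interval of local time around the transition. A small C sketch of that ordering, with hypothetical names rather than CPython's ts_to_local():

```c
#include <stdint.h>

/* For one UTC transition, compute the two candidate wall-clock instants
 * using the offsets before (off_prev) and after (off_next) the change,
 * ordered so wall_hi uses the larger offset -- mirroring the swap above. */
static void wall_bounds(int64_t trans_utc, int64_t off_prev, int64_t off_next,
                        int64_t *wall_hi, int64_t *wall_lo)
{
    int64_t offset_0 = off_prev, offset_1 = off_next, buff;
    if (offset_1 > offset_0) {               /* keep offset_0 >= offset_1 */
        buff = offset_0; offset_0 = offset_1; offset_1 = buff;
    }
    *wall_hi = trans_utc + offset_0;         /* transition seen with the larger offset */
    *wall_lo = trans_utc + offset_1;         /* transition seen with the smaller offset */
}
```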
|
/external/mesa3d/src/intel/compiler/ |
D | brw_eu_validate.c |
    1591   unsigned offset_1 = offset_0; \   in region_alignment_rules()
    1595   offset_1 = __builtin_ctzll(src ## n ## _access_mask[i]) - 32; \   in region_alignment_rules()
    1600   ERROR_IF(num_sources == 2 && offset_0 != offset_1, \   in region_alignment_rules()
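The brw_eu_validate.c matches come from a macro that rejects two-source instructions whose sources start at different byte offsets once their regions spill into a second register. The "- 32" suggests the upper 32 bits of the per-channel access mask describe the second 32-byte register; under that assumption, the index of the first set bit gives the starting byte within it. A sketch of just that bit trick (GCC/Clang builtin), not the validator itself:

```c
#include <stdint.h>

/* Byte offset, within the second 32-byte register, at which an access
 * begins -- assuming every set bit of this per-channel mask lies in the
 * upper 32 bits (the channel's element sits entirely in register 1).
 * __builtin_ctzll is undefined for 0, so callers must check that first. */
static unsigned second_reg_byte_offset(uint64_t access_mask)
{
    return (unsigned)__builtin_ctzll(access_mask) - 32u;
}
```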
|
/external/libaom/libaom/av1/common/x86/ |
D | jnt_convolve_avx2.c |
     212   const int offset_1 = (1 << (bd + FILTER_BITS - 2));   in av1_dist_wtd_convolve_y_avx2() local
     213   const __m256i offset_const_1 = _mm256_set1_epi16(offset_1);   in av1_dist_wtd_convolve_y_avx2()
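In the libaom match, offset_1 is a rounding/bias constant derived from the bit depth and the filter precision, broadcast into every 16-bit lane of a 256-bit register so it can be added to a whole row of intermediate convolution results at once. A sketch of that setup; FILTER_BITS == 7 and bd == 8 are assumptions about this low-bit-depth code path, the real constants live in libaom's headers:

```c
#include <immintrin.h>   /* AVX2 intrinsics */

#define FILTER_BITS 7    /* assumed filter precision */

/* Build the broadcast bias constant used on lines 212-213 above. */
static __m256i make_offset_const_1(void)
{
    const int bd = 8;                                    /* 8-bit path */
    const int offset_1 = 1 << (bd + FILTER_BITS - 2);    /* 8192 */
    return _mm256_set1_epi16((short)offset_1);           /* 16 lanes of 8192 */
}
```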
|
/external/libgav1/libgav1/src/ |
D | motion_vector.cc |
     488   const int offset_1 = tile.current_frame()   in TemporalScan() local
     492   Clip3(offset_1, -kMaxFrameDistance, kMaxFrameDistance);   in TemporalScan()
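In the libgav1 match, offset_1 is a relative frame distance used by the temporal motion-vector scan, clamped to ±kMaxFrameDistance before it scales projected motion vectors. A sketch of the clamp; kMaxFrameDistance == 31 is an assumption based on the AV1 spec constant MAX_FRAME_DISTANCE:

```c
enum { kMaxFrameDistance = 31 };   /* assumed value of the spec constant */

/* Clamp value into [low, high], like libgav1's Clip3(value, low, high). */
static int clip3(int value, int low, int high)
{
    return value < low ? low : (value > high ? high : value);
}

/* Limit a relative frame distance before it is used for MV projection. */
static int clamp_frame_distance(int offset_1)
{
    return clip3(offset_1, -kMaxFrameDistance, kMaxFrameDistance);
}
```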
|
/external/tensorflow/tensorflow/lite/kernels/internal/optimized/ |
D | optimized_ops.h |
    5482   for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2),   in StridedSlice() local
    5485   !LoopCondition(offset_1, end_1, params_copy.strides[1]);   in StridedSlice()
    5486   offset_1 += step_1) {   in StridedSlice()
    5487   for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3),   in StridedSlice()
    5488   end_2 = (offset_1 + stop_2) * input_shape.Dims(3),   in StridedSlice()
|