/frameworks/base/core/java/android/view/ |
D | WindowlessWindowLayout.java |
      42  float compatScale, ClientWindowFrames frames) {  in computeFrames() argument
      43  if (frames.attachedFrame == null) {  in computeFrames()
      44  frames.frame.set(0, 0, attrs.width, attrs.height);  in computeFrames()
      45  frames.parentFrame.set(frames.frame);  in computeFrames()
      46  frames.displayFrame.set(frames.frame);  in computeFrames()
      51  frames.attachedFrame.height());  in computeFrames()
      53  frames.attachedFrame.width());  in computeFrames()
      54  Gravity.apply(attrs.gravity, width, height, frames.attachedFrame,  in computeFrames()
      57  frames.frame);  in computeFrames()
      58  frames.displayFrame.set(frames.frame);  in computeFrames()
      [all …]
|
D | WindowRelayoutResult.java |
      37  public final ClientWindowFrames frames;  field in WindowRelayoutResult
      84  public WindowRelayoutResult(@NonNull ClientWindowFrames frames,  in WindowRelayoutResult() argument
      88  this.frames = requireNonNull(frames);  in WindowRelayoutResult()
      102  frames.readFromParcel(in);  in readFromParcel()
      113  frames.writeToParcel(dest, flags);  in writeToParcel()
|
/frameworks/av/media/libaudioprocessing/ |
D | RecordBufferConverter.cpp |
      78  AudioBufferProvider *provider, size_t frames)  in convert() argument
      90  for (size_t i = frames; i > 0; ) {  in convert()
      94  frames -= i; // cannot fill request.  in convert()
      109  if (mBufFrameSize != 0 && mBufFrames < frames) {  in convert()
      111  mBufFrames = frames;  in convert()
      115  memset(mBuf, 0, frames * mBufFrameSize);  in convert()
      116  frames = mResampler->resample((int32_t*)mBuf, frames, provider);  in convert()
      118  convertResampler(dst, mBuf, frames);  in convert()
      125  return frames;  in convert()
      225  void *dst, const void *src, size_t frames)  in convertNoResampler() argument
      [all …]
|
D | BufferProviders.cpp |
      286  void DownmixerBufferProvider::copyFrames(void *dst, const void *src, size_t frames)  in copyFrames() argument
      289  mInBuffer->setFrameCount(frames);  in copyFrames()
      290  mInBuffer->update(mInFrameSize * frames);  in copyFrames()
      291  mOutBuffer->setFrameCount(frames);  in copyFrames()
      296  mOutBuffer->update(mOutFrameSize * frames);  in copyFrames()
      301  mOutBuffer->commit(mOutFrameSize * frames);  in copyFrames()
      363  void RemixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)  in copyFrames() argument
      366  src, mInputChannels, mIdxAry, mSampleSize, frames);  in copyFrames()
      386  void ChannelMixBufferProvider::copyFrames(void *dst, const void *src, size_t frames)  in copyFrames() argument
      390  frames, false /* accumulate */);  in copyFrames()
      [all …]
|
/frameworks/av/services/audioflinger/afutils/ |
D | NBAIO_Tee.h |
      128  TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
      129  return mTee->set(format, flags, frames);
      133  TEE_FLAG flags = TEE_FLAG_NONE, size_t frames = 0) const {
      134  return mTee->set(Format_from_SR_C(sampleRate, channelCount, format), flags, frames);
      183  status_t set(const NBAIO_Format &format, TEE_FLAG flags, size_t frames) {  in set() argument
      202  if (frames == 0) {  in set()
      203  frames = (static_cast<long long>(DEFAULT_TEE_DURATION_MS) * format.mSampleRate)  in set()
      210  if (Format_isEqual(format, mFormat) && frames == mFrames) {  in set()
      215  auto sinksource = makeSinkSource(format, frames, &enabled);  in set()
      224  mFrames = frames;  in set()
      [all …]
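The set() excerpt above derives a default frame count from DEFAULT_TEE_DURATION_MS and the format's sample rate; the tail of that expression is cut off in the listing. A minimal sketch of that kind of milliseconds-to-frames conversion, assuming the truncated part is a division by 1000:

```cpp
// Sketch only: converts a tee duration in milliseconds to a frame count at the given
// sample rate. The "/ 1000" is an assumption; the divisor is truncated in the excerpt.
#include <cstddef>
#include <cstdint>

constexpr size_t framesForDurationMs(int64_t durationMs, uint32_t sampleRateHz) {
    return static_cast<size_t>(durationMs * sampleRateHz / 1000);
}

// Example: a 60-second tee at 48 kHz would buffer 2,880,000 frames.
static_assert(framesForDurationMs(60000, 48000) == 2880000, "60 s at 48 kHz");
```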
|
/frameworks/av/media/libaaudio/src/flowgraph/resampler/ |
D | README.md |
      39  Note that the number of output frames generated for a given number of input frames can vary.
      41  …Hz to 48000 Hz and using an input buffer with 960 frames. If you calculate the number of output fr…
      45  …umber of frames. So the resampler will sometimes generate 1044 frames and sometimes 1045 frames. O…
      47  You can either use a fixed number of input frames or a fixed number of output frames. The other fra…
      49  ## Calling the Resampler with a fixed number of OUTPUT frames
      51  …his example, suppose we have a fixed number of output frames and a variable number of input frames.
      56  int numOutputFrames; // number of frames of output
      72  ## Calling the Resampler with a fixed number of INPUT frames
      74  …this example, suppose we have a fixed number of input frames and a variable number of output frame…
      80  int numInputFrames; // number of frames of input
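For the fixed-OUTPUT-frames case named in the README matches above, the calling loop alternates between feeding input frames and pulling output frames until the requested output count is reached. This is a sketch of that loop, not the README's own example: `Resampler` is a template stand-in for the real resampler class, and isWriteNeeded(), writeNextFrame() and readNextFrame() are assumed method names.

```cpp
// Sketch under stated assumptions, not the library's verbatim API.
template <typename Resampler>
int resampleFixedOutput(Resampler &resampler,
                        const float *input, int numInputFrames,
                        float *output, int numOutputFrames,
                        int channelCount) {
    int framesProduced = 0;
    while (framesProduced < numOutputFrames) {
        if (resampler.isWriteNeeded()) {        // resampler wants another input frame
            if (numInputFrames <= 0) break;     // input exhausted before output was filled
            resampler.writeNextFrame(input);
            input += channelCount;              // advance one multi-channel frame
            --numInputFrames;
        } else {                                // an output frame is ready to be read
            resampler.readNextFrame(output);
            output += channelCount;
            ++framesProduced;
        }
    }
    return framesProduced;                      // may be short if input ran out
}
```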
|
/frameworks/base/packages/SystemUI/src/com/android/systemui/statusbar/events/ |
D | SystemEventChipAnimationController.kt |
      38  import com.android.systemui.util.animation.AnimationUtil.Companion.frames
      109  startDelay = 7.frames  in onSystemEventAnimationBegin()
      110  duration = 5.frames  in onSystemEventAnimationBegin()
      116  startDelay = 10.frames  in onSystemEventAnimationBegin()
      117  duration = 10.frames  in onSystemEventAnimationBegin()
      122  startDelay = 7.frames  in onSystemEventAnimationBegin()
      123  duration = 23.frames  in onSystemEventAnimationBegin()
      151  duration = 9.frames  in createMoveOutAnimationForDot()
      159  startDelay = 9.frames  in createMoveOutAnimationForDot()
      160  duration = 20.frames  in createMoveOutAnimationForDot()
      [all …]
|
/frameworks/base/packages/SystemUI/src/com/android/systemui/statusbar/phone/fragment/ |
D | StatusBarSystemEventAnimator.kt |
      28  import com.android.systemui.util.animation.AnimationUtil.Companion.frames
      71  duration = 23.frames  in onSystemEventAnimationBegin()
      78  duration = 8.frames  in onSystemEventAnimationBegin()
      93  duration = 23.frames  in onSystemEventAnimationFinish()
      94  startDelay = 7.frames  in onSystemEventAnimationFinish()
      101  duration = 5.frames  in onSystemEventAnimationFinish()
      102  startDelay = 11.frames  in onSystemEventAnimationFinish()
|
/frameworks/av/media/libeffects/downmix/tests/ |
D | downmix_tests.cpp |
      134  const size_t frames = input.size() / channels;  in channelStatistics() local
      135  if (frames > 0) {  in channelStatistics()
      137  for (size_t i = 0; i < frames; ++i) {  in channelStatistics()
      156  size_t frames = 100; // set to an even number (2, 4, 6 ... ) stream alternates +1, -1.  in testBalance() local
      159  std::vector<float> input(frames * inChannels);  in testBalance()
      160  std::vector<float> output(frames * outChannels);  in testBalance()
      173  for (unsigned j = 0; j < frames; ++j) {  in testBalance()
      178  run(sampleRate, channelMask, input, output, frames);  in testBalance()
      233  std::vector<float>& input, std::vector<float>& output, size_t frames) {  in run() argument
      236  ASSERT_EQ(frames * inputChannelCount_, input.size());  in run()
      [all …]
|
/frameworks/av/media/libstagefright/webm/ |
D | WebmFrameThread.cpp |
      115  List<const sp<WebmFrame> >& frames,  in initCluster()
      118  CHECK(!frames.empty() && children.empty());  in initCluster()
      120  const sp<WebmFrame> f = *(frames.begin());  in initCluster()
      141  void WebmFrameSinkThread::flushFrames(List<const sp<WebmFrame> >& frames, bool last) {  in flushFrames() argument
      142  if (frames.empty()) {  in flushFrames()
      148  initCluster(frames, clusterTimecodeL, children);  in flushFrames()
      152  size_t n = frames.size();  in flushFrames()
      165  const sp<WebmFrame> f = *(frames.begin());  in flushFrames()
      172  initCluster(frames, clusterTimecodeL, children);  in flushFrames()
      175  frames.erase(frames.begin());  in flushFrames()
      [all …]
|
/frameworks/base/packages/SystemUI/src/com/android/systemui/statusbar/notification/ |
D | ColorUpdateLogger.kt |
      47  private val frames: MutableList<Frame> = mutableListOf()  constant
      60  val didAppend = frames.lastOrNull()?.tryAddTrigger(event) == true  in logTriggerEvent()
      62  frames.add(Frame(event))  in logTriggerEvent()
      63  if (frames.size > maxFrames) frames.removeAt(0)  in logTriggerEvent()
      70  val frame = frames.lastOrNull() ?: return  in logEvent()
      82  val frame = frames.lastOrNull() ?: return  in logNotificationEvent()
      90  pw.printCollection("frames", frames) { it.dump(pw) }  in dump()
|
/frameworks/av/media/libaaudio/src/legacy/ |
D | AudioStreamLegacy.h |
      75  virtual int64_t incrementClientFrameCounter(int32_t frames) = 0;
      109  int64_t incrementFramesWritten(int32_t frames) {  in incrementFramesWritten() argument
      110  return mFramesWritten.increment(frames);  in incrementFramesWritten()
      113  int64_t incrementFramesRead(int32_t frames) {  in incrementFramesRead() argument
      114  return mFramesRead.increment(frames);  in incrementFramesRead()
|
/frameworks/av/media/libaudioprocessing/tests/ |
D | test_utils.h |
      102  TestProvider(void* addr, size_t frames, size_t frameSize,
      105  mNumFrames(frames),
      194  static void createSine(void *vbuffer, size_t frames,
      199  for (size_t i = 0; i < frames; ++i) {
      218  static void createChirp(void *vbuffer, size_t frames,
      224  double k = (maxfreq - minfreq) / (2. * tscale * frames);
      225  for (size_t i = 0; i < frames; ++i) {
      281  createBufferByFrames<T>(info.channels, info.samplerate, info.frames);
      291  void createBufferByFrames(size_t channels, uint32_t sampleRate, size_t frames)
      293  mNumFrames = frames;
|
/frameworks/av/media/libstagefright/rtsp/ |
D | ARTPAssembler.cpp |
      81  const List<sp<ABuffer> > &frames) {  in MakeADTSCompoundFromAACFrames()
      83  for (List<sp<ABuffer> >::const_iterator it = frames.begin();  in MakeADTSCompoundFromAACFrames()
      84  it != frames.end(); ++it) {  in MakeADTSCompoundFromAACFrames()
      91  for (List<sp<ABuffer> >::const_iterator it = frames.begin();  in MakeADTSCompoundFromAACFrames()
      92  it != frames.end(); ++it) {  in MakeADTSCompoundFromAACFrames()
      121  CopyTimes(accessUnit, *frames.begin());  in MakeADTSCompoundFromAACFrames()
|
/frameworks/native/services/surfaceflinger/TimeStats/timestatsatomsproto/ |
D | timestats_atoms.proto |
      54  // Total number of frames presented during the tracing period
      58  // Total number of frames missed
      62  // Total number of frames that fell back to client composition
      95  // Number of frames where SF saw a frame, based on its frame timeline.
      99  // Number of frames where SF saw a janky frame.
      102  // Number of janky frames where SF spent a long time on the CPU.
      105  // Number of janky frames where SF spent a long time on the GPU.
      108  // Number of janky frames where SF missed the frame deadline, but there
      112  // Number of janky frames where the app missed the frame deadline, but
      116  // Number of janky frames that were caused because of scheduling errors in
      [all …]
|
/frameworks/base/core/java/android/speech/tts/ |
D | SynthesisPlaybackQueueItem.java |
      213  public final int frames;  field in SynthesisPlaybackQueueItem.ProgressMarker
      219  public ProgressMarker(int frames, int start, int end) {  in ProgressMarker() argument
      220  this.frames = frames;  in ProgressMarker()
      232  int markerInFrames = marker.frames == 0 ? 1 : marker.frames;  in updateMarker()
      251  getDispatcher().dispatchOnRangeStart(marker.start, marker.end, marker.frames);  in onMarkerReached()
|
/frameworks/av/services/audioflinger/timing/ |
D | SynchronizedRecordState.h |
      66  ssize_t updateRecordFrames(size_t frames) {  in updateRecordFrames() argument
      70  ALOGV("%s: trigger countdown %zd by %zu frames", __func__, mFramesToDrop, frames);  in updateRecordFrames()
      71  mFramesToDrop -= (ssize_t)frames;  in updateRecordFrames()
      76  mFramesToDrop += (ssize_t)frames;  in updateRecordFrames()
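The excerpt shows mFramesToDrop being counted down (and, in another branch, up) by each buffer's frame count. Below is a hypothetical, stand-alone illustration of only the positive countdown idea; it is not the real class, which also handles a negative counter that this sketch does not model.

```cpp
// Hypothetical illustration of a frames-to-drop countdown: a positive counter is
// decremented by each captured buffer's frame count until it reaches zero.
#include <cstddef>

struct FrameDropCountdown {
    long long framesToDrop = 0;   // > 0: this many captured frames still need to be dropped

    // Returns how many frames of this buffer should be kept (the trailing part).
    size_t update(size_t frames) {
        if (framesToDrop <= 0) {
            return frames;                                    // nothing left to drop
        }
        if (static_cast<long long>(frames) <= framesToDrop) {
            framesToDrop -= static_cast<long long>(frames);   // drop the whole buffer
            return 0;
        }
        const size_t keep = frames - static_cast<size_t>(framesToDrop);
        framesToDrop = 0;                                     // countdown ends mid-buffer
        return keep;
    }
};
```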
|
/frameworks/av/media/module/bqhelper/tests/ |
D | FrameDropper_test.cpp |
      99  void RunTest(const TestFrame* frames, size_t size) {  in RunTest() argument
      102  int64_t testTimeUs = frames[i].timeUs + jitter;  in RunTest()
      104  (long long)frames[i].timeUs, (long long)testTimeUs, jitter);  in RunTest()
      105  EXPECT_EQ(frames[i].shouldDrop, mFrameDropper->shouldDrop(testTimeUs));  in RunTest()
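RunTest() above walks a table of per-frame timestamps and expected drop decisions. A hedged, generic version of that table-driven pattern follows; the TestFrame fields come from the excerpt, while `Dropper` is a stand-in assumed only to expose shouldDrop(timeUs) as used there.

```cpp
// Sketch of a table-driven drop-decision check; not the real FrameDropper test fixture.
#include <cstddef>
#include <cstdint>

struct TestFrame {
    int64_t timeUs;    // presentation time of the frame
    bool shouldDrop;   // expected decision for this frame
};

template <typename Dropper>
bool checkDropDecisions(Dropper &dropper, const TestFrame *frames, size_t size) {
    for (size_t i = 0; i < size; ++i) {
        if (dropper.shouldDrop(frames[i].timeUs) != frames[i].shouldDrop) {
            return false;   // decision mismatch at entry i
        }
    }
    return true;
}
```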
|
/frameworks/native/services/inputflinger/reader/ |
D | TouchVideoDevice.cpp |
      162  std::vector<TouchVideoFrame> frames = readFrames();  in readAndQueueFrames() local
      163  const size_t numFrames = frames.size();  in readAndQueueFrames()
      169  mFrames.insert(mFrames.end(), std::make_move_iterator(frames.begin()),  in readAndQueueFrames()
      170  std::make_move_iterator(frames.end()));  in readAndQueueFrames()
      181  std::vector<TouchVideoFrame> frames = std::move(mFrames);  in consumeFrames() local
      183  return frames;  in consumeFrames()
      223  std::vector<TouchVideoFrame> frames;  in readFrames() local
      229  frames.push_back(std::move(*frame));  in readFrames()
      231  return frames;  in readFrames()
|
/frameworks/av/media/libaudioprocessing/include/media/ |
D | BufferProviders.h |
      88  virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
      109  virtual void copyFrames(void *dst, const void *src, size_t frames);
      141  void copyFrames(void *dst, const void *src, size_t frames) override;
      163  virtual void copyFrames(void *dst, const void *src, size_t frames);
      180  virtual void copyFrames(void *dst, const void *src, size_t frames);
      193  virtual void copyFrames(void *dst, const void *src, size_t frames);
      267  void copyFrames(void *dst, const void *src, size_t frames) override;
      297  void copyFrames(void *dst, const void *src, size_t frames) override;
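Every provider above implements the same copyFrames(dst, src, frames) hook, where `frames` counts frames rather than bytes. A hypothetical pass-through implementation of that contract is sketched below; the abstract base here is a stand-in, not the real CopyBufferProvider class.

```cpp
// Sketch only: illustrates the copyFrames() contract, not the real provider hierarchy.
#include <cstddef>
#include <cstring>

struct FrameCopier {
    virtual ~FrameCopier() = default;
    // Convert exactly `frames` frames from src to dst; `frames` is a frame count, not bytes.
    virtual void copyFrames(void *dst, const void *src, size_t frames) = 0;
};

// Pass-through: frame size is fixed at construction, so the byte count is frames * frameSize.
struct PassthroughCopier : FrameCopier {
    explicit PassthroughCopier(size_t frameSizeBytes) : mFrameSize(frameSizeBytes) {}
    void copyFrames(void *dst, const void *src, size_t frames) override {
        std::memcpy(dst, src, frames * mFrameSize);
    }
  private:
    size_t mFrameSize;
};
```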
|
D | RecordBufferConverter.h |
      63  size_t convert(void *dst, AudioBufferProvider *provider, size_t frames);
      83  void convertNoResampler(void *dst, const void *src, size_t frames);
      86  void convertResampler(void *dst, /*not-a-const*/ void *src, size_t frames);
|
/frameworks/wilhelm/tools/permute/ |
D | permute.c |
      202  switch (sfinfo_in.frames) {  in permute()
      205  fprintf(stderr, "%s: unsupported frames %d\n", path_in, (int) sfinfo_in.frames);  in permute()
      212  double durationSeconds = (double) sfinfo_in.frames / (double) sfinfo_in.samplerate;  in permute()
      224  used = split(&s, 0, sfinfo_in.frames, s.mSegmentMax);  in permute()
      241  void *ptr = malloc(sfinfo_in.frames * frameSizeRead);  in permute()
      244  count = sf_readf_short(sf_in, ptr, sfinfo_in.frames);  in permute()
      245  if (count != sfinfo_in.frames) {  in permute()
      247  (int) sfinfo_in.frames, (int) count);  in permute()
      279  assert(permutedStart == sfinfo_in.frames);  in permute()
|
/frameworks/av/services/audioflinger/datapath/ |
D | AudioStreamOut.cpp |
      48  status_t AudioStreamOut::getRenderPosition(uint64_t *frames)  in getRenderPosition() argument
      60  *frames = halPosition / mRateMultiplier;  in getRenderPosition()
      65  status_t AudioStreamOut::getPresentationPosition(uint64_t *frames, struct timespec *timestamp)  in getPresentationPosition() argument
      83  *frames = adjustedPosition / mRateMultiplier;  in getPresentationPosition()
      86  *frames = halPosition;  in getPresentationPosition()
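Both position getters above divide the HAL-reported position by mRateMultiplier to convert device frames back into application frames. A minimal sketch of that adjustment, assuming a multiplier of 1 means no conversion is needed:

```cpp
// Sketch of the rate-multiplier adjustment visible in getRenderPosition() /
// getPresentationPosition(): HAL frame positions are scaled back to client frames.
#include <cstdint>

inline uint64_t halToClientFrames(uint64_t halPosition, uint32_t rateMultiplier) {
    return (rateMultiplier > 1) ? halPosition / rateMultiplier : halPosition;
}
```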
|
/frameworks/native/opengl/tests/hwc/ |
D | hwcStress.cpp |
      199  static vector <vector <sp<GraphicBuffer> > > frames;  variable
      413  list = hwcTestCreateLayerList(testRandMod(frames.size()) + 1);  in main()
      421  selectedFrames = vectorRandSelect(frames, list->numHwLayers);  in main()
      562  frames.clear();  in initFrames()
      563  frames.resize(rows);  in initFrames()
      591  frames[row].resize(cols);  in initFrames()
      596  frames[row][col] = new GraphicBuffer(w, h, format, texUsage);  in initFrames()
      597  if ((rv = frames[row][col]->initCheck()) != NO_ERROR) {  in initFrames()
      604  hwcTestFillColor(frames[row][col].get(), color, alpha);  in initFrames()
      607  frames[row][col].get(), frames[row][col]->handle,  in initFrames()
      [all …]
|
/frameworks/av/media/libaudioprocessing/tests/fuzzer/ |
D | libaudioprocessing_record_buffer_converter_fuzzer.cpp |
      158  const size_t frames = fdp.ConsumeIntegralInRange<size_t>(0, MAX_FRAMES + 1);  in LLVMFuzzerTestOneInput() local
      159  int8_t dst[dstFrameSize * frames];  in LLVMFuzzerTestOneInput()
      160  memset(dst, 0, sizeof(int8_t) * dstFrameSize * frames);  in LLVMFuzzerTestOneInput()
      171  converter.convert(dst, &provider, frames);  in LLVMFuzzerTestOneInput()
|