/external/chromium_org/third_party/WebKit/Source/devtools/front_end/timeline/
TimelineFrameModel.js | 50 frames: function() 80 var frames = this._frames; 81 var firstFrame = insertionIndexForObjectInListSortedByFunction(startTime, frames, compareEndTime); 82 var lastFrame = insertionIndexForObjectInListSortedByFunction(endTime, frames, compareStartTime); 83 return frames.slice(firstFrame, lastFrame); 118 // - only show frames that either did not wait for the main thread frame or had one committed. 519 * @param {!Array.<!WebInspector.TimelineFrame>} frames 521 WebInspector.FrameStatistics = function(frames) 523 this.frameCount = frames.length; 527 this.startOffset = frames[0].startTimeOffset [all...] |
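The frames() getter above slices a time-sorted frame list with two binary-search insertion indices: the first frame whose end time reaches startTime, and the first frame whose start time reaches endTime. A minimal C++ sketch of the same slice using std::lower_bound; the Frame struct and function name are illustrative, not WebKit's.

    #include <algorithm>
    #include <vector>

    struct Frame {
      double startTime;
      double endTime;
    };

    // Return the frames whose time range intersects [startTime, endTime),
    // assuming `frames` is sorted by start (and hence end) time.
    std::vector<Frame> framesInRange(const std::vector<Frame>& frames,
                                     double startTime, double endTime) {
      // First frame whose endTime is >= startTime (it may still overlap the range).
      auto first = std::lower_bound(
          frames.begin(), frames.end(), startTime,
          [](const Frame& f, double t) { return f.endTime < t; });
      // First frame whose startTime is >= endTime (everything after is past the range).
      auto last = std::lower_bound(
          frames.begin(), frames.end(), endTime,
          [](const Frame& f, double t) { return f.startTime < t; });
      return std::vector<Frame>(first, last);
    }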
/bootable/recovery/minui/ |
minui.h | 98 // should have a 'Frames' text chunk whose value is the number of 99 // frames this image represents. The pixel data itself is interlaced 102 int* frames, gr_surface** pSurface);
|
/bootable/recovery/ |
screen_ui.h | 121 void LoadBitmapArray(const char* filename, int* frames, gr_surface** surface);
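LoadBitmapArray() decodes a multi-frame PNG (per the minui.h comment above, the frame count comes from a 'Frames' text chunk and the rows are interlaced) into a count plus an array of gr_surface handles. A hedged usage sketch follows; LoadBitmapArray is really a ScreenRecoveryUI member, and the resource name, bare minui draw calls, and frame pacing here are assumptions, not the recovery UI's actual animation loop.

    #include <unistd.h>
    #include "minui/minui.h"   // gr_surface, gr_blit, gr_flip

    // Sketch only: assumes LoadBitmapArray is callable here and that the usual
    // minui drawing calls are available; 100 ms per frame is an assumed period.
    void PlayInstallAnimationOnce() {
      int frames = 0;
      gr_surface* surfaces = nullptr;
      LoadBitmapArray("icon_installing", &frames, &surfaces);

      for (int i = 0; i < frames; ++i) {
        gr_surface s = surfaces[i];
        gr_blit(s, 0, 0, gr_get_width(s), gr_get_height(s), 0, 0);
        gr_flip();
        usleep(100 * 1000);  // assumed frame period
      }
    }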
|
/external/chromium_org/media/audio/ |
audio_power_monitor.cc | 34 DCHECK_LE(num_frames, buffer.frames());
|
/external/chromium_org/media/base/ |
audio_converter.h | 54 // frames are available to satisfy the request. The return value is the 68 // for 10 frames of data (indicated by the size of AudioBus provided) and the 89 // The maximum size in frames that guarantees we will only make a single call 105 void CreateUnmixedAudioIfNecessary(int frames);
|
test_helpers.h | 88 // Create an AudioBuffer containing |frames| frames of data, where each sample 93 // |start| + |channel| * |frames| * |increment| + index * |increment| 101 // start + frames * increment 102 // start + (frames + 1) * increment 103 // start + (frames + 2) * increment, ... 113 size_t frames,
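The helper described here fills deterministic test audio: sample (channel, index) equals start + channel * frames * increment + index * increment, so every channel is a linear ramp offset from the previous one. A small illustrative generator for a planar float buffer; the container and function name are not media/'s actual helpers.

    #include <vector>

    // Illustrative only: fill a planar buffer so that, as described above,
    //   sample(ch, i) = start + ch * frames * increment + i * increment
    std::vector<std::vector<float>> MakeTestChannels(int channels, int frames,
                                                     float start, float increment) {
      std::vector<std::vector<float>> data(channels, std::vector<float>(frames));
      for (int ch = 0; ch < channels; ++ch) {
        for (int i = 0; i < frames; ++i) {
          data[ch][i] = start + (ch * frames + i) * increment;
        }
      }
      return data;
    }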
|
audio_splicer.cc | 22 // in the source content. Unit is frames. 174 // Overlapping buffers marked as splice frames are handled by AudioSplicer, 301 // may not actually have a splice. Here we check if any frames exist before 377 // Determine crossfade frame count based on available frames in each splicer 386 // There must always be frames to crossfade, otherwise the splice should not 412 // Adjust the number of frames remaining before the splice. NOTE: This is 423 output_bus->frames() - frames_read); 445 DCHECK_EQ(output_bus->frames(), frames_read); 467 frames_read < output_bus->frames()) { 471 std::min(postroll->frame_count(), output_bus->frames() - frames_read) [all...] |
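The splicer resolves overlapping splice frames by crossfading over however many frames are available on both sides of the splice. A minimal linear-crossfade sketch over mono float data; the function name and the linear ramp are assumptions, since the real code operates on AudioBus/AudioBuffer objects.

    #include <cstddef>

    // Sketch: linearly crossfade `frames` samples from `pre` (fading out)
    // into `post` (fading in), writing the result to `out`. Mono for brevity.
    void CrossfadeLinear(const float* pre, const float* post, float* out,
                         size_t frames) {
      for (size_t i = 0; i < frames; ++i) {
        float t = frames > 1 ? static_cast<float>(i) / (frames - 1) : 1.0f;
        out[i] = pre[i] * (1.0f - t) + post[i] * t;
      }
    }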
/external/chromium_org/media/filters/ |
audio_file_reader.cc | 140 // Read until we hit EOF or we've read the requested number of frames. 145 while (current_frame < audio_bus->frames() && continue_decoding && 147 // Make a shallow copy of packet so we can slide packet.data as frames are 173 // Determine the number of sample-frames we just decoded. Check overflow. 203 if (current_frame + frames_read > audio_bus->frames()) { 205 frames_read = audio_bus->frames() - current_frame; 242 // Zero any remaining frames. 244 current_frame, audio_bus->frames() - current_frame); 246 // Returns the actual number of sample-frames decoded. 255 // |duration| has been calculated from an exact number of sample-frames [all...] |
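The decode loop reads until EOF or until the requested frame count, clamps any decoded chunk that would overrun the destination, and zeroes whatever frames were never filled. A sketch of those two guards for a mono float destination; the names are illustrative, not the reader's.

    #include <algorithm>
    #include <cstring>

    // Clamp a decoded chunk so it never overruns the destination, copy it in,
    // and return the updated write position.
    int AppendDecoded(float* dest, int dest_frames, int current_frame,
                      const float* decoded, int frames_read) {
      frames_read = std::min(frames_read, dest_frames - current_frame);
      std::copy(decoded, decoded + frames_read, dest + current_frame);
      return current_frame + frames_read;
    }

    // Zero any frames that were never filled once decoding stops.
    void ZeroTail(float* dest, int dest_frames, int current_frame) {
      std::memset(dest + current_frame, 0,
                  sizeof(float) * (dest_frames - current_frame));
    }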
/external/chromium_org/third_party/webrtc/tools/frame_analyzer/ |
video_quality_analysis.h | 35 std::vector<AnalysisResult> frames; member in struct:webrtc::test::ResultsContainer 41 // comprises the frames that were captured during the quality measurement test. 42 // There may be missing or duplicate frames. Also the frames start at a random 45 // actual frames in the test file and their position in the reference video, so 46 // that the analysis could run with the right frames from both videos. The stats 59 // frames are exactly the same) will be 48. In the case of SSIM the max return 66 // numbers compatible format to stdout. If the results object contains no frames 74 // Calculates max repeated and skipped frames and prints them to stdout in a
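The analysis compares decoded frames against the reference and, as noted above, caps PSNR at 48 dB when frames are identical. An illustrative PSNR routine for 8-bit data that mirrors that cap; it is not the analyzer's actual implementation.

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <cstdint>

    // Illustrative PSNR for two equally sized 8-bit frames; returns 48.0 for
    // identical frames, mirroring the cap described in the header above.
    double Psnr(const uint8_t* ref, const uint8_t* test, size_t len) {
      double sse = 0.0;
      for (size_t i = 0; i < len; ++i) {
        double d = static_cast<double>(ref[i]) - test[i];
        sse += d * d;
      }
      if (sse == 0.0)
        return 48.0;
      double mse = sse / len;
      return std::min(48.0, 10.0 * std::log10(255.0 * 255.0 / mse));
    }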
|
/external/compiler-rt/lib/sanitizer_common/ |
sanitizer_symbolizer.h | 89 // Fills at most "max_frames" elements of "frames" with descriptions 92 virtual uptr SymbolizePC(uptr address, AddressInfo *frames, uptr max_frames) {
|
/external/lldb/examples/python/ |
stacks.py | 11 description='''This command will enumerate all stack frames, print the stack size for each, and print an aggregation of which functions have the largest stack frame sizes at the end.''' 23 for frame in thread.frames:
|
/external/tinyalsa/ |
tinycap.c | 80 unsigned int frames; local 167 frames = capture_sample(file, card, device, header.num_channels, 170 printf("Captured %d frames\n", frames); 173 header.data_sz = frames * header.block_align;
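tinycap derives the WAV header's data size from the captured frame count: data_sz = frames * block_align, with block_align = channels * bytes per sample. A small sketch of that arithmetic; the struct and names are illustrative, not tinycap's wav_header.

    #include <cstdint>

    // Illustrative WAV size math matching the snippet above:
    // block_align = channels * bytes-per-sample, data_sz = frames * block_align.
    struct WavSizes {
      uint16_t block_align;
      uint32_t data_sz;
    };

    WavSizes ComputeWavSizes(uint32_t frames, uint16_t channels,
                             uint16_t bits_per_sample) {
      WavSizes s;
      s.block_align = channels * (bits_per_sample / 8);
      s.data_sz = frames * s.block_align;
      return s;
    }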
|
/external/valgrind/main/gdbserver_tests/ |
mssnapshot.stderrB.exp | 15 v.set merge-recursive-frames <num> : merge recursive calls in max <num> frames
|
/frameworks/av/media/libnbaio/ |
LibsndfileSource.cpp | 29 mEstimatedFramesUntilEOF(sfinfo.frames), 55 // Detect EOF by zero frames read, not by mFramesUntilEOF as it could be inaccurate 62 // We didn't read any frames during the current loop cycle, so disable
|
/frameworks/base/cmds/bootanimation/ |
BootAnimation.h | 73 SortedVector<Frame> frames; member in struct:android::BootAnimation::Animation::Part
|
/hardware/qcom/audio/legacy/libalsa-intf/ |
alsa_pcm.c | 472 long frames) 482 frames = frames * channels *2 ; 484 while (frames-- > 0) { 493 long frames) 506 frames = frames * channels *2 ; 508 while (frames-- > 0) { 528 long frames; local 532 frames = (pcm->flags & PCM_MONO) ? (count / 2) : (count / 4) [all...] |
/system/media/audio_utils/ |
resampler.c | 37 size_t frames_in; // number of frames in input buffer 38 size_t frames_rq; // cached number of output frames 39 size_t frames_needed; // minimum number of input frames to produce 40 // frames_rq output frames 71 // outputs a number of frames less or equal to *outFrameCount and updates *outFrameCount 72 // with the actual number of frames produced. 88 // update and cache the number of frames needed at the input sampling rate to produce 89 // the number of frames requested at the output sampling rate 99 // make sure that the number of frames present in rsmp->in_buf (rsmp->frames_in) is at 100 // least the number of frames needed to produce the number of frames requested a 239 int frames = speex_resampler_get_input_latency(rsmp->speex_resampler); local [all...] |
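frames_needed is the minimum number of input frames required to produce frames_rq output frames: roughly frames_rq scaled by in_rate/out_rate, plus the resampler's input latency (queried from Speex at line 239). A rough sketch of that calculation, assuming a simple round-up; the real code keeps these values inside its resampler struct.

    #include <cstddef>
    #include <cstdint>

    // Rough sketch: input frames needed to produce `frames_rq` output frames,
    // rounding up and adding the resampler's input-side latency in frames.
    size_t FramesNeeded(size_t frames_rq, uint32_t in_rate, uint32_t out_rate,
                        size_t input_latency_frames) {
      return (frames_rq * in_rate + out_rate - 1) / out_rate + input_latency_frames;
    }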
/development/ndk/platforms/android-8/samples/bitmap-plasma/jni/ |
plasma.c | 284 FrameStats frames[ MAX_FRAME_STATS ]; member in struct:__anon1638 317 minRender = maxRender = avgRender = s->frames[nn].renderTime; 318 minFrame = maxFrame = avgFrame = s->frames[nn].frameTime; 323 double render = s->frames[nn].renderTime; 326 double frame = s->frames[nn].frameTime; 349 s->frames[nn].renderTime = renderTime; 350 s->frames[nn].frameTime = frameTime;
|
/development/ndk/platforms/android-9/samples/native-plasma/jni/ |
plasma.c | 293 FrameStats frames[ MAX_FRAME_STATS ]; member in struct:__anon1836 326 minRender = maxRender = avgRender = s->frames[nn].renderTime; 327 minFrame = maxFrame = avgFrame = s->frames[nn].frameTime; 332 double render = s->frames[nn].renderTime; 335 double frame = s->frames[nn].frameTime; 358 s->frames[nn].renderTime = renderTime; 359 s->frames[nn].frameTime = frameTime;
|
/external/qemu/distrib/sdl-1.2.15/src/audio/alsa/ |
SDL_alsa_audio.c | 76 static int (*SDL_NAME(snd_pcm_hw_params_get_period_size))(const snd_pcm_hw_params_t *params, snd_pcm_uframes_t *frames, int *dir); 401 snd_pcm_uframes_t frames; local 418 frames = spec->samples; 419 status = SDL_NAME(snd_pcm_hw_params_set_period_size_near)(pcm_handle, hwparams, &frames, NULL); 438 snd_pcm_uframes_t frames; local 454 frames = spec->samples * 2; 455 status = SDL_NAME(snd_pcm_hw_params_set_buffer_size_near)(pcm_handle, hwparams, &frames);
|
/external/chromium_org/third_party/WebKit/Tools/Scripts/webkitpy/thirdparty/mod_pywebsocket/ |
_stream_hybi.py | 31 """This file provides classes and helper functions for parsing/building frames 179 unmask_receive: unmask received frames. When received unmasked 213 'configuration for received frames') 302 # This is for skipping UTF-8 encoding when building text type frames 309 # frames in the message are all the same. 319 raise ValueError('Message types are different in frames for ' 349 'Payload data size of control frames must be 125 bytes or less') 392 # Filters applied to frames. 396 # Filters applied to messages. Control frames are not affected by them. 406 """A class for parsing/building frames of the WebSocket protoco [all...] |
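The module builds and parses RFC 6455 frames; the 125-byte ceiling on control-frame payloads falls out of the 7-bit length field, whose reserved values 126 and 127 switch to extended lengths. A sketch of that length encoding, written in C++ rather than the module's Python, for an unmasked single-fragment frame.

    #include <cstdint>
    #include <vector>

    // Sketch of the RFC 6455 header length encoding (unmasked, FIN set).
    // Control frames must use the one-byte form, hence the <= 125 limit.
    std::vector<uint8_t> BuildFrameHeader(uint8_t opcode, uint64_t payload_len) {
      std::vector<uint8_t> h;
      h.push_back(0x80 | (opcode & 0x0f));              // FIN=1, RSV=0, opcode
      if (payload_len <= 125) {
        h.push_back(static_cast<uint8_t>(payload_len)); // 7-bit length, MASK=0
      } else if (payload_len <= 0xffff) {
        h.push_back(126);                               // 16-bit extended length
        h.push_back(static_cast<uint8_t>(payload_len >> 8));
        h.push_back(static_cast<uint8_t>(payload_len));
      } else {
        h.push_back(127);                               // 64-bit extended length
        for (int shift = 56; shift >= 0; shift -= 8)
          h.push_back(static_cast<uint8_t>(payload_len >> shift));
      }
      return h;
    }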
/external/chromium_org/third_party/libvpx/source/libvpx/ |
rate_hist.c | 32 int frames; member in struct:rate_hist 43 // to determine the number of frames in rc_buf_sz milliseconds, with an 51 hist->frames = 0; 84 int idx = hist->frames++ % hist->samples; 94 for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
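rate_hist keeps only the most recent samples-many per-frame records: each new frame lands in slot frames++ % samples, and the summation loop walks back at most samples entries. An illustrative ring-buffer equivalent of that indexing; the struct and member names are not libvpx's.

    #include <vector>

    // Illustrative ring buffer over the most recent records.size() per-frame
    // records, mirroring the idx = frames++ % samples pattern in rate_hist.c.
    struct FrameRing {
      explicit FrameRing(int samples) : records(samples) {}

      void Push(double bits) {
        records[frames++ % records.size()] = bits;
      }

      // Sum over at most records.size() most recent entries, newest first.
      double SumRecent() const {
        double sum = 0.0;
        for (int i = frames; i > 0 && frames - i < static_cast<int>(records.size()); --i)
          sum += records[(i - 1) % records.size()];
        return sum;
      }

      std::vector<double> records;
      int frames = 0;
    };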
|
/external/libvpx/libvpx/ |
rate_hist.c | 32 int frames; member in struct:rate_hist 43 // to determine the number of frames in rc_buf_sz milliseconds, with an 51 hist->frames = 0; 84 int idx = hist->frames++ % hist->samples; 94 for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
|
/frameworks/av/services/audioflinger/tests/ |
test-mixer.cpp | 48 fprintf(stderr, " -P # frames provided per call to resample() in CSV format\n"); 55 uint32_t sampleRate, uint32_t channels, size_t frames, bool isBufferFloat) { 61 info.frames = 0; 65 printf("saving file:%s channels:%u samplerate:%u frames:%zu\n", 66 filename, info.channels, info.samplerate, frames); 73 (void) sf_writef_float(sf, (float*)buffer, frames); 75 (void) sf_writef_short(sf, (short*)buffer, frames); 186 // calculate the number of output frames 290 outputFrames = i; // reset output frames to the data actually produced.
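The test writes its output through libsndfile: fill in an SF_INFO, sf_open() the file for writing, then sf_writef_float()/sf_writef_short() with the frame count. A minimal standalone version of that write path, assuming interleaved 16-bit data and omitting error handling beyond the open check.

    #include <sndfile.h>

    // Minimal libsndfile write sketch: `buffer` holds `frames` interleaved
    // 16-bit frames with `channels` channels at `sampleRate` Hz.
    static void writeWav(const char* filename, const short* buffer,
                         unsigned sampleRate, unsigned channels, sf_count_t frames) {
      SF_INFO info{};
      info.samplerate = sampleRate;
      info.channels = channels;
      info.format = SF_FORMAT_WAV | SF_FORMAT_PCM_16;
      SNDFILE* sf = sf_open(filename, SFM_WRITE, &info);
      if (sf == nullptr) return;
      (void) sf_writef_short(sf, buffer, frames);
      sf_close(sf);
    }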
|
/hardware/intel/common/omx-components/videocodec/libvpx_internal/libvpx/ |
rate_hist.c | 32 int frames; member in struct:rate_hist 43 // to determine the number of frames in rc_buf_sz milliseconds, with an 51 hist->frames = 0; 84 int idx = hist->frames++ % hist->samples; 94 for (i = hist->frames; i > 0 && hist->frames - i < hist->samples; i--) {
|