/external/webrtc/webrtc/modules/video_coding/codecs/vp8/

vp8_impl.h
     49  virtual int Encode(const VideoFrame& input_image,
     78  int UpdateCodecFrameSize(const VideoFrame& input_image);
     86  int GetEncodedPartitions(const VideoFrame& input_image,
    131  int Decode(const EncodedImage& input_image,
    149  int DecodePartitions(const EncodedImage& input_image,

vp8_impl.cc
    724  const VideoFrame& input_image = local
    727  if (quality_scaler_enabled_ && (input_image.width() != codec_.width ||
    728  input_image.height() != codec_.height)) {
    729  int ret = UpdateCodecFrameSize(input_image);
    734  // Since we are extracting raw pointers from |input_image| to
    736  // |input_image| might be scaled from |frame|. In that case, the resolution of
    738  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_images_[0].d_w));
    739  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_images_[0].d_h));
    744  const_cast<uint8_t*>(input_image.buffer(kYPlane));
    746  const_cast<uint8_t*>(input_image.buffer(kUPlane))
    [all...]

simulcast_encoder_adapter.cc
    232  const VideoFrame& input_image,
    261  int src_width = input_image.width();
    262  int src_height = input_image.height();
    284  input_image.IsZeroSize()) {
    285  streaminfos_[stream_idx].encoder->Encode(input_image, codec_specific_info,
    294  input_image.buffer(kYPlane), input_image.stride(kYPlane),
    295  input_image.buffer(kUPlane), input_image.stride(kUPlane),
    296  input_image.buffer(kVPlane), input_image.stride(kVPlane), src_width
    [all...]

simulcast_encoder_adapter.h
     43  int Encode(const VideoFrame& input_image,
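The VP8-path files above all feed a webrtc::VideoFrame into libvpx by aliasing its I420 planes rather than copying them, which is why the const_cast lines appear in the hits. A minimal sketch of that pattern, assuming this tree's 2016-era per-plane buffer()/stride() accessors; the helper name is hypothetical:

    #include "vpx/vpx_image.h"
    #include "webrtc/video_frame.h"

    // Hypothetical helper mirroring vp8_impl.cc / vp9_impl.cc: point |raw| at
    // the planes of |input_image| without copying any pixel data.
    void WireFrameIntoRawImage(const webrtc::VideoFrame& input_image,
                               vpx_image_t* raw) {
      // The encoder only reads the pixels, so casting away const is safe here.
      raw->planes[VPX_PLANE_Y] =
          const_cast<uint8_t*>(input_image.buffer(webrtc::kYPlane));
      raw->planes[VPX_PLANE_U] =
          const_cast<uint8_t*>(input_image.buffer(webrtc::kUPlane));
      raw->planes[VPX_PLANE_V] =
          const_cast<uint8_t*>(input_image.buffer(webrtc::kVPlane));
      raw->stride[VPX_PLANE_Y] = input_image.stride(webrtc::kYPlane);
      raw->stride[VPX_PLANE_U] = input_image.stride(webrtc::kUPlane);
      raw->stride[VPX_PLANE_V] = input_image.stride(webrtc::kVPlane);
    }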
/external/webrtc/webrtc/video/

video_decoder.cc
     86  const EncodedImage& input_image,
     93  if (!fallback_decoder_ || input_image._frameType == kVideoFrameKey) {
     94  int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation,
    112  return fallback_decoder_->Decode(input_image, missing_frames, fragmentation,

video_decoder_unittest.cc
     32  int32_t Decode(const EncodedImage& input_image,

video_send_stream_tests.cc
    [all...]
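The video_decoder.cc hits trace the software-fallback wrapper's control flow: the primary decoder gets the frame while no fallback is active, key frames serve as clean points to retry it, and everything else goes to the fallback. A condensed sketch of that flow, assuming the five-argument Decode() signature visible in video_decoder.h further down; fallback (re)initialization and error handling are elided:

    #include "webrtc/video_decoder.h"

    int32_t VideoDecoderSoftwareFallbackWrapper::Decode(
        const webrtc::EncodedImage& input_image,
        bool missing_frames,
        const webrtc::RTPFragmentationHeader* fragmentation,
        const webrtc::CodecSpecificInfo* codec_specific_info,
        int64_t render_time_ms) {
      // Prefer the primary decoder when no fallback is active, or when a key
      // frame offers a clean point to switch back to it.
      if (!fallback_decoder_ ||
          input_image._frameType == webrtc::kVideoFrameKey) {
        int32_t ret = decoder_->Decode(input_image, missing_frames, fragmentation,
                                       codec_specific_info, render_time_ms);
        if (ret == WEBRTC_VIDEO_CODEC_OK)
          return ret;
        // Primary decode failed: (re)initialize the software fallback here.
      }
      return fallback_decoder_->Decode(input_image, missing_frames, fragmentation,
                                       codec_specific_info, render_time_ms);
    }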
/external/webrtc/webrtc/test/

fake_encoder.h
     34  int32_t Encode(const VideoFrame& input_image,
     78  int32_t Encode(const VideoFrame& input_image,

fake_encoder.cc
     48  int32_t FakeEncoder::Encode(const VideoFrame& input_image,
    102  encoded._timeStamp = input_image.timestamp();
    103  encoded.capture_time_ms_ = input_image.render_time_ms();
    200  int32_t DelayedEncoder::Encode(const VideoFrame& input_image,
    204  return FakeEncoder::Encode(input_image, codec_specific_info, frame_types);

configurable_frame_size_encoder.h
     31  int32_t Encode(const VideoFrame& input_image,
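Lines 102-103 of fake_encoder.cc show the bookkeeping every encoder in these results performs regardless of codec: the input frame's RTP timestamp and render time are copied onto the EncodedImage so timing survives the encode. A minimal stub in that spirit; the function name and bare callback parameter are illustrative, and the real FakeEncoder also fills a payload buffer:

    #include "webrtc/video_encoder.h"
    #include "webrtc/video_frame.h"

    // Illustrative test-encoder stub: emit an (empty) encoded frame whose
    // timing fields are propagated from the input, as FakeEncoder does.
    int32_t EncodeStub(const webrtc::VideoFrame& input_image,
                       webrtc::EncodedImageCallback* callback) {
      webrtc::EncodedImage encoded;
      encoded._timeStamp = input_image.timestamp();  // 90 kHz RTP clock.
      encoded.capture_time_ms_ = input_image.render_time_ms();
      return callback->Encoded(encoded, nullptr, nullptr);
    }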
/external/webrtc/webrtc/modules/video_coding/codecs/h264/

h264_video_toolbox_decoder.cc
    122  const EncodedImage& input_image,
    127  RTC_DCHECK(input_image._buffer);
    130  if (!H264AnnexBBufferToCMSampleBuffer(input_image._buffer,
    131  input_image._length, video_format_,
    147  new internal::FrameDecodeParams(callback_, input_image._timeStamp));

h264_video_toolbox_decoder.h
     36  int Decode(const EncodedImage& input_image,

h264_video_toolbox_encoder.h
     39  int Encode(const VideoFrame& input_image,

h264_video_toolbox_encoder.cc
    236  const VideoFrame& input_image,
    239  if (input_image.IsZeroSize()) {
    263  if (!internal::CopyVideoFrameToPixelBuffer(input_image, pixel_buffer)) {
    281  CMTimeMake(input_image.render_time_ms(), 1000);
    291  input_image.render_time_ms(), input_image.timestamp()));
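Line 281 of h264_video_toolbox_encoder.cc is the bridge between WebRTC's and CoreMedia's clocks: a millisecond render time becomes a CMTime with a timescale of 1000, i.e. counted in thousandths of a second. A self-contained sketch of that conversion; the wrapper function is illustrative, CMTimeMake itself is the real CoreMedia API:

    #include <CoreMedia/CoreMedia.h>
    #include "webrtc/video_frame.h"

    // Convert the frame's millisecond render time into the presentation
    // timestamp VideoToolbox expects. timescale = 1000 => value is in ms.
    CMTime PresentationTimestamp(const webrtc::VideoFrame& input_image) {
      return CMTimeMake(input_image.render_time_ms(), 1000);
    }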
/external/vboot_reference/scripts/image_signing/

sign_official_build.sh
     26  Usage: $PROG <type> input_image /path/to/keys/dir [output_image] [version_file]
     87  INPUT_IMAGE=$2
    455  # Args: INPUT_IMAGE KEY_DIR OUTPUT_IMAGE
    538  extract_image_partition ${INPUT_IMAGE} 3 ${rootfs_image}
    550  kernel_config=$(grab_kernel_config ${INPUT_IMAGE} $partnum)
    588  { load_kernel_test "${INPUT_IMAGE}" "${try_key}" -b 2 >/dev/null 2>&1 && \
    591  { load_kernel_test "${INPUT_IMAGE}" "${try_key}" -b 3 >/dev/null 2>&1 && \
    597  { load_kernel_test "${INPUT_IMAGE}" "${try_key}" -b 0 >/dev/null 2>&1 && \
    600  { load_kernel_test "${INPUT_IMAGE}" "${try_key}" -b 1 >/dev/null 2>&1 && \
    604  verify_image_rootfs "${INPUT_IMAGE}"
    [all...]
/external/webrtc/webrtc/modules/video_coding/codecs/vp9/

vp9_impl.cc
    475  int VP9EncoderImpl::Encode(const VideoFrame& input_image,
    481  if (input_image.IsZeroSize()) {
    492  RTC_DCHECK_EQ(input_image.width(), static_cast<int>(raw_->d_w));
    493  RTC_DCHECK_EQ(input_image.height(), static_cast<int>(raw_->d_h));
    496  // This was necessary since you need some information from input_image.
    499  input_image_ = &input_image;
    503  raw_->planes[VPX_PLANE_Y] = const_cast<uint8_t*>(input_image.buffer(kYPlane));
    504  raw_->planes[VPX_PLANE_U] = const_cast<uint8_t*>(input_image.buffer(kUPlane));
    505  raw_->planes[VPX_PLANE_V] = const_cast<uint8_t*>(input_image.buffer(kVPlane));
    506  raw_->stride[VPX_PLANE_Y] = input_image.stride(kYPlane)
    [all...]

vp9_impl.h
     40  int Encode(const VideoFrame& input_image,
    142  int Decode(const EncodedImage& input_image,
/external/webrtc/webrtc/

video_decoder.h
     66  virtual int32_t Decode(const EncodedImage& input_image,
     97  int32_t Decode(const EncodedImage& input_image,
/packages/apps/Camera2/jni/

tinyplanet.cc
     92  void StereographicProjection(float scale, float angle, unsigned char* input_image,
     96  ImageRGBA input(input_image, input_width, input_height);
/packages/apps/Gallery2/jni/filters/

tinyplanet.cc
     91  void StereographicProjection(float scale, float angle, unsigned char* input_image,
     95  ImageRGBA input(input_image, input_width, input_height);
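Both tinyplanet.cc copies (Camera2 and Gallery2 carry the same filter) wrap the caller's raw RGBA bytes in an ImageRGBA view before projecting. A hypothetical sketch of such a non-owning wrapper, inferred only from the constructor arguments visible above; the real class layout may differ:

    #include <cstdint>

    // Hypothetical non-owning view over an RGBA buffer: 4 bytes per pixel,
    // row-major, no bounds checks (filter inner loops stay tight).
    class ImageRGBA {
     public:
      ImageRGBA(unsigned char* data, int width, int height)
          : data_(data), width_(width), height_(height) {}
      unsigned char* pixel(int x, int y) const {
        return data_ + 4 * (y * width_ + x);
      }
      int width() const { return width_; }
      int height() const { return height_; }
     private:
      unsigned char* data_;
      int width_;
      int height_;
    };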
/external/opencv3/apps/annotation/

opencv_annotation.cpp
     74  void get_annotations(Mat input_image, stringstream* output_stream)
     82  image = input_image;
    130  rectangle(input_image, Point(roi_x0,roi_y0), Point(roi_x1,roi_y1), Scalar(0,255,0), 1);
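get_annotations() takes its Mat by value, yet the rectangle() call still affects the frame the caller sees: cv::Mat's copy constructor is a shallow, reference-counted copy, so `image = input_image` on line 82 aliases the same pixel buffer. A self-contained demonstration:

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    int main() {
      cv::Mat input_image(100, 100, CV_8UC3, cv::Scalar::all(0));
      cv::Mat image = input_image;  // Shallow copy: one shared buffer.
      cv::rectangle(input_image, cv::Point(10, 10), cv::Point(40, 40),
                    cv::Scalar(0, 255, 0), 1);
      // The rectangle is visible through |image| too; no pixels were copied.
      return 0;
    }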
/external/opencv3/samples/cpp/

create_mask.cpp
    129  cout << "usage: " << argv[0] << " <input_image>" << endl;
/art/patchoat/

patchoat.cc
    193  std::unique_ptr<File> input_image(OS::OpenFileForReading(input_image_filename.c_str()));
    194  if (input_image.get() == nullptr) {
    199  int64_t image_len = input_image->GetLength();
    205  if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header),
    207  LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath();
    210  /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath());
    219  input_image->Fd(),
    222  input_image->GetPath().c_str(),
    225  LOG(ERROR) << "Unable to map image file " << input_image->GetPath() << " : " << error_msg;
    228  space_to_file_map.emplace(space, std::move(input_image));
    [all...]
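patchoat's sequence above (open, read sizeof(header) bytes into a struct, treat a short read as a corrupt file, only then map it) is the standard defensive way to probe an image file. A portable sketch of the header-read step using standard C++ in place of ART's File/OS helpers; ImageHeader here is a stand-in type, not art::ImageHeader:

    #include <cstdint>
    #include <fstream>

    struct ImageHeader {  // Stand-in for art::ImageHeader.
      char magic[4];
      uint32_t version;
    };

    // Read exactly sizeof(ImageHeader) bytes; a short read means the file
    // cannot be a valid image (cf. the sizeof() check on line 205 above).
    bool ReadImageHeader(const char* path, ImageHeader* header) {
      std::ifstream in(path, std::ios::binary);
      if (!in) return false;  // Mirrors the nullptr check on line 194.
      in.read(reinterpret_cast<char*>(header), sizeof(*header));
      return in.gcount() == static_cast<std::streamsize>(sizeof(*header));
    }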
/external/opencv3/doc/py_tutorials/py_imgproc/py_colorspaces/

py_colorspaces.markdown
     18  For color conversion, we use the function cv2.cvtColor(input_image, flag) where flag determines the
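The tutorial line refers to the Python binding; the equivalent call in the C++ API used by the other OpenCV hits in these results, with BGR-to-HSV chosen as an example flag:

    #include <opencv2/imgproc.hpp>

    // flag (here cv::COLOR_BGR2HSV) selects the conversion, exactly as the
    // tutorial describes for cv2.cvtColor in Python.
    cv::Mat ToHSV(const cv::Mat& input_image) {
      cv::Mat hsv;
      cv::cvtColor(input_image, hsv, cv::COLOR_BGR2HSV);
      return hsv;
    }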
/external/webrtc/talk/app/webrtc/java/jni/

androidmediaencoder_jni.cc
     94  int32_t Encode(const webrtc::VideoFrame& input_image,
    137  const webrtc::VideoFrame& input_image,
    [all...]