// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/stl_util.h"

#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"

// Exactly one hardware-decoder backend is compiled in per platform; the
// matching header is selected here and the matching class is instantiated
// in Initialize() below.
#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
#include "content/common/gpu/media/exynos_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
#include "ui/gl/gl_context_glx.h"
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
#elif defined(OS_ANDROID)
#include "content/common/gpu/media/android_video_decode_accelerator.h"
#endif

#include "gpu/command_buffer/service/texture_manager.h"
#include "ui/gfx/size.h"

using gpu::gles2::TextureManager;

namespace content {

// Helper bound into |make_context_current_|: makes the stub's GL decoder
// context current so the platform VDA can issue GL calls.  Returns false
// (and logs) if the stub has already been destroyed or MakeCurrent() fails.
static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub.get()) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

// Registers this object as an IPC route on the stub's channel (so decoder
// messages from the renderer reach OnMessageReceived()) and as a destruction
// observer of the stub (so OnWillDestroyStub() can tear this object down
// before the stub disappears).
GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(int32 host_route_id,
                                                     GpuCommandBufferStub* stub)
    : init_done_msg_(NULL),
      host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  stub_->channel()->AddRoute(host_route_id_, this);
  // Bind through a WeakPtr so the callback degrades to a logged failure
  // (rather than a use-after-free) if the stub dies first.
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  DCHECK(stub_);
  // release() hands ownership to Destroy(): the VDA is expected to delete
  // itself there, so the scoped pointer must not also delete it.
  if (video_decode_accelerator_)
    video_decode_accelerator_.release()->Destroy();

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);
}

// Dispatches decoder IPC messages from the renderer.  Returns false (not
// handled) for any message arriving before the VDA has been created.
bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  DCHECK(stub_);
  if (!video_decode_accelerator_)
    return false;
  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

// VDA client callback: asks the host to allocate |requested_num_of_buffers|
// picture buffers of |dimensions| for |texture_target|.  The target is
// cached in |texture_target_| so OnAssignPictureBuffers() can validate the
// textures the host sends back.
void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_, requested_num_of_buffers, dimensions,
          texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify client that picture buffer is now unused.
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
}

// VDA client callback: forwards a decoded picture (picture buffer id plus
// the bitstream buffer it came from) to the host.
void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

// VDA client callback: reports |error| to the host.  While initialization is
// still pending (|init_done_msg_| non-NULL), the error is delivered as the
// CreateVideoDecoder reply with route id -1 instead of a separate
// notification, and the pending reply is consumed.
void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (init_done_msg_) {
    // If we get an error while we're initializing, NotifyInitializeDone won't
    // be called, so we need to send the reply (with an error) here.
    GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
        init_done_msg_, -1);
    if (!Send(init_done_msg_))
      DLOG(ERROR) << "Send(init_done_msg_) failed";
    init_done_msg_ = NULL;
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

// Creates and initializes the platform-specific VDA for |profile|.
// |init_done_msg| is the deferred CreateVideoDecoder reply; it is answered
// either here via NotifyError() on failure, or later via
// NotifyInitializeDone() when the VDA reports success.
void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(stub_);
  DCHECK(!video_decode_accelerator_.get());
  DCHECK(!init_done_msg_);
  DCHECK(init_done_msg);
  init_done_msg_ = init_done_msg;

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
  if (!make_context_current_.Run()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
#endif

#if defined(OS_WIN)
  // DXVA requires Windows 7 or later.
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  DLOG(INFO) << "Initializing DXVA HW decoder for windows.";
  video_decode_accelerator_.reset(new DXVAVideoDecodeAccelerator(
      this, make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  video_decode_accelerator_.reset(new ExynosVideoDecodeAccelerator(
      gfx::GLSurfaceEGL::GetHardwareDisplay(),
      stub_->decoder()->GetGLContext()->GetHandle(),
      this,
      make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  // VA-API needs the raw GLX context/display from the stub's GL context.
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  GLXContext glx_context_handle =
      static_cast<GLXContext>(glx_context->GetHandle());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), glx_context_handle, this,
      make_context_current_));
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      this,
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
  return;
#endif

  if (!video_decode_accelerator_->Initialize(profile))
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}

// Handles a Decode request: wraps the shared-memory |handle| of |size| bytes
// in a BitstreamBuffer tagged with |id| and hands it to the VDA.  Negative
// ids are rejected as a renderer-side argument error.
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(FATAL) << "BitstreamBuffer id " << id << " out of range";
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

// Handles AssignPictureBuffers: validates the parallel |buffer_ids| /
// |texture_ids| / |sizes| vectors, checks each texture against the cached
// |texture_target_| and expected size, clears renderable levels, translates
// client texture ids to service texture ids, and passes the resulting
// PictureBuffers to the VDA.  Any validation failure aborts the whole batch
// with NotifyError().
void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids,
    const std::vector<gfx::Size>& sizes) {
  DCHECK(stub_);
  if (buffer_ids.size() != texture_ids.size() ||
      buffer_ids.size() != sizes.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(FATAL) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(FATAL) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(FATAL) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
    // underlying EGLImage.
    if (texture_target_ != GL_TEXTURE_EXTERNAL_OES) {
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != sizes[i].width() || height != sizes[i].height()) {
        DLOG(FATAL) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    if (!texture_manager->ClearRenderableLevels(command_decoder, texture_ref)) {
      DLOG(FATAL) << "Failed to Clear texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    // Map the client-visible texture id to the GL service id the VDA needs.
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(FATAL) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], sizes[i], service_texture_id));
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
}

// Returns |picture_buffer_id| to the VDA for reuse.
void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

// Handles the Destroy IPC by self-destructing; the destructor releases the
// VDA and unregisters the route and observer.
void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  delete this;
}

// VDA client callback: tells the host that bitstream buffer
// |bitstream_buffer_id| has been fully consumed and may be recycled.
void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}
312 313 void GpuVideoDecodeAccelerator::NotifyInitializeDone() { 314 GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams( 315 init_done_msg_, host_route_id_); 316 if (!Send(init_done_msg_)) 317 DLOG(ERROR) << "Send(init_done_msg_) failed"; 318 init_done_msg_ = NULL; 319 } 320 321 void GpuVideoDecodeAccelerator::NotifyFlushDone() { 322 if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_))) 323 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed"; 324 } 325 326 void GpuVideoDecodeAccelerator::NotifyResetDone() { 327 if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_))) 328 DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed"; 329 } 330 331 void GpuVideoDecodeAccelerator::OnWillDestroyStub() { 332 delete this; 333 } 334 335 bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) { 336 DCHECK(stub_); 337 return stub_->channel()->Send(message); 338 } 339 340 } // namespace content 341