// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/common/gpu/media/gpu_video_decode_accelerator.h"

#include <vector>

#include "base/bind.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/stl_util.h"

#include "content/common/gpu/gpu_channel.h"
#include "content/common/gpu/gpu_messages.h"
#include "content/public/common/content_switches.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "ipc/ipc_message_macros.h"
#include "ipc/ipc_message_utils.h"
#include "media/base/limits.h"
#include "ui/gl/gl_context.h"
#include "ui/gl/gl_surface_egl.h"

#if defined(OS_WIN)
#include "base/win/windows_version.h"
#include "content/common/gpu/media/dxva_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
#include "content/common/gpu/media/exynos_video_decode_accelerator.h"
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
#include "ui/gl/gl_context_glx.h"
#include "content/common/gpu/media/vaapi_video_decode_accelerator.h"
#elif defined(OS_ANDROID)
#include "content/common/gpu/media/android_video_decode_accelerator.h"
#endif

#include "ui/gfx/size.h"

namespace content {

static bool MakeDecoderContextCurrent(
    const base::WeakPtr<GpuCommandBufferStub> stub) {
  if (!stub.get()) {
    DLOG(ERROR) << "Stub is gone; won't MakeCurrent().";
    return false;
  }

  if (!stub->decoder()->MakeCurrent()) {
    DLOG(ERROR) << "Failed to MakeCurrent()";
    return false;
  }

  return true;
}

// A helper class that works like AutoLock but only acquires the lock when
// DCHECK is on.
class DebugAutoLock {
 public:
  explicit DebugAutoLock(base::Lock& lock) : lock_(lock) {
    if (DCHECK_IS_ON())
      lock_.Acquire();
  }

  ~DebugAutoLock() {
    if (DCHECK_IS_ON()) {
      lock_.AssertAcquired();
      lock_.Release();
    }
  }

 private:
  base::Lock& lock_;
  DISALLOW_COPY_AND_ASSIGN(DebugAutoLock);
};

class GpuVideoDecodeAccelerator::MessageFilter
    : public IPC::ChannelProxy::MessageFilter {
 public:
  // |channel_| starts out NULL so SendOnIOThread() fails safely until
  // OnFilterAdded() runs.
  MessageFilter(GpuVideoDecodeAccelerator* owner, int32 host_route_id)
      : owner_(owner), host_route_id_(host_route_id), channel_(NULL) {}

  virtual void OnChannelError() OVERRIDE { channel_ = NULL; }

  virtual void OnChannelClosing() OVERRIDE { channel_ = NULL; }

  virtual void OnFilterAdded(IPC::Channel* channel) OVERRIDE {
    channel_ = channel;
  }

  virtual void OnFilterRemoved() OVERRIDE {
    // This will delete |owner_| and |this|.
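    // |owner_|'s OnFilterRemoved() signals |filter_removed_|, unblocking
    // OnWillDestroyStub() on the child thread, which then deletes |owner_|
    // and drops the last reference to this filter.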
    owner_->OnFilterRemoved();
  }

  virtual bool OnMessageReceived(const IPC::Message& msg) OVERRIDE {
    if (msg.routing_id() != host_route_id_)
      return false;

    IPC_BEGIN_MESSAGE_MAP(MessageFilter, msg)
      IPC_MESSAGE_FORWARD(AcceleratedVideoDecoderMsg_Decode, owner_,
                          GpuVideoDecodeAccelerator::OnDecode)
      IPC_MESSAGE_UNHANDLED(return false;)
    IPC_END_MESSAGE_MAP()
    return true;
  }

  bool SendOnIOThread(IPC::Message* message) {
    DCHECK(!message->is_sync());
    if (!channel_) {
      delete message;
      return false;
    }
    return channel_->Send(message);
  }

 protected:
  virtual ~MessageFilter() {}

 private:
  GpuVideoDecodeAccelerator* owner_;
  int32 host_route_id_;
  // The channel to which this filter was added.
  IPC::Channel* channel_;
};

GpuVideoDecodeAccelerator::GpuVideoDecodeAccelerator(
    int32 host_route_id,
    GpuCommandBufferStub* stub,
    const scoped_refptr<base::MessageLoopProxy>& io_message_loop)
    : init_done_msg_(NULL),
      host_route_id_(host_route_id),
      stub_(stub),
      texture_target_(0),
      filter_removed_(true, false),
      io_message_loop_(io_message_loop),
      weak_factory_for_io_(this) {
  DCHECK(stub_);
  stub_->AddDestructionObserver(this);
  stub_->channel()->AddRoute(host_route_id_, this);
  child_message_loop_ = base::MessageLoopProxy::current();
  make_context_current_ =
      base::Bind(&MakeDecoderContextCurrent, stub_->AsWeakPtr());
}

GpuVideoDecodeAccelerator::~GpuVideoDecodeAccelerator() {
  // This class can only be self-deleted from OnWillDestroyStub(), which means
  // the VDA has already been destroyed in there.
  CHECK(!video_decode_accelerator_.get());
}

bool GpuVideoDecodeAccelerator::OnMessageReceived(const IPC::Message& msg) {
  if (!video_decode_accelerator_)
    return false;

  bool handled = true;
  IPC_BEGIN_MESSAGE_MAP(GpuVideoDecodeAccelerator, msg)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Decode, OnDecode)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_AssignPictureBuffers,
                        OnAssignPictureBuffers)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_ReusePictureBuffer,
                        OnReusePictureBuffer)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Flush, OnFlush)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Reset, OnReset)
    IPC_MESSAGE_HANDLER(AcceleratedVideoDecoderMsg_Destroy, OnDestroy)
    IPC_MESSAGE_UNHANDLED(handled = false)
  IPC_END_MESSAGE_MAP()
  return handled;
}

void GpuVideoDecodeAccelerator::ProvidePictureBuffers(
    uint32 requested_num_of_buffers,
    const gfx::Size& dimensions,
    uint32 texture_target) {
  if (dimensions.width() > media::limits::kMaxDimension ||
      dimensions.height() > media::limits::kMaxDimension ||
      dimensions.GetArea() > media::limits::kMaxCanvas) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers(
          host_route_id_,
          requested_num_of_buffers,
          dimensions,
          texture_target))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ProvidePictureBuffers) "
                << "failed";
  }
  texture_dimensions_ = dimensions;
  texture_target_ = texture_target;
}

void GpuVideoDecodeAccelerator::DismissPictureBuffer(
    int32 picture_buffer_id) {
  // Notify client that picture buffer is now unused.
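  // The matching entry in |uncleared_textures_| (if any) is erased below, so
  // SetTextureCleared() cannot act on a dismissed buffer later.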
  if (!Send(new AcceleratedVideoDecoderHostMsg_DismissPictureBuffer(
          host_route_id_, picture_buffer_id))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_DismissPictureBuffer) "
                << "failed";
  }
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  uncleared_textures_.erase(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::PictureReady(
    const media::Picture& picture) {
  // VDA may call PictureReady on the IO thread. SetTextureCleared should run
  // on the child thread. The VDA is responsible for calling PictureReady on
  // the child thread the first time a picture buffer is delivered.
  if (child_message_loop_->BelongsToCurrentThread()) {
    SetTextureCleared(picture);
  } else {
    DCHECK(io_message_loop_->BelongsToCurrentThread());
    if (DCHECK_IS_ON()) {
      DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
      DCHECK_EQ(0u, uncleared_textures_.count(picture.picture_buffer_id()));
    }
  }

  if (!Send(new AcceleratedVideoDecoderHostMsg_PictureReady(
          host_route_id_,
          picture.picture_buffer_id(),
          picture.bitstream_buffer_id()))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_PictureReady) failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyError(
    media::VideoDecodeAccelerator::Error error) {
  if (init_done_msg_) {
    // If we get an error while we're initializing, NotifyInitializeDone won't
    // be called, so we need to send the reply (with an error) here.
    GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
        init_done_msg_, -1);
    if (!Send(init_done_msg_))
      DLOG(ERROR) << "Send(init_done_msg_) failed";
    init_done_msg_ = NULL;
    return;
  }
  if (!Send(new AcceleratedVideoDecoderHostMsg_ErrorNotification(
          host_route_id_, error))) {
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ErrorNotification) "
                << "failed";
  }
}

void GpuVideoDecodeAccelerator::Initialize(
    const media::VideoCodecProfile profile,
    IPC::Message* init_done_msg) {
  DCHECK(!video_decode_accelerator_.get());
  DCHECK(!init_done_msg_);
  DCHECK(init_done_msg);
  init_done_msg_ = init_done_msg;

#if !defined(OS_WIN)
  // Ensure we will be able to get a GL context at all before initializing
  // non-Windows VDAs.
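  // |make_context_current_| is the same callback handed to the VDA below, so
  // a failure here implies the decoder could not have used the context later
  // either.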
  if (!make_context_current_.Run()) {
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
#endif

#if defined(OS_WIN)
  if (base::win::GetVersion() < base::win::VERSION_WIN7) {
    NOTIMPLEMENTED() << "HW video decode acceleration not available.";
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
    return;
  }
  DVLOG(0) << "Initializing DXVA HW decoder for Windows.";
  video_decode_accelerator_.reset(new DXVAVideoDecodeAccelerator(
      this, make_context_current_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_ARMEL) && defined(USE_X11)
  video_decode_accelerator_.reset(new ExynosVideoDecodeAccelerator(
      gfx::GLSurfaceEGL::GetHardwareDisplay(),
      stub_->decoder()->GetGLContext()->GetHandle(),
      this,
      weak_factory_for_io_.GetWeakPtr(),
      make_context_current_,
      io_message_loop_));
#elif defined(OS_CHROMEOS) && defined(ARCH_CPU_X86_FAMILY) && defined(USE_X11)
  gfx::GLContextGLX* glx_context =
      static_cast<gfx::GLContextGLX*>(stub_->decoder()->GetGLContext());
  GLXContext glx_context_handle =
      static_cast<GLXContext>(glx_context->GetHandle());
  video_decode_accelerator_.reset(new VaapiVideoDecodeAccelerator(
      glx_context->display(), glx_context_handle, this,
      make_context_current_));
#elif defined(OS_ANDROID)
  video_decode_accelerator_.reset(new AndroidVideoDecodeAccelerator(
      this,
      stub_->decoder()->AsWeakPtr(),
      make_context_current_));
#else
  NOTIMPLEMENTED() << "HW video decode acceleration not available.";
  NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
  return;
#endif

  if (video_decode_accelerator_->CanDecodeOnIOThread()) {
    filter_ = new MessageFilter(this, host_route_id_);
    stub_->channel()->AddFilter(filter_.get());
  }

  if (!video_decode_accelerator_->Initialize(profile))
    NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
}

// Runs on IO thread if video_decode_accelerator_->CanDecodeOnIOThread() is
// true, otherwise on the main thread.
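// In the IO-thread case the message arrives through MessageFilter, which
// forwards AcceleratedVideoDecoderMsg_Decode directly to this handler.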
void GpuVideoDecodeAccelerator::OnDecode(
    base::SharedMemoryHandle handle, int32 id, uint32 size) {
  DCHECK(video_decode_accelerator_.get());
  if (id < 0) {
    DLOG(ERROR) << "BitstreamBuffer id " << id << " out of range";
    if (child_message_loop_->BelongsToCurrentThread()) {
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    } else {
      child_message_loop_->PostTask(
          FROM_HERE,
          base::Bind(&GpuVideoDecodeAccelerator::NotifyError,
                     base::Unretained(this),
                     media::VideoDecodeAccelerator::INVALID_ARGUMENT));
    }
    return;
  }
  video_decode_accelerator_->Decode(media::BitstreamBuffer(id, handle, size));
}

void GpuVideoDecodeAccelerator::OnAssignPictureBuffers(
    const std::vector<int32>& buffer_ids,
    const std::vector<uint32>& texture_ids) {
  if (buffer_ids.size() != texture_ids.size()) {
    NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
    return;
  }

  gpu::gles2::GLES2Decoder* command_decoder = stub_->decoder();
  gpu::gles2::TextureManager* texture_manager =
      command_decoder->GetContextGroup()->texture_manager();

  std::vector<media::PictureBuffer> buffers;
  std::vector<scoped_refptr<gpu::gles2::TextureRef> > textures;
  for (uint32 i = 0; i < buffer_ids.size(); ++i) {
    if (buffer_ids[i] < 0) {
      DLOG(ERROR) << "Buffer id " << buffer_ids[i] << " out of range";
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::TextureRef* texture_ref = texture_manager->GetTexture(
        texture_ids[i]);
    if (!texture_ref) {
      DLOG(ERROR) << "Failed to find texture id " << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    gpu::gles2::Texture* info = texture_ref->texture();
    if (info->target() != texture_target_) {
      DLOG(ERROR) << "Texture target mismatch for texture id "
                  << texture_ids[i];
      NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
      return;
    }
    if (texture_target_ == GL_TEXTURE_EXTERNAL_OES) {
      // GL_TEXTURE_EXTERNAL_OES textures have their dimensions defined by the
      // underlying EGLImage. Use |texture_dimensions_| for this size.
      texture_manager->SetLevelInfo(texture_ref,
                                    GL_TEXTURE_EXTERNAL_OES,
                                    0,
                                    0,
                                    texture_dimensions_.width(),
                                    texture_dimensions_.height(),
                                    1,
                                    0,
                                    0,
                                    0,
                                    false);
    } else {
      // For other targets, texture dimensions should already be defined.
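      // Verify them against the size requested in ProvidePictureBuffers();
      // a mismatch means the client allocated the wrong texture.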
      GLsizei width = 0, height = 0;
      info->GetLevelSize(texture_target_, 0, &width, &height);
      if (width != texture_dimensions_.width() ||
          height != texture_dimensions_.height()) {
        DLOG(ERROR) << "Size mismatch for texture id " << texture_ids[i];
        NotifyError(media::VideoDecodeAccelerator::INVALID_ARGUMENT);
        return;
      }
    }
    uint32 service_texture_id;
    if (!command_decoder->GetServiceTextureId(
            texture_ids[i], &service_texture_id)) {
      DLOG(ERROR) << "Failed to translate texture!";
      NotifyError(media::VideoDecodeAccelerator::PLATFORM_FAILURE);
      return;
    }
    buffers.push_back(media::PictureBuffer(
        buffer_ids[i], texture_dimensions_, service_texture_id));
    textures.push_back(texture_ref);
  }
  video_decode_accelerator_->AssignPictureBuffers(buffers);
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  for (uint32 i = 0; i < buffer_ids.size(); ++i)
    uncleared_textures_[buffer_ids[i]] = textures[i];
}

void GpuVideoDecodeAccelerator::OnReusePictureBuffer(
    int32 picture_buffer_id) {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->ReusePictureBuffer(picture_buffer_id);
}

void GpuVideoDecodeAccelerator::OnFlush() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Flush();
}

void GpuVideoDecodeAccelerator::OnReset() {
  DCHECK(video_decode_accelerator_.get());
  video_decode_accelerator_->Reset();
}

void GpuVideoDecodeAccelerator::OnDestroy() {
  DCHECK(video_decode_accelerator_.get());
  OnWillDestroyStub();
}

void GpuVideoDecodeAccelerator::OnFilterRemoved() {
  // We're destroying; cancel all callbacks.
  weak_factory_for_io_.InvalidateWeakPtrs();
  filter_removed_.Signal();
}

void GpuVideoDecodeAccelerator::NotifyEndOfBitstreamBuffer(
    int32 bitstream_buffer_id) {
  if (!Send(new AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed(
          host_route_id_, bitstream_buffer_id))) {
    DLOG(ERROR)
        << "Send(AcceleratedVideoDecoderHostMsg_BitstreamBufferProcessed) "
        << "failed";
  }
}

void GpuVideoDecodeAccelerator::NotifyInitializeDone() {
  GpuCommandBufferMsg_CreateVideoDecoder::WriteReplyParams(
      init_done_msg_, host_route_id_);
  if (!Send(init_done_msg_))
    DLOG(ERROR) << "Send(init_done_msg_) failed";
  init_done_msg_ = NULL;
}

void GpuVideoDecodeAccelerator::NotifyFlushDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_FlushDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_FlushDone) failed";
}

void GpuVideoDecodeAccelerator::NotifyResetDone() {
  if (!Send(new AcceleratedVideoDecoderHostMsg_ResetDone(host_route_id_)))
    DLOG(ERROR) << "Send(AcceleratedVideoDecoderHostMsg_ResetDone) failed";
}

void GpuVideoDecodeAccelerator::OnWillDestroyStub() {
  // The stub is going away, so we have to stop and destroy VDA here, before
  // returning, because the VDA may need the GL context to run and/or do its
  // cleanup. We cannot destroy the VDA before the IO thread message filter is
  // removed however, since we cannot service incoming messages with VDA gone.
  // We cannot simply check for existence of VDA on IO thread though, because
  // we don't want to synchronize the IO thread with the ChildThread.
  // So we have to wait for the RemoveFilter callback here instead and remove
  // the VDA after it arrives and before returning.
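  // If the VDA only decodes on the child thread, no filter was ever added
  // (see Initialize()), and the wait below is skipped.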
  if (filter_.get()) {
    stub_->channel()->RemoveFilter(filter_.get());
    filter_removed_.Wait();
  }

  stub_->channel()->RemoveRoute(host_route_id_);
  stub_->RemoveDestructionObserver(this);

  if (video_decode_accelerator_)
    video_decode_accelerator_.release()->Destroy();

  delete this;
}

bool GpuVideoDecodeAccelerator::Send(IPC::Message* message) {
  if (filter_.get() && io_message_loop_->BelongsToCurrentThread())
    return filter_->SendOnIOThread(message);
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  return stub_->channel()->Send(message);
}

void GpuVideoDecodeAccelerator::SetTextureCleared(
    const media::Picture& picture) {
  DCHECK(child_message_loop_->BelongsToCurrentThread());
  DebugAutoLock auto_lock(debug_uncleared_textures_lock_);
  std::map<int32, scoped_refptr<gpu::gles2::TextureRef> >::iterator it;
  it = uncleared_textures_.find(picture.picture_buffer_id());
  if (it == uncleared_textures_.end())
    return;  // The texture has already been cleared.

  scoped_refptr<gpu::gles2::TextureRef> texture_ref = it->second;
  GLenum target = texture_ref->texture()->target();
  gpu::gles2::TextureManager* texture_manager =
      stub_->decoder()->GetContextGroup()->texture_manager();
  DCHECK(!texture_ref->texture()->IsLevelCleared(target, 0));
  texture_manager->SetLevelCleared(texture_ref, target, 0, true);
  uncleared_textures_.erase(it);
}

}  // namespace content