/*
 * libjingle
 * Copyright 2004 Google Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */ 27 28 #ifndef TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_ 29 #define TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_ 30 31 #include <string> 32 #include <vector> 33 34 #include "talk/base/logging.h" 35 #include "talk/base/sigslotrepeater.h" 36 #include "talk/media/base/codec.h" 37 #include "talk/media/base/mediachannel.h" 38 #include "talk/media/base/videocapturer.h" 39 #include "talk/media/base/videocommon.h" 40 41 namespace cricket { 42 43 struct Device; 44 struct VideoFormat; 45 class HybridVideoEngineInterface; 46 class VideoCapturer; 47 class VideoFrame; 48 class VideoRenderer; 49 50 // HybridVideoMediaChannels work with a HybridVideoEngine to combine 51 // two unrelated VideoMediaChannel implementations into a single class. 52 class HybridVideoMediaChannel : public VideoMediaChannel { 53 public: 54 HybridVideoMediaChannel(HybridVideoEngineInterface* engine, 55 VideoMediaChannel* channel1, 56 VideoMediaChannel* channel2); 57 virtual ~HybridVideoMediaChannel(); 58 59 // VideoMediaChannel methods 60 virtual void SetInterface(NetworkInterface* iface); 61 virtual bool SetOptions(const VideoOptions& options); 62 virtual bool GetOptions(VideoOptions* options) const; 63 virtual bool AddSendStream(const StreamParams& sp); 64 virtual bool RemoveSendStream(uint32 ssrc); 65 virtual bool SetRenderer(uint32 ssrc, VideoRenderer* renderer); 66 virtual bool SetRender(bool render); 67 virtual bool MuteStream(uint32 ssrc, bool muted); 68 69 virtual bool SetRecvCodecs(const std::vector<VideoCodec>& codecs); 70 virtual bool SetRecvRtpHeaderExtensions( 71 const std::vector<RtpHeaderExtension>& extensions); 72 73 virtual bool SetSendCodecs(const std::vector<VideoCodec>& codecs); 74 virtual bool GetSendCodec(VideoCodec* codec); 75 virtual bool SetSendStreamFormat(uint32 ssrc, const VideoFormat& format); 76 virtual bool SetSendRtpHeaderExtensions( 77 const std::vector<RtpHeaderExtension>& extensions); 78 virtual bool SetSendBandwidth(bool autobw, int bps); 79 virtual bool SetSend(bool send); 80 
81 virtual bool AddRecvStream(const StreamParams& sp); 82 virtual bool RemoveRecvStream(uint32 ssrc); 83 virtual bool SetCapturer(uint32 ssrc, VideoCapturer* capturer); 84 85 virtual bool SendIntraFrame(); 86 virtual bool RequestIntraFrame(); 87 88 virtual bool GetStats(VideoMediaInfo* info); 89 90 virtual void OnPacketReceived(talk_base::Buffer* packet); 91 virtual void OnRtcpReceived(talk_base::Buffer* packet); 92 virtual void OnReadyToSend(bool ready); 93 94 virtual void UpdateAspectRatio(int ratio_w, int ratio_h); 95 96 void OnLocalFrame(VideoCapturer*, const VideoFrame*); 97 void OnLocalFrameFormat(VideoCapturer*, const VideoFormat*); 98 99 bool sending() const { return sending_; } 100 101 private: 102 bool SelectActiveChannel(const std::vector<VideoCodec>& codecs); 103 void SplitCodecs(const std::vector<VideoCodec>& codecs, 104 std::vector<VideoCodec>* codecs1, 105 std::vector<VideoCodec>* codecs2); 106 107 void OnMediaError(uint32 ssrc, Error error); 108 109 HybridVideoEngineInterface* engine_; 110 talk_base::scoped_ptr<VideoMediaChannel> channel1_; 111 talk_base::scoped_ptr<VideoMediaChannel> channel2_; 112 VideoMediaChannel* active_channel_; 113 bool sending_; 114 }; 115 116 // Interface class for HybridVideoChannels to talk to the engine. 117 class HybridVideoEngineInterface { 118 public: 119 virtual ~HybridVideoEngineInterface() {} 120 virtual bool HasCodec1(const VideoCodec& codec) = 0; 121 virtual bool HasCodec2(const VideoCodec& codec) = 0; 122 virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) = 0; 123 virtual void OnSendChange2(VideoMediaChannel* channel1, bool send) = 0; 124 virtual void OnNewSendResolution(int width, int height) = 0; 125 }; 126 127 // The HybridVideoEngine class combines two unrelated VideoEngine impls 128 // into a single class. It creates HybridVideoMediaChannels that also contain 129 // a VideoMediaChannel implementation from each engine. 
Policy is then used 130 // during call setup to determine which VideoMediaChannel should be used. 131 // Currently, this policy is based on what codec the remote side wants to use. 132 template<class VIDEO1, class VIDEO2> 133 class HybridVideoEngine : public HybridVideoEngineInterface { 134 public: 135 HybridVideoEngine() { 136 // Unify the codec lists. 137 codecs_ = video1_.codecs(); 138 codecs_.insert(codecs_.end(), video2_.codecs().begin(), 139 video2_.codecs().end()); 140 141 rtp_header_extensions_ = video1_.rtp_header_extensions(); 142 rtp_header_extensions_.insert(rtp_header_extensions_.end(), 143 video2_.rtp_header_extensions().begin(), 144 video2_.rtp_header_extensions().end()); 145 146 SignalCaptureStateChange.repeat(video2_.SignalCaptureStateChange); 147 } 148 149 bool Init(talk_base::Thread* worker_thread) { 150 if (!video1_.Init(worker_thread)) { 151 LOG(LS_ERROR) << "Failed to init VideoEngine1"; 152 return false; 153 } 154 if (!video2_.Init(worker_thread)) { 155 LOG(LS_ERROR) << "Failed to init VideoEngine2"; 156 video1_.Terminate(); 157 return false; 158 } 159 return true; 160 } 161 void Terminate() { 162 video1_.Terminate(); 163 video2_.Terminate(); 164 } 165 166 int GetCapabilities() { 167 return (video1_.GetCapabilities() | video2_.GetCapabilities()); 168 } 169 HybridVideoMediaChannel* CreateChannel(VoiceMediaChannel* channel) { 170 talk_base::scoped_ptr<VideoMediaChannel> channel1( 171 video1_.CreateChannel(channel)); 172 if (!channel1) { 173 LOG(LS_ERROR) << "Failed to create VideoMediaChannel1"; 174 return NULL; 175 } 176 talk_base::scoped_ptr<VideoMediaChannel> channel2( 177 video2_.CreateChannel(channel)); 178 if (!channel2) { 179 LOG(LS_ERROR) << "Failed to create VideoMediaChannel2"; 180 return NULL; 181 } 182 return new HybridVideoMediaChannel(this, 183 channel1.release(), channel2.release()); 184 } 185 186 bool SetOptions(int o) { 187 return video1_.SetOptions(o) && video2_.SetOptions(o); 188 } 189 bool SetDefaultEncoderConfig(const 
VideoEncoderConfig& config) { 190 VideoEncoderConfig conf = config; 191 if (video1_.codecs().size() > 0) { 192 conf.max_codec.name = video1_.codecs()[0].name; 193 if (!video1_.SetDefaultEncoderConfig(conf)) { 194 LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video1"; 195 return false; 196 } 197 } 198 if (video2_.codecs().size() > 0) { 199 conf.max_codec.name = video2_.codecs()[0].name; 200 if (!video2_.SetDefaultEncoderConfig(conf)) { 201 LOG(LS_ERROR) << "Failed to SetDefaultEncoderConfig for video2"; 202 return false; 203 } 204 } 205 return true; 206 } 207 const std::vector<VideoCodec>& codecs() const { 208 return codecs_; 209 } 210 const std::vector<RtpHeaderExtension>& rtp_header_extensions() const { 211 return rtp_header_extensions_; 212 } 213 void SetLogging(int min_sev, const char* filter) { 214 video1_.SetLogging(min_sev, filter); 215 video2_.SetLogging(min_sev, filter); 216 } 217 218 VideoFormat GetStartCaptureFormat() const { 219 return video2_.GetStartCaptureFormat(); 220 } 221 222 // TODO(juberti): Remove these functions after we do the capturer refactoring. 223 // For now they are set to always use the second engine for capturing, which 224 // is convenient given our intended use case. 
225 bool SetCaptureDevice(const Device* device) { 226 return video2_.SetCaptureDevice(device); 227 } 228 VideoCapturer* GetVideoCapturer() const { 229 return video2_.GetVideoCapturer(); 230 } 231 bool SetLocalRenderer(VideoRenderer* renderer) { 232 return video2_.SetLocalRenderer(renderer); 233 } 234 sigslot::repeater2<VideoCapturer*, CaptureState> SignalCaptureStateChange; 235 236 virtual bool HasCodec1(const VideoCodec& codec) { 237 return HasCodec(video1_, codec); 238 } 239 virtual bool HasCodec2(const VideoCodec& codec) { 240 return HasCodec(video2_, codec); 241 } 242 template<typename VIDEO> 243 bool HasCodec(const VIDEO& engine, const VideoCodec& codec) const { 244 for (std::vector<VideoCodec>::const_iterator i = engine.codecs().begin(); 245 i != engine.codecs().end(); 246 ++i) { 247 if (i->Matches(codec)) { 248 return true; 249 } 250 } 251 return false; 252 } 253 virtual void OnSendChange1(VideoMediaChannel* channel1, bool send) { 254 } 255 virtual void OnSendChange2(VideoMediaChannel* channel2, bool send) { 256 } 257 virtual void OnNewSendResolution(int width, int height) { 258 } 259 260 protected: 261 VIDEO1 video1_; 262 VIDEO2 video2_; 263 std::vector<VideoCodec> codecs_; 264 std::vector<RtpHeaderExtension> rtp_header_extensions_; 265 }; 266 267 } // namespace cricket 268 269 #endif // TALK_MEDIA_BASE_HYBRIDVIDEOENGINE_H_ 270