Home | History | Annotate | Download | only in functional
      1 #!/usr/bin/env python
      2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
      3 # Use of this source code is governed by a BSD-style license that can be
      4 # found in the LICENSE file.
      5 
      6 import os
      7 import subprocess
      8 import sys
      9 
     10 import pyauto_functional
     11 import pyauto
     12 import pyauto_paths
     13 import pyauto_utils
     14 import webrtc_test_base
     15 
# If you change the port number, don't forget to modify video_extraction.js too.
# Kept as a string because it is passed verbatim on the pywebsocket command
# line.
_PYWEBSOCKET_PORT_NUMBER = '12221'

# Windows exposes the user's home as HOMEPATH; POSIX systems use HOME.
_HOME_ENV_NAME = 'HOMEPATH' if pyauto.PyUITest.IsWin() else 'HOME'
# Directory holding the reference video and all temporary files produced by
# the test (captured frames, converted YUV output, stats files).
_WORKING_DIR = os.path.join(os.environ[_HOME_ENV_NAME], 'webrtc_video_quality')

# This is the reference file that is being played by the virtual web camera.
_REFERENCE_YUV_FILE = os.path.join(_WORKING_DIR, 'reference_video.yuv')

# The YUV file is the file produced by rgba_to_i420_converter.
_OUTPUT_YUV_FILE = os.path.join(_WORKING_DIR, 'captured_video.yuv')
     27 
     28 
class MissingRequiredToolException(Exception):
  """Raised when an external tool or script this test depends on is absent."""
  pass
     31 
     32 
class FailedToRunToolException(Exception):
  """Raised when an external tool runs but exits with a non-zero status."""
  pass
     35 
     36 
     37 class WebrtcVideoQualityTest(webrtc_test_base.WebrtcTestBase):
     38   """Test the video quality of the WebRTC output.
     39 
     40   Prerequisites: This test case must run on a machine with a virtual webcam that
     41   plays video from the reference file located in the location defined by
     42   _REFERENCE_YUV_FILE. You must also compile the chromium_builder_webrtc target
     43   before you run this test to get all the tools built.
     44   The external compare_videos.py script also depends on two external executables
     45   which must be located in the PATH when running this test.
     46   * zxing (see the CPP version at https://code.google.com/p/zxing)
     47   * ffmpeg 0.11.1 or compatible version (see http://www.ffmpeg.org)
     48 
     49   The test case will launch a custom binary (peerconnection_server) which will
     50   allow two WebRTC clients to find each other.
     51 
     52   The test also runs several other custom binaries - rgba_to_i420 converter and
     53   frame_analyzer. Both tools can be found under third_party/webrtc/tools. The
     54   test also runs a stand alone Python implementation of a WebSocket server
     55   (pywebsocket) and a barcode_decoder script.
     56   """
     57 
     58   def setUp(self):
     59     pyauto.PyUITest.setUp(self)
     60     if not os.path.exists(_WORKING_DIR):
     61       self.fail('Cannot find the working directory for the reference video and '
     62                 'the temporary files: %s' % _WORKING_DIR)
     63     if not os.path.exists(_REFERENCE_YUV_FILE):
     64       self.fail('Cannot find the reference file to be used for video quality '
     65                 'comparison: %s' % _REFERENCE_YUV_FILE)
     66     self.StartPeerConnectionServer()
     67 
     68   def tearDown(self):
     69     self._StopPywebsocketServer()
     70     self.StopPeerConnectionServer()
     71 
     72     pyauto.PyUITest.tearDown(self)
     73     self.assertEquals('', self.CheckErrorsAndCrashes())
     74 
     75   def _WebRtcCallWithHelperPage(self, test_page, helper_page):
     76 
     77     """Tests we can call, let run for some time and hang up with WebRTC.
     78 
     79     This test exercises pretty much the whole happy-case for the WebRTC
     80     JavaScript API. Currently, it exercises a normal call setup using the API
     81     defined at http://dev.w3.org/2011/webrtc/editor/webrtc.html. The API is
     82     still evolving.
     83 
     84     The test will load the supplied HTML file, which in turn will load different
     85     javascript files depending on which version of the signaling protocol
     86     we are running.
     87     The supplied HTML files will be loaded in two tabs and tell the web
     88     pages to start up WebRTC, which will acquire video and audio devices on the
     89     system. This will launch a dialog in Chrome which we click past using the
     90     automation controller. Then, we will order both tabs to connect the server,
     91     which will make the two tabs aware of each other. Once that is done we order
     92     one tab to call the other.
     93 
     94     We make sure that the javascript tells us that the call succeeded, lets it
     95     run for some time and try to hang up the call after that. While the call is
     96     running, we capture frames with the help of the functions in the
     97     video_extraction.js file.
     98 
     99     Args:
    100       test_page(string): The name of the test HTML page. It is looked for in the
    101         webrtc directory under chrome/test/data.
    102       helper_page(string): The name of the helper HTML page. It is looked for in
    103         the same directory as the test_page.
    104     """
    105     assert helper_page
    106     url = self.GetFileURLForDataPath('webrtc', test_page)
    107     helper_url = self.GetFileURLForDataPath('webrtc', helper_page)
    108 
    109     # Start the helper page in the first tab
    110     self.NavigateToURL(helper_url)
    111 
    112     # Start the test page in the second page.
    113     self.AppendTab(pyauto.GURL(url))
    114 
    115     self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=0))
    116     self.assertEquals('ok-got-stream', self.GetUserMedia(tab_index=1))
    117     self.Connect('user_1', tab_index=0)
    118     self.Connect('user_2', tab_index=1)
    119 
    120     self.CreatePeerConnection(tab_index=0)
    121     self.AddUserMediaLocalStream(tab_index=0)
    122     self.EstablishCall(from_tab_with_index=0, to_tab_with_index=1)
    123 
    124     # Wait for JavaScript to capture all the frames. In the HTML file we specify
    125     # how many seconds to capture frames.
    126     done_capturing = self.WaitUntil(
    127         function=lambda: self.ExecuteJavascript('doneFrameCapturing()',
    128                                                 tab_index=1),
    129         expect_retval='done-capturing', retry_sleep=1.0,
    130         # TODO(phoglund): Temporary fix; remove after 2013-04-01
    131         timeout=90)
    132 
    133     self.assertTrue(done_capturing,
    134                     msg='Timed out while waiting frames to be captured.')
    135 
    136     # The hang-up will automatically propagate to the second tab.
    137     self.HangUp(from_tab_with_index=0)
    138     self.WaitUntilHangUpVerified(tab_index=1)
    139 
    140     self.Disconnect(tab_index=0)
    141     self.Disconnect(tab_index=1)
    142 
    143     # Ensure we didn't miss any errors.
    144     self.AssertNoFailures(tab_index=0)
    145     self.AssertNoFailures(tab_index=1)
    146 
    147   def testVgaVideoQuality(self):
    148     """Tests the WebRTC video output for a VGA video input.
    149 
    150     On the bots we will be running fake webcam driver and we will feed a video
    151     with overlaid barcodes. In order to run the analysis on the output, we need
    152     to use the original input video as a reference video.
    153     """
    154     helper_page = webrtc_test_base.WebrtcTestBase.DEFAULT_TEST_PAGE
    155     self._StartVideoQualityTest(test_page='webrtc_video_quality_test.html',
    156                                 helper_page=helper_page,
    157                                 reference_yuv=_REFERENCE_YUV_FILE, width=640,
    158                                 height=480)
    159 
    160   def _StartVideoQualityTest(self, reference_yuv,
    161                              test_page='webrtc_video_quality_test.html',
    162                              helper_page='webrtc_jsep01_test.html',
    163                              width=640, height=480):
    164     """Captures video output into a canvas and sends it to a server.
    165 
    166     This test captures the output frames of a WebRTC connection to a canvas and
    167     later sends them over WebSocket to a WebSocket server implemented in Python.
    168     At the server side we can store the frames for subsequent quality analysis.
    169 
    170     After the frames are sent to the pywebsocket server, we run the RGBA to I420
    171     converter, the barcode decoder and finally the frame analyzer. We also print
    172     everything to the Perf Graph for visualization
    173 
    174     Args:
    175       reference_yuv(string): The name of the reference YUV video that will be
    176         used in the analysis.
    177       test_page(string): The name of the test HTML page. To be looked for in the
    178         webrtc directory under chrome/test/data.
    179       helper_page(string): The name of the HTML helper page. To be looked for in
    180         the same directory as the test_page.
    181       width(int): The width of the test video frames.
    182       height(int): The height of the test video frames.
    183     """
    184     self._StartPywebsocketServer()
    185 
    186     self._WebRtcCallWithHelperPage(test_page, helper_page)
    187 
    188     # Wait for JavaScript to send all the frames to the server. The test will
    189     # have quite a lot of frames to send, so it will take at least several
    190     # seconds.
    191     no_more_frames = self.WaitUntil(
    192         function=lambda: self.ExecuteJavascript('haveMoreFramesToSend()',
    193                                                 tab_index=1),
    194         expect_retval='no-more-frames', retry_sleep=1, timeout=150)
    195     self.assertTrue(no_more_frames,
    196                     msg='Timed out while waiting for frames to send.')
    197 
    198     self.assertTrue(self._RunRGBAToI420Converter(width, height))
    199 
    200     stats_file = os.path.join(_WORKING_DIR, 'pyauto_stats.txt')
    201     analysis_result = self._CompareVideos(width, height, _OUTPUT_YUV_FILE,
    202                                           reference_yuv, stats_file)
    203     self._ProcessPsnrAndSsimOutput(analysis_result)
    204     self._ProcessFramesCountOutput(analysis_result)
    205 
    206   def _StartPywebsocketServer(self):
    207     """Starts the pywebsocket server."""
    208     print 'Starting pywebsocket server.'
    209 
    210     # Pywebsocket source directory.
    211     path_pyws_dir = os.path.join(pyauto_paths.GetThirdPartyDir(), 'pywebsocket',
    212                                  'src')
    213 
    214     # Pywebsocket standalone server.
    215     path_to_pywebsocket= os.path.join(path_pyws_dir, 'mod_pywebsocket',
    216                                       'standalone.py')
    217 
    218     # Path to the data handler to handle data received by the server.
    219     path_to_handler = os.path.join(pyauto_paths.GetSourceDir(), 'chrome',
    220                                    'test', 'functional')
    221 
    222     # The python interpreter binary.
    223     python_interp = sys.executable
    224 
    225     # The pywebsocket start command - we could add --log-level=debug for debug.
    226     # -p stands for port, -d stands for root_directory (where the data handlers
    227     # are).
    228     start_cmd = [python_interp, path_to_pywebsocket,
    229                  '-p', _PYWEBSOCKET_PORT_NUMBER,
    230                  '-d', path_to_handler,]
    231     env = os.environ
    232     # Set PYTHONPATH to include the pywebsocket base directory.
    233     env['PYTHONPATH'] = (path_pyws_dir + os.path.pathsep +
    234                          env.get('PYTHONPATH', ''))
    235 
    236     # Start the pywebsocket server. The server will not start instantly, so the
    237     # code opening websockets to it should take this into account.
    238     self._pywebsocket_server = subprocess.Popen(start_cmd, env=env)
    239 
    240   def _StopPywebsocketServer(self):
    241     """Stops the running instance of pywebsocket server."""
    242     print 'Stopping pywebsocket server.'
    243     if self._pywebsocket_server:
    244       self._pywebsocket_server.kill()
    245 
    246   def _RunRGBAToI420Converter(self, width, height):
    247     """Runs the RGBA to I420 converter.
    248 
    249     The rgba_to_i420_converter is part of the webrtc_test_tools target which
    250     should be build prior to running this test. The resulting binary should live
    251     next to Chrome.
    252 
    253     Args:
    254       width(int): The width of the frames to be converted and stitched together.
    255       height(int): The height of the frames to be converted and stitched.
    256 
    257     Returns:
    258       (bool): True if the conversion is successful, false otherwise.
    259     """
    260     path_to_rgba_converter = os.path.join(self.BrowserPath(),
    261                                           'rgba_to_i420_converter')
    262     path_to_rgba_converter = os.path.abspath(path_to_rgba_converter)
    263     path_to_rgba_converter = self.BinPathForPlatform(path_to_rgba_converter)
    264 
    265     if not os.path.exists(path_to_rgba_converter):
    266       raise webrtc_test_base.MissingRequiredBinaryException(
    267           'Could not locate rgba_to_i420_converter! Did you build the '
    268           'webrtc_test_tools target?')
    269 
    270     # We produce an output file that will later be used as an input to the
    271     # barcode decoder and frame analyzer tools.
    272     start_cmd = [path_to_rgba_converter, '--frames_dir=%s' % _WORKING_DIR,
    273                  '--output_file=%s' % _OUTPUT_YUV_FILE, '--width=%d' % width,
    274                  '--height=%d' % height, '--delete_frames']
    275     print 'Start command: ', ' '.join(start_cmd)
    276     rgba_converter = subprocess.Popen(start_cmd, stdout=sys.stdout,
    277                                       stderr=sys.stderr)
    278     rgba_converter.wait()
    279     return rgba_converter.returncode == 0
    280 
    281   def _CompareVideos(self, width, height, captured_video_filename,
    282                      reference_video_filename, stats_filename):
    283     """Compares the captured video with the reference video.
    284 
    285     The barcode decoder decodes the captured video containing barcodes overlaid
    286     into every frame of the video (produced by rgba_to_i420_converter). It
    287     produces a set of PNG images and a stats file that describes the relation
    288     between the filenames and the (decoded) frame number of each frame.
    289 
    290     Args:
    291       width(int): The frames width of the video to be decoded.
    292       height(int): The frames height of the video to be decoded.
    293       captured_video_filename(string): The captured video file we want to
    294         extract frame images and decode frame numbers from.
    295       reference_video_filename(string): The reference video file we want to
    296         compare the captured video quality with.
    297       stats_filename(string): Filename for the output file containing
    298         data that shows the relation between each frame filename and the
    299         reference file's frame numbers.
    300 
    301     Returns:
    302       (string): The output of the script.
    303 
    304     Raises:
    305       FailedToRunToolException: If the script fails to run.
    306     """
    307     path_to_analyzer = os.path.join(self.BrowserPath(), 'frame_analyzer')
    308     path_to_analyzer = os.path.abspath(path_to_analyzer)
    309     path_to_analyzer = self.BinPathForPlatform(path_to_analyzer)
    310 
    311     path_to_compare_script = os.path.join(pyauto_paths.GetThirdPartyDir(),
    312                                           'webrtc', 'tools',
    313                                           'compare_videos.py')
    314     if not os.path.exists(path_to_compare_script):
    315       raise MissingRequiredToolException('Cannot find the script at %s' %
    316                                          path_to_compare_script)
    317     python_interp = sys.executable
    318     cmd = [
    319       python_interp,
    320       path_to_compare_script,
    321       '--ref_video=%s' % reference_video_filename,
    322       '--test_video=%s' % captured_video_filename,
    323       '--frame_analyzer=%s' % path_to_analyzer,
    324       '--yuv_frame_width=%d' % width,
    325       '--yuv_frame_height=%d' % height,
    326       '--stats_file=%s' % stats_filename,
    327     ]
    328     print 'Start command: ', ' '.join(cmd)
    329 
    330     compare_videos = subprocess.Popen(cmd, stdout=subprocess.PIPE,
    331                                       stderr=subprocess.PIPE)
    332     output, error = compare_videos.communicate()
    333     if compare_videos.returncode != 0:
    334       raise FailedToRunToolException('Failed to run compare videos script!')
    335 
    336     return output
    337 
    338   def _ProcessFramesCountOutput(self, output):
    339     """Processes the analyzer output for the different frame counts.
    340 
    341     The frame analyzer outputs additional information about the number of unique
    342     frames captured, The max number of repeated frames in a sequence and the
    343     max number of skipped frames. These values are then written to the Perf
    344     Graph. (Note: Some of the repeated or skipped frames will probably be due to
    345     the imperfection of JavaScript timers.)
    346 
    347     Args:
    348       output(string): The output from the frame analyzer to be processed.
    349     """
    350     # The output from frame analyzer will be in the format:
    351     # <PSNR and SSIM stats>
    352     # Unique_frames_count:<value>
    353     # Max_repeated:<value>
    354     # Max_skipped:<value>
    355     unique_fr_pos = output.rfind('Unique_frames_count')
    356     result_str = output[unique_fr_pos:]
    357 
    358     result_list = result_str.split()
    359 
    360     for result in result_list:
    361       colon_pos = result.find(':')
    362       key = result[:colon_pos]
    363       value = result[colon_pos+1:]
    364       pyauto_utils.PrintPerfResult(key, 'VGA', value, '')
    365 
    366   def _ProcessPsnrAndSsimOutput(self, output):
    367     """Processes the analyzer output to extract the PSNR and SSIM values.
    368 
    369     The frame analyzer produces PSNR and SSIM results for every unique frame
    370     that has been captured. This method forms a list of all the psnr and ssim
    371     values and passes it to PrintPerfResult() for printing on the Perf Graph.
    372 
    373     Args:
    374       output(string): The output from the frame analyzer to be processed.
    375     """
    376     # The output is in the format:
    377     # BSTATS
    378     # psnr ssim; psnr ssim; ... psnr ssim;
    379     # ESTATS
    380     stats_beginning = output.find('BSTATS')  # Get the beginning of the stats
    381     stats_ending = output.find('ESTATS')  # Get the end of the stats
    382     stats_str = output[(stats_beginning + len('BSTATS')):stats_ending]
    383 
    384     stats_list = stats_str.split(';')
    385 
    386     psnr = []
    387     ssim = []
    388 
    389     for item in stats_list:
    390       item = item.strip()
    391       if item != '':
    392         entry = item.split(' ')
    393         psnr.append(float(entry[0]))
    394         ssim.append(float(entry[1]))
    395 
    396     pyauto_utils.PrintPerfResult('PSNR', 'VGA', psnr, '')
    397     pyauto_utils.PrintPerfResult('SSIM', 'VGA', ssim, '')
    398 
    399 
# Run the pyauto functional test suite when this file is executed directly.
if __name__ == '__main__':
  pyauto_functional.Main()
    402