Home | History | Annotate | Download | only in platform_PrinterPpds
      1 # Copyright 2018 The Chromium OS Authors. All rights reserved.
      2 # Use of this source code is governed by a BSD-style license that can be
      3 # found in the LICENSE file.
      4 
      5 import dbus
      6 import gzip
      7 import logging
      8 import os
      9 import subprocess
     10 import shutil
     11 import tempfile
     12 
     13 from autotest_lib.client.bin import test
     14 from autotest_lib.client.common_lib import error
     15 from autotest_lib.client.common_lib import file_utils
     16 from autotest_lib.client.cros import debugd_util
     17 
     18 import archiver
     19 import configurator
     20 import helpers
     21 import fake_printer
     22 import log_reader
     23 import multithreaded_processor
     24 
# Timeout (in seconds) for fetching a printed document from the fake printer
_FAKE_PRINTER_TIMEOUT = 200

# Prefix used to build CUPS printer names ('<prefix>_at_<port>')
_FAKE_PRINTER_ID = 'FakePrinter'

# First port number to use, this test uses consecutive ports numbers,
# different for every PPD file (port = _FIRST_PORT_NUMBER + task index)
_FIRST_PORT_NUMBER = 9000

# Expected success code returned by debugd printer-setup calls.
# Values are from platform/system_api/dbus/debugd/dbus-constants.h.
_CUPS_SUCCESS = 0

# Exceptions, cases that we want to omit/ignore (document is not printed)
# key: document; values: list of PPD files
_EXCEPTIONS = { 'split_streams.pdf': ['HP-DeskJet_200-pcl3.ppd.gz',
        'HP-DeskJet_310-pcl3.ppd.gz', 'HP-DeskJet_320-pcl3.ppd.gz',
        'HP-DeskJet_340C-pcl3.ppd.gz', 'HP-DeskJet_540C-pcl3.ppd.gz',
        'HP-DeskJet_560C-pcl3.ppd.gz'] }
     44 
class platform_PrinterPpds(test.test):
    """
    This test gets a list of PPD files and a list of test documents. It tries
    to add a printer using each PPD file and to print all test documents on
    every printer created this way. Because the number of PPD files to test
    can be large (more than 3K), PPD files are tested simultaneously in many
    threads.

    """
    version = 3
     55 
     56 
     57     def _get_filenames_from_PPD_indexes(self):
     58         """
     59         It returns all PPD filenames from SCS server.
     60 
     61         @returns a list of PPD filenames without duplicates
     62 
     63         """
     64         # extracts PPD filenames from all 20 index files (in parallel)
     65         outputs = self._processor.run(helpers.get_filenames_from_PPD_index, 20)
     66         # joins obtained lists and performs deduplication
     67         ppd_files = set()
     68         for output in outputs:
     69             ppd_files.update(output)
     70         return list(ppd_files)
     71 
     72 
     73     def _calculate_full_path(self, path):
     74         """
     75         Converts path given as a parameter to absolute path.
     76 
     77         @param path: a path set in configuration (relative, absolute or None)
     78 
     79         @returns absolute path or None if the input parameter was None
     80 
     81         """
     82         if path is None or os.path.isabs(path):
     83             return path
     84         path_current = os.path.dirname(os.path.realpath(__file__))
     85         return os.path.join(path_current, path)
     86 
     87 
     88     def initialize(
     89             self, path_docs, path_ppds=None, path_digests=None,
     90             debug_mode=False, threads_count=8):
     91         """
     92         @param path_docs: path to local directory with documents to print
     93         @param path_ppds: path to local directory with PPD files to test;
     94                 if None is set then all PPD files from the SCS server are
     95                 downloaded and tested
     96         @param path_digests: path to local directory with digests files for
     97                 test documents; if None is set then content of printed
     98                 documents is not verified
     99         @param debug_mode: if set to True, then the autotest temporarily
    100                 remounts the root partition in R/W mode and changes CUPS
    101                 configuration, what allows to extract pipelines for all tested
    102                 PPDs and rerun the outside CUPS
    103         @param threads_count: number of threads to use
    104 
    105         """
    106         # Calculates absolute paths for all parameters
    107         self._location_of_test_docs = self._calculate_full_path(path_docs)
    108         self._location_of_PPD_files = self._calculate_full_path(path_ppds)
    109         location_of_digests_files = self._calculate_full_path(path_digests)
    110 
    111         # This object is used for running tasks in many threads simultaneously
    112         self._processor = multithreaded_processor.MultithreadedProcessor(
    113                 threads_count)
    114 
    115         # This object is responsible for parsing CUPS logs
    116         self._log_reader = log_reader.LogReader()
    117 
    118         # This object is responsible for the system configuration
    119         self._configurator = configurator.Configurator()
    120         self._configurator.configure(debug_mode)
    121 
    122         # Reads list of test documents
    123         self._docs = helpers.list_entries_from_directory(
    124                             path=self._location_of_test_docs,
    125                             with_suffixes=('.pdf'),
    126                             nonempty_results=True,
    127                             include_directories=False)
    128 
    129         # Get list of PPD files ...
    130         if self._location_of_PPD_files is None:
    131             # ... from the SCS server
    132             self._ppds = self._get_filenames_from_PPD_indexes()
    133         else:
    134             # ... from the given local directory
    135             # Unpack archives with all PPD files:
    136             path_archive = self._calculate_full_path('ppds_all.tar.xz')
    137             path_target_dir = self._calculate_full_path('.')
    138             file_utils.rm_dir_if_exists(
    139                     os.path.join(path_target_dir,'ppds_all'))
    140             subprocess.call(['tar', 'xJf', path_archive, '-C', path_target_dir])
    141             path_archive = self._calculate_full_path('ppds_100.tar.xz')
    142             file_utils.rm_dir_if_exists(
    143                     os.path.join(path_target_dir,'ppds_100'))
    144             subprocess.call(['tar', 'xJf', path_archive, '-C', path_target_dir])
    145             # Load PPD files from the chosen directory
    146             self._ppds = helpers.list_entries_from_directory(
    147                             path=self._location_of_PPD_files,
    148                             with_suffixes=('.ppd','.ppd.gz'),
    149                             nonempty_results=True,
    150                             include_directories=False)
    151         self._ppds.sort()
    152 
    153         # Load digests files
    154         self._digests = dict()
    155         if location_of_digests_files is None:
    156             for doc_name in self._docs:
    157                 self._digests[doc_name] = dict()
    158         else:
    159             path_blacklist = os.path.join(location_of_digests_files,
    160                     'blacklist.txt')
    161             blacklist = helpers.load_blacklist(path_blacklist)
    162             for doc_name in self._docs:
    163                 digests_name = doc_name + '.digests'
    164                 path = os.path.join(location_of_digests_files, digests_name)
    165                 self._digests[doc_name] = helpers.parse_digests_file(path,
    166                         blacklist)
    167 
    168         # Prepare a working directory for pipelines
    169         if debug_mode:
    170             self._pipeline_dir = tempfile.mkdtemp(dir='/tmp')
    171         else:
    172             self._pipeline_dir = None
    173 
    174 
    def cleanup(self):
        """
        Restores system settings and removes temporary directories.

        """
        # Restore previous system settings
        self._configurator.restore()

        # Delete directories with PPD files extracted from the archives
        for directory_name in ('ppds_100', 'ppds_all'):
            file_utils.rm_dir_if_exists(
                    self._calculate_full_path(directory_name))

        # Delete pipeline working directory
        if self._pipeline_dir is not None:
            file_utils.rm_dir_if_exists(self._pipeline_dir)
    192 
    193 
    def run_once(self, path_outputs=None):
        """
        This is the main test function. It runs the testing procedure for
        every PPD file. Tests are run simultaneously in many threads.

        @param path_outputs: if it is not None, raw outputs sent
                to printers are dumped here; the directory is overwritten if
                already exists (is deleted and recreated)

        @raises error.TestFail if at least one of the tests failed

        """
        # Set directory for output documents
        self._path_output_directory = self._calculate_full_path(path_outputs)
        if self._path_output_directory is not None:
            # Delete whole directory if already exists
            file_utils.rm_dir_if_exists(self._path_output_directory)
            # Create archivers
            self._archivers = dict()
            for doc_name in self._docs:
                path_for_archiver = os.path.join(self._path_output_directory,
                        doc_name)
                self._archivers[doc_name] = archiver.Archiver(path_for_archiver,
                        self._ppds, 50)
            # A place for new digests
            self._new_digests = dict()
            for doc_name in self._docs:
                self._new_digests[doc_name] = dict()

        # Runs tests for all PPD files (in parallel)
        outputs = self._processor.run(self._thread_test_PPD, len(self._ppds))

        # Analyses tests' outputs, prints a summary report and builds a list
        # of PPD filenames that failed
        failures = []
        for i, output in enumerate(outputs):
            ppd_file = self._ppds[i]
            # _thread_test_PPD returns True on success or an error message
            # (string) on failure; an identity check avoids matching truthy
            # non-True values by accident
            if output is not True:
                failures.append(ppd_file)
            else:
                output = 'OK'
            line = "%s: %s" % (ppd_file, output)
            logging.info(line)

        # Calculate digests files for output documents (if dumped)
        if self._path_output_directory is not None:
            for doc_name in self._docs:
                path = os.path.join(self._path_output_directory,
                        doc_name + '.digests')
                helpers.save_digests_file(path, self._new_digests[doc_name],
                        failures)

        # Raises an exception if at least one test failed
        if failures:
            failures.sort()
            raise error.TestFail(
                    'Test failed for %d PPD files: %s'
                    % (len(failures), ', '.join(failures)) )
    252 
    253 
    def _thread_test_PPD(self, task_id):
        """
        Runs the test procedure for a single PPD file.

        It retrieves the assigned PPD file and runs the test procedure on it.

        @param task_id: an index of the PPD file in self._ppds

        @returns True when the test was passed or description of the error
                (string) if the test failed

        """
        # Obtains the content of the PPD file
        try:
            ppd_file = self._ppds[task_id]
            if self._location_of_PPD_files is None:
                # Downloads PPD file from the SCS server
                ppd_content = helpers.download_PPD_file(ppd_file)
            else:
                # Reads PPD file from local filesystem
                path_ppd = os.path.join(self._location_of_PPD_files, ppd_file)
                with open(path_ppd, 'rb') as ppd_file_descriptor:
                    ppd_content = ppd_file_descriptor.read()
        except BaseException as e:
            return 'MISSING PPD: ' + str(e)

        # Runs the test procedure; each PPD file gets its own port number
        try:
            self._PPD_test_procedure(
                    ppd_file, ppd_content, _FIRST_PORT_NUMBER + task_id)
        except BaseException as e:
            return 'FAIL: ' + str(e)

        return True
    288 
    289 
    def _PPD_test_procedure(self, ppd_name, ppd_content, port):
        """
        Test procedure for single PPD file.

        It tries to run the following steps:
        1. Starts an instance of FakePrinter
        2. Configures CUPS printer
        3. For each test document run the following steps:
            3a. Sends tests documents to the CUPS printer
            3b. Fetches the raw document from the FakePrinter
            3c. Parse CUPS logs and check for any errors
            3d. If self._pipeline_dir is set, extract the executed CUPS
                pipeline, rerun it in bash console and verify every step and
                final output
            3e. If self._path_output_directory is set, save the raw document
                and all intermediate steps in the provided directory
            3f. If the digest is available, verify a digest of an output
                documents
        4. Removes CUPS printer and stops FakePrinter
        If the test fails this method throws an exception.

        @param ppd_name: a name of the PPD file
        @param ppd_content: a content of the PPD file
        @param port: a port for the printer

        @throws Exception when the test fails

        """
        # Create work directory for external pipelines and save the PPD file
        # there (if needed)
        path_ppd = None
        if self._pipeline_dir is not None:
            path_pipeline_ppd_dir = os.path.join(self._pipeline_dir, ppd_name)
            os.makedirs(path_pipeline_ppd_dir)
            path_ppd = os.path.join(path_pipeline_ppd_dir, ppd_name)
            with open(path_ppd, 'wb') as file_ppd:
                file_ppd.write(ppd_content)
            # The pipeline rerun needs an uncompressed PPD file on disk
            if path_ppd.endswith('.gz'):
                subprocess.call(['gzip', '-d', path_ppd])
                path_ppd = path_ppd[0:-3]

        try:
            # Starts the fake printer
            with fake_printer.FakePrinter(port) as printer:

                # Add a CUPS printer manually with given ppd file
                cups_printer_id = '%s_at_%05d' % (_FAKE_PRINTER_ID,port)
                result = debugd_util.iface().CupsAddManuallyConfiguredPrinter(
                                             cups_printer_id,
                                             'socket://127.0.0.1:%d' % port,
                                             dbus.ByteArray(ppd_content))
                if result != _CUPS_SUCCESS:
                    raise Exception('valid_config - Could not setup valid '
                        'printer %d' % result)

                # Prints all test documents
                try:
                    for doc_name in self._docs:
                        # Omit exceptions; a placeholder digest (of a single
                        # NUL byte) is recorded so the digests file stays
                        # complete
                        if ( doc_name in _EXCEPTIONS and
                                ppd_name in _EXCEPTIONS[doc_name] ):
                            if self._path_output_directory is not None:
                                self._new_digests[doc_name][ppd_name] = (
                                        helpers.calculate_digest('\x00') )
                            continue
                        # Full path to the test document
                        path_doc = os.path.join(
                                        self._location_of_test_docs, doc_name)
                        # Sends test document to printer
                        argv = ['lp', '-d', cups_printer_id]
                        argv += [path_doc]
                        subprocess.call(argv)
                        # Prepare a workdir for the pipeline (if needed)
                        path_pipeline_workdir_temp = None
                        if self._pipeline_dir is not None:
                            path_pipeline_workdir = os.path.join(
                                    path_pipeline_ppd_dir, doc_name)
                            path_pipeline_workdir_temp = os.path.join(
                                    path_pipeline_workdir, 'temp')
                            os.makedirs(path_pipeline_workdir_temp)
                        # Gets the output document from the fake printer;
                        # blocks until the document arrives or the timeout
                        # expires
                        doc = printer.fetch_document(_FAKE_PRINTER_TIMEOUT)
                        digest = helpers.calculate_digest(doc)
                        # Retrive data from the log file
                        no_errors, logs, pipeline = \
                                self._log_reader.extract_result(
                                        cups_printer_id, path_ppd, path_doc,
                                        path_pipeline_workdir_temp)
                        # Archive obtained results in the output directory
                        if self._path_output_directory is not None:
                            self._archivers[doc_name].save_file(
                                    ppd_name, '.out', doc, apply_gzip=True)
                            self._archivers[doc_name].save_file(
                                    ppd_name, '.log', logs)
                            if pipeline is not None:
                                self._archivers[doc_name].save_file(
                                        ppd_name, '.sh', pipeline)
                            # Set new digest
                            self._new_digests[doc_name][ppd_name] = digest
                        # Fail if any of CUPS filters failed
                        if not no_errors:
                            raise Exception('One of the CUPS filters failed')
                        # Reruns the pipeline and dump intermediate outputs
                        if self._pipeline_dir is not None:
                            self._rerun_whole_pipeline(
                                        pipeline, path_pipeline_workdir,
                                        ppd_name, doc_name, digest)
                            shutil.rmtree(path_pipeline_workdir)
                        # Check document's digest (if known)
                        if ppd_name in self._digests[doc_name]:
                            digest_expected = self._digests[doc_name][ppd_name]
                            if digest_expected != digest:
                                message = 'Document\'s digest does not match'
                                raise Exception(message)
                        else:
                            # Simple validation: the output must not be
                            # (nearly) empty
                            if len(doc) < 16:
                                raise Exception('Empty output')
                finally:
                    # Remove CUPS printer
                    debugd_util.iface().CupsRemovePrinter(cups_printer_id)

            # The fake printer is stopped at the end of "with" statement
        finally:
            # Finalize archivers and cleaning
            if self._path_output_directory is not None:
                for doc_name in self._docs:
                    self._archivers[doc_name].finalize_prefix(ppd_name)
            # Clean the pipelines' working directories
            if self._pipeline_dir is not None:
                shutil.rmtree(path_pipeline_ppd_dir)
    421 
    422 
    def _rerun_whole_pipeline(
            self, pipeline, path_workdir, ppd_name, doc_name, digest):
        """
        Reruns the whole pipeline outside CUPS server.

        Reruns a printing pipeline dumped from CUPS. All intermediate outputs
        are dumped and archived for future analysis.

        @param pipeline: a pipeline as a bash script
        @param path_workdir: an existing directory to use as working directory
        @param ppd_name: a filenames prefix used for archivers
        @param doc_name: a document name, used to select a proper archiver
        @param digest: an digest of the output produced by CUPS (for comparison)

        @raises Exception in case of any errors

        """
        # Dump the pipeline to a script file
        path_pipeline = os.path.join(path_workdir, 'pipeline.sh')
        with open(path_pipeline, 'wb') as file_pipeline:
            file_pipeline.write(pipeline)
        # Execute the pipeline in its working directory; -e stops the script
        # on the first failing step
        exit_code = subprocess.Popen(
                ['/bin/bash', '-e', path_pipeline], cwd=path_workdir).wait()
        # Count the produced output files (1.doc.gz, 2.doc.gz, ...)
        files_count = 0
        while os.path.isfile(os.path.join(path_workdir,
                "%d.doc.gz" % (files_count + 1))):
            files_count += 1
        # Reads the last output (to compare it with the output produced by
        # CUPS)
        if exit_code == 0:
            path_last_output = os.path.join(path_workdir,
                    "%d.doc.gz" % files_count)
            with gzip.open(path_last_output) as last_file:
                content_digest = helpers.calculate_digest(last_file.read())
        # Archives all intermediate files (if desired)
        if self._path_output_directory is not None:
            for index in range(1, files_count + 1):
                self._archivers[doc_name].move_file(ppd_name, ".err%d" % index,
                        os.path.join(path_workdir, "%d.err" % index))
                self._archivers[doc_name].move_file(ppd_name,
                        ".out%d.gz" % index,
                        os.path.join(path_workdir, "%d.doc.gz" % index))
        # Validation
        if exit_code != 0:
            raise Exception("A pipeline script returned %d" % exit_code)
        if content_digest != digest:
            raise Exception("The output returned by the pipeline is different"
                    " than the output produced by CUPS")
    470