#!/usr/bin/env python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#    * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#    * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#    * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool.  If these were
# caught by lint, it would save time for both myself and my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
#   declared const
# - Check accessors that return non-const pointer member vars are
#   *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
#   that are not simple inline getters and setters
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# - Check for old-school (void) cast for call-sites of functions
#   ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
#   ctor(s?), dtor, friend declarations, methods, member vars)
#

"""Does google-lint on c++ files.

The goal of this script is to identify places in the code that *may*
be in non-compliance with google style.  It does not attempt to fix
up these problems -- the point is to educate.
It also does not attempt to find all problems, or to ensure that
everything it does find is legitimately a problem.

In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""

import codecs
import copy
import getopt
import math  # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata


_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
                   [--counting=total|toplevel|detailed]
        <file> [file] ...

  The style guidelines this tries to follow are those in
    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml

  Every problem is given a confidence score from 1-5, with 5 meaning we are
  certain of the problem, and 1 meaning it could be a legitimate construct.
  This will miss some errors, and is not a substitute for a code review.

  To suppress false-positive errors of a certain category, add a
  'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
  suppresses errors of all categories on that line.

  The files passed in will be linted; at least one file must be provided.
  Linted extensions are .cc, .cpp, and .h.  Other file types will be ignored.

  Flags:

    output=vs7
      By default, the output is formatted to ease emacs parsing.  Visual Studio
      compatible output (vs7) may also be used.  Other formats are unsupported.

    verbose=#
      Specify a number 0-5 to restrict errors to certain verbosity levels.

    filter=-x,+y,...
      Specify a comma-separated list of category-filters to apply: only
      error messages whose category names pass the filters will be printed.
      (Category names are printed with the message and look like
      "[whitespace/indent]".)  Filters are evaluated left to right.
      "-FOO" and "FOO" mean "do not print categories that start with FOO".
      "+FOO" means "do print categories that start with FOO".

      Examples: --filter=-whitespace,+whitespace/braces
                --filter=whitespace,runtime/printf,+runtime/printf_format
                --filter=-,+build/include_what_you_use

      To see a list of all the categories used in cpplint, pass no arg:
         --filter=

    counting=total|toplevel|detailed
      The total number of errors found is always printed.  If
      'toplevel' is provided, then the count of errors in each of
      the top-level categories like 'build' and 'whitespace' will
      also be printed.  If 'detailed' is provided, then a count
      is provided for each category like 'build/class'.

    root=subdir
      The root directory used for deriving header guard CPP variable.
      By default, the header guard CPP variable is calculated as the relative
      path to the directory that contains .git, .hg, or .svn.  When this flag
      is specified, the relative path is calculated from the specified
      directory.  If the specified directory does not exist, this flag is
      ignored.

      Examples:
        Assuming that src/.git exists, the header guard CPP variables for
        src/chrome/browser/ui/browser.h are:

        No flag => CHROME_BROWSER_UI_BROWSER_H_
        --root=chrome => BROWSER_UI_BROWSER_H_
        --root=chrome/browser => UI_BROWSER_H_
"""
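
# Illustrative example (not part of cpplint itself): the NOLINT mechanism
# described in _USAGE above is used inside the *linted C++ file*, not here.
# A C++ line such as
#
#   int64 file_size = stream->Size();  // NOLINT(runtime/int)
#
# would have its runtime/int warning suppressed, while a bare "// NOLINT"
# (or "// NOLINT(*)") suppresses every category on that line.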

# We categorize each error message we print.  Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here!  cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
    'build/class',
    'build/deprecated',
    'build/endif_comment',
    'build/explicit_make_pair',
    'build/forward_decl',
    'build/header_guard',
    'build/include',
    'build/include_alpha',
    'build/include_order',
    'build/include_what_you_use',
    'build/namespaces',
    'build/printf_format',
    'build/storage_class',
    'legal/copyright',
    'readability/alt_tokens',
    'readability/braces',
    'readability/casting',
    'readability/check',
    'readability/constructors',
    'readability/fn_size',
    'readability/function',
    'readability/multiline_comment',
    'readability/multiline_string',
    'readability/namespace',
    'readability/nolint',
    'readability/streams',
    'readability/todo',
    'readability/utf8',
    'runtime/arrays',
    'runtime/casting',
    'runtime/explicit',
    'runtime/int',
    'runtime/init',
    'runtime/invalid_increment',
    'runtime/member_string_references',
    'runtime/memset',
    'runtime/operator',
    'runtime/printf',
    'runtime/printf_format',
    'runtime/references',
    'runtime/rtti',
    'runtime/sizeof',
    'runtime/string',
    'runtime/threadsafe_fn',
    'whitespace/blank_line',
    'whitespace/braces',
    'whitespace/comma',
    'whitespace/comments',
    'whitespace/empty_loop_body',
    'whitespace/end_of_line',
    'whitespace/ending_newline',
    'whitespace/forcolon',
    'whitespace/indent',
    'whitespace/labels',
    'whitespace/line_length',
    'whitespace/newline',
    'whitespace/operators',
    'whitespace/parens',
    'whitespace/semicolon',
    'whitespace/tab',
    'whitespace/todo'
    ]

# The default state of the category filter.  This is overridden by the
# --filter= flag.  By default all errors are on, so only add here categories
# that should be off by default (i.e., categories that must be enabled by the
# --filter= flags).  All entries here should start with a '-' or '+', as in
# the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']

# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.

# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
    'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
    'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
    'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
    'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
    'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
    'utility', 'vector', 'vector.h',
    ])


# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
    'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
    'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
    'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
    'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
    'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
    'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
    'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
    'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
    'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
    'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
    'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
    'stdiostream.h', 'streambuf', 'streambuf.h', 'stream.h', 'strfile.h',
    'string', 'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo',
    'valarray',
    ])


# Assertion macros.  These are defined in base/logging.h and
# testing/base/gunit.h.  Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
    'DCHECK', 'CHECK',
    'EXPECT_TRUE_M', 'EXPECT_TRUE',
    'ASSERT_TRUE_M', 'ASSERT_TRUE',
    'EXPECT_FALSE_M', 'EXPECT_FALSE',
    'ASSERT_FALSE_M', 'ASSERT_FALSE',
    ]

# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])

for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                        ('>=', 'GE'), ('>', 'GT'),
                        ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement

for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
                            ('>=', 'LT'), ('>', 'LE'),
                            ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
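
# Illustrative example (not part of cpplint itself): after the loops above,
# the replacement table can be read as, e.g.:
#
#   _CHECK_REPLACEMENT['CHECK']['==']        == 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['<']  == 'EXPECT_GE'
#
# i.e. EXPECT_FALSE(a < b) is suggested to be rewritten as EXPECT_GE(a, b).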

# Alternative tokens and their replacements.  For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
    'and': '&&',
    'bitor': '|',
    'or': '||',
    'xor': '^',
    'compl': '~',
    'bitand': '&',
    'and_eq': '&=',
    'or_eq': '|=',
    'xor_eq': '^=',
    'not': '!',
    'not_eq': '!='
    }

# Compile regular expression that matches all the above keywords.  The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments (http://go/nsiut )
# and multi-line strings (http://go/beujw ), but those have always been
# troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')


# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5

# These constants define the current inline assembly state
_NO_ASM = 0       # Outside of inline assembly block
_INSIDE_ASM = 1   # Inside inline assembly block
_END_ASM = 2      # Last line of inline assembly block
_BLOCK_ASM = 3    # The whole block is an inline assembly block

# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
                        r'(?:\s+(volatile|__volatile__))?'
                        r'\s*[{(]')


_regexp_compile_cache = {}

# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')

# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}

# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None


def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Updates the global list of error-suppressions.

  Parses any NOLINT comments on the current line, updating the global
  error_suppressions store.  Reports an error if the NOLINT comment
  was malformed.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
  matched = _RE_SUPPRESSION.search(raw_line)
  if matched:
    category = matched.group(1)
    if category in (None, '(*)'):  # => "suppress all"
      _error_suppressions.setdefault(None, set()).add(linenum)
    else:
      if category.startswith('(') and category.endswith(')'):
        category = category[1:-1]
        if category in _ERROR_CATEGORIES:
          _error_suppressions.setdefault(category, set()).add(linenum)
        else:
          error(filename, linenum, 'readability/nolint', 5,
                'Unknown NOLINT error category: %s' % category)


def ResetNolintSuppressions():
  "Resets the set of NOLINT suppressions to empty."
  _error_suppressions.clear()


def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  return (linenum in _error_suppressions.get(category, set()) or
          linenum in _error_suppressions.get(None, set()))


def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp."""
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if not pattern in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
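
# Illustrative example (not part of cpplint itself): Match() anchors at the
# beginning of the string, while Search() scans the whole string, mirroring
# re.match/re.search:
#
#   Match(r'#\s*define', '#define FOO')      -> a match object
#   Match(r'#\s*define', '  #define FOO')    -> None (leading spaces)
#   Search(r'#\s*define', '  #define FOO')   -> a match object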


def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp."""
  if not pattern in _regexp_compile_cache:
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)


class _IncludeState(dict):
  """Tracks line numbers for includes, and the order in which includes appear.

  As a dict, an _IncludeState object serves as a mapping between include
  filename and line number on which that file was included.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above.  Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.

  """
  # self._section will move monotonically through this set.  If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    dict.__init__(self)
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      header_path: Header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
    if self._last_header > canonical_header:
      return False
    self._last_header = canonical_header
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.

    """
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    if last_section != self._section:
      self._last_header = ''

    return ''


class _CppLintState(object):
  """Maintains module-wide state."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts

    # output format:
    # "emacs" - format that emacs can parse (default)
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    self.output_format = 'emacs'

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      if self.counting != 'detailed':
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    for category, count in self.errors_by_category.iteritems():
      sys.stderr.write('Category \'%s\' errors found: %d\n' %
                       (category, count))
    sys.stderr.write('Total errors found: %d\n' % self.error_count)

_cpplint_state = _CppLintState()


def _OutputFormat():
  """Gets the module's output format."""
  return _cpplint_state.output_format


def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  _cpplint_state.SetOutputFormat(output_format)


def _VerboseLevel():
  """Returns the module's verbosity setting."""
  return _cpplint_state.verbose_level


def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  return _cpplint_state.SetVerboseLevel(level)


def _SetCountingStyle(level):
  """Sets the module's counting options."""
  _cpplint_state.SetCountingStyle(level)


def _Filters():
  """Returns the module's list of output filters, as a list."""
  return _cpplint_state.filters


def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)


class _FunctionState(object):
  """Tracks current function name and the number of lines in its body."""

  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.

  def __init__(self):
    self.in_a_function = False
    self.lines_in_function = 0
    self.current_function = ''

  def Begin(self, function_name):
    """Start analyzing function body.

    Args:
      function_name: The name of the function being tracked.
    """
    self.in_a_function = True
    self.lines_in_function = 0
    self.current_function = function_name

  def Count(self):
    """Count line in current function body."""
    if self.in_a_function:
      self.lines_in_function += 1

  def Check(self, error, filename, linenum):
    """Report if too many lines in function body.

    Args:
      error: The function to call with any errors found.
      filename: The name of the current file.
      linenum: The number of the line to check.
    """
    if Match(r'T(EST|est)', self.current_function):
      base_trigger = self._TEST_TRIGGER
    else:
      base_trigger = self._NORMAL_TRIGGER
    trigger = base_trigger * 2**_VerboseLevel()

    if self.lines_in_function > trigger:
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
      if error_level > 5:
        error_level = 5
      error(filename, linenum, 'readability/fn_size', error_level,
            'Small and focused functions are preferred:'
            ' %s has %d non-comment lines'
            ' (error triggered by exceeding %d lines).' % (
                self.current_function, self.lines_in_function, trigger))

  def End(self):
    """Stop analyzing function body."""
    self.in_a_function = False


class _IncludeError(Exception):
  """Indicates a problem with the include order in a file."""
  pass


class FileInfo:
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if os.path.exists(fullname):
      project_dir = os.path.dirname(fullname)

      if os.path.exists(os.path.join(project_dir, ".svn")):
        # If there's a .svn file in the current directory, we recursively look
        # up the directory tree for the top of the SVN checkout
        root_dir = project_dir
        one_up_dir = os.path.dirname(root_dir)
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
          root_dir = os.path.dirname(root_dir)
          one_up_dir = os.path.dirname(one_up_dir)

        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
      # searching up from the current path.
      root_dir = os.path.dirname(fullname)
      while (root_dir != os.path.dirname(root_dir) and
             not os.path.exists(os.path.join(root_dir, ".git")) and
             not os.path.exists(os.path.join(root_dir, ".hg")) and
             not os.path.exists(os.path.join(root_dir, ".svn"))):
        root_dir = os.path.dirname(root_dir)

      if (os.path.exists(os.path.join(root_dir, ".git")) or
          os.path.exists(os.path.join(root_dir, ".hg")) or
          os.path.exists(os.path.join(root_dir, ".svn"))):
        prefix = os.path.commonprefix([root_dir, project_dir])
        return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """

    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')


def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""

  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        is_filtered = True
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  if is_filtered:
    return False

  return True


def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "NOLINT(category)" comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if _ShouldPrintError(category, confidence, linenum):
    _cpplint_state.IncrementErrorCount(category)
    if _cpplint_state.output_format == 'vs7':
      sys.stderr.write('%s(%s): %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    elif _cpplint_state.output_format == 'eclipse':
      sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
    else:
      sys.stderr.write('%s:%s: %s [%s] [%d]\n' % (
          filename, linenum, message, category, confidence))
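
# Illustrative example (not part of cpplint itself): with the default 'emacs'
# output format, Error() writes lines such as
#
#   foo/bar.cc:42: Missing space before { [whitespace/braces] [5]
#
# to stderr (the message text here is just an example), while --output=vs7
# produces "foo/bar.cc(42): ..." so Visual Studio can parse the location.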


# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings.  Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters.  Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removal so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line.  Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r"""(\s*/\*.*\*/\s*$|
         /\*.*\*/\s+|
         \s+/\*.*\*/(?=\W)|
         /\*.*\*/)""", re.VERBOSE)


def IsCppString(line):
  """Does line terminate so that the next symbol is in a string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """

  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1


def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment."""
  while lineix < len(lines):
    if lines[lineix].strip().startswith('/*'):
      # Only return this marker if the comment goes beyond this line
      if lines[lineix].strip().find('*/', 2) < 0:
        return lineix
    lineix += 1
  return len(lines)


def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker."""
  while lineix < len(lines):
    if lines[lineix].strip().endswith('*/'):
      return lineix
    lineix += 1
  return len(lines)


def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  for i in range(begin, end):
    lines[i] = '// dummy'


def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines."""
  lineix = 0
  while lineix < len(lines):
    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
    if lineix_begin >= len(lines):
      return
    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
    if lineix_end >= len(lines):
      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
    lineix = lineix_end + 1


def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  commentpos = line.find('//')
  if commentpos != -1 and not IsCppString(line[:commentpos]):
    line = line[:commentpos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
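
# Illustrative example (not part of cpplint itself): CleanseComments() strips
# comments from a single physical line, e.g.
#
#   CleanseComments('int x = 0;  // counter')   -> 'int x = 0;'
#   CleanseComments('f(/* unused */ NULL);')    -> 'f(NULL);'
#
# A // that appears inside a string literal is kept, thanks to the
# IsCppString() check above.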


class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw_lines member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    for linenum in range(len(lines)):
      self.lines.append(CleanseComments(lines[linenum]))
      elided = self._CollapseStrings(lines[linenum])
      self.elided.append(CleanseComments(elided))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    if not _RE_PATTERN_INCLUDE.match(elided):
      # Remove escaped characters first to make quote/single quote collapsing
      # basic.  Things that look like escaped characters shouldn't occur
      # outside of strings and chars.
      elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
      elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
      elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
    return elided


def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar):
  """Find the position just after the matching endchar.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    depth: nesting level at startpos.
    startchar: expression opening character.
    endchar: expression closing character.

  Returns:
    Index just after endchar.
  """
  for i in xrange(startpos, len(line)):
    if line[i] == startchar:
      depth += 1
    elif line[i] == endchar:
      depth -= 1
      if depth == 0:
        return i + 1
  return -1
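
# Illustrative example (not part of cpplint itself): starting at the '(' in
# column 3 of the line below, the matching ')' is at column 17, so the
# function returns 18 (the index just past it):
#
#   FindEndOfExpressionInLine('if (a && (b || c)) {', 3, 0, '(', ')')  -> 18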


def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
  linenum/pos that correspond to the closing of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """

  line = clean_lines.elided[linenum]
  startchar = line[pos]
  if startchar not in '({[':
    return (line, clean_lines.NumLines(), -1)
  if startchar == '(': endchar = ')'
  if startchar == '[': endchar = ']'
  if startchar == '{': endchar = '}'

  # Check first line
  end_pos = FindEndOfExpressionInLine(line, pos, 0, startchar, endchar)
  if end_pos > -1:
    return (line, linenum, end_pos)
  tail = line[pos:]
  num_open = tail.count(startchar) - tail.count(endchar)
  while linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    delta = line.count(startchar) - line.count(endchar)
    if num_open + delta <= 0:
      return (line, linenum,
              FindEndOfExpressionInLine(line, 0, num_open, startchar, endchar))
    num_open += delta

  # Did not find endchar before end of file, give up
  return (line, clean_lines.NumLines(), -1)


def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file."""

  # We'll say it should occur by line 10.  Don't forget there's a
  # dummy line at the front.
  for line in xrange(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I): break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found.  '
          'You should have a line: "Copyright [year] <Copyright Owner>"')


def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.

  """

  # Restores original filename in case cpplint is invoked from Emacs's
  # flymake.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)

  fileinfo = FileInfo(filename)
  file_path_from_root = fileinfo.RepositoryName()
  if _root:
    file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
  return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_'
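
# Illustrative example (not part of cpplint itself), matching the _USAGE text:
# for a file whose repository-relative path is chrome/browser/ui/browser.h,
#
#   GetHeaderGuardCPPVariable(...)  -> 'CHROME_BROWSER_UI_BROWSER_H_'
#
# and with --root=chrome the leading 'CHROME_' component is dropped.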


def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = None
  ifndef_linenum = 0
  define = None
  endif = None
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif // %s' % cppvar):
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5

    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)


def CheckForUnicodeReplacementCharacters(filename, lines, error):
  """Logs an error for each line containing Unicode replacement characters.

  These indicate that either the file contained invalid UTF-8 (likely)
  or Unicode replacement characters (which it shouldn't).  Note that
  it's possible for this to throw off line numbering if the invalid
  UTF-8 occurred adjacent to a newline.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for linenum, line in enumerate(lines):
    if u'\ufffd' in line:
      error(filename, linenum, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')


def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # The array lines() was created by adding two newlines to the
  # original file (go figure), then splitting on \n.
  # To verify that the file ends in \n, we just have to make sure the
  # last-but-two element of lines() exists and is empty.
  if len(lines) < 3 or lines[-2]:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')


def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other.
  Likewise, it's ok for strings to extend across multiple lines, as
  long as a line continuation character (backslash) terminates each
  line.  Although not currently prohibited by the C++ style guide,
  it's ugly and unnecessary.  We don't do well with either in this
  lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
  # second (escaped) slash may trigger later \" detection erroneously.
  line = line.replace('\\\\', '')

  if line.count('/*') > line.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  if (line.count('"') - line.count('\\"')) % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found.  This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings.  '
          'They\'re ugly and unnecessary, and you should use concatenation '
          'instead.')


threading_list = (
    ('asctime(', 'asctime_r('),
    ('ctime(', 'ctime_r('),
    ('getgrgid(', 'getgrgid_r('),
    ('getgrnam(', 'getgrnam_r('),
    ('getlogin(', 'getlogin_r('),
    ('getpwnam(', 'getpwnam_r('),
    ('getpwuid(', 'getpwuid_r('),
    ('gmtime(', 'gmtime_r('),
    ('localtime(', 'localtime_r('),
    ('rand(', 'rand_r('),
    ('readdir(', 'readdir_r('),
    ('strtok(', 'strtok_r('),
    ('ttyname(', 'ttyname_r('),
    )


def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading.  Also, engineers are relying on their old experience;
  they have learned POSIX before threading extensions were added.  These
  tests guide the engineers to use thread-safe functions (when using
  POSIX directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for single_thread_function, multithread_safe_function in threading_list:
    ix = line.find(single_thread_function)
    # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
    if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
                                line[ix - 1] not in ('_', '.', '>'))):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + multithread_safe_function +
            '...) instead of ' + single_thread_function +
            '...) for improved thread safety.')


# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
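
# Illustrative example (not part of cpplint itself): the pattern above matches
#
#   '*count++;'     (flagged: it increments the pointer, not the value)
#
# but not '(*count)++;' or '*count += 1;', which are the intended fixes.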


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example, the following function:
    void increment_counter(int* count) {
      *count++;
    }
  is invalid, because it effectively does count++, moving the pointer, and
  should be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  if _RE_PATTERN_INVALID_INCREMENT.match(line):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')


class _BlockInfo(object):
  """Stores information about a generic block of code."""

  def __init__(self, seen_open_brace):
    self.seen_open_brace = seen_open_brace
    self.open_parentheses = 0
    self.inline_asm = _NO_ASM

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text up to the opening brace.

    This is mostly for checking the text after the class identifier
    and the "{", usually where the base class is specified.  For other
    blocks, there isn't much to check, so we always pass.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Run checks that apply to text after the closing brace.

    This is mostly used for checking end of namespace comments.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    pass


class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    if class_or_struct == 'struct':
      self.access = 'public'
    else:
      self.access = 'private'

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True


class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name or ''
    self.starting_linenum = linenum

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines are enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminates namespaces inside
    # preprocessor macros can be cpplint clean.  Example: http://go/nxpiz
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.  Example: http://go/ldkdc, http://cl/23548205
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace"')


class _PreprocessorInfo(object):
  """Stores checkpoints of nesting stacks when #if/#else is seen."""

  def __init__(self, stack_before_if):
    # The entire nesting stack before #if
    self.stack_before_if = stack_before_if

    # The entire nesting stack up to #else
    self.stack_before_else = []

    # Whether we have already seen #else or #elif
    self.seen_else = False


class _NestingState(object):
  """Holds states related to parsing braces."""

  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Stack of _PreprocessorInfo objects.
    self.pp_stack = []

  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    return (not self.stack) or self.stack[-1].seen_open_brace

  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)

  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif
    (see http://go/qwddn for original example)

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.

    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass

  def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Update pp_stack first
    self.UpdatePreprocessor(line)

    # Count parentheses.  This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
      inner_block = self.stack[-1]
      depth_change = line.count('(') - line.count(')')
      inner_block.open_parentheses += depth_change

      # Also check if we are starting or ending an inline assembly block.
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
        if (depth_change != 0 and
            inner_block.open_parentheses == 1 and
            _MATCH_ASM.match(line)):
          # Enter assembly block
          inner_block.inline_asm = _INSIDE_ASM
        else:
          # Not entering assembly block.  If previous line was _END_ASM,
          # we will now shift to _NO_ASM state.
1615 inner_block.inline_asm = _NO_ASM 1616 elif (inner_block.inline_asm == _INSIDE_ASM and 1617 inner_block.open_parentheses == 0): 1618 # Exit assembly block 1619 inner_block.inline_asm = _END_ASM 1620 1621 # Consume namespace declaration at the beginning of the line. Do 1622 # this in a loop so that we catch same line declarations like this: 1623 # namespace proto2 { namespace bridge { class MessageSet; } } 1624 while True: 1625 # Match start of namespace. The "\b\s*" below catches namespace 1626 # declarations even if it weren't followed by a whitespace, this 1627 # is so that we don't confuse our namespace checker. The 1628 # missing spaces will be flagged by CheckSpacing. 1629 namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line) 1630 if not namespace_decl_match: 1631 break 1632 1633 new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum) 1634 self.stack.append(new_namespace) 1635 1636 line = namespace_decl_match.group(2) 1637 if line.find('{') != -1: 1638 new_namespace.seen_open_brace = True 1639 line = line[line.find('{') + 1:] 1640 1641 # Look for a class declaration in whatever is left of the line 1642 # after parsing namespaces. The regexp accounts for decorated classes 1643 # such as in: 1644 # class LOCKABLE API Object { 1645 # }; 1646 # 1647 # Templates with class arguments may confuse the parser, for example: 1648 # template <class T 1649 # class Comparator = less<T>, 1650 # class Vector = vector<T> > 1651 # class HeapQueue { 1652 # 1653 # Because this parser has no nesting state about templates, by the 1654 # time it saw "class Comparator", it may think that it's a new class. 1655 # Nested templates have a similar problem: 1656 # template < 1657 # typename ExportedType, 1658 # typename TupleType, 1659 # template <typename, typename> class ImplTemplate> 1660 # 1661 # To avoid these cases, we ignore classes that are followed by '=' or '>' 1662 class_decl_match = Match( 1663 r'\s*(template\s*<[\w\s<>,:]*>\s*)?' 1664 '(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)' 1665 '(([^=>]|<[^<>]*>)*)$', line) 1666 if (class_decl_match and 1667 (not self.stack or self.stack[-1].open_parentheses == 0)): 1668 self.stack.append(_ClassInfo( 1669 class_decl_match.group(4), class_decl_match.group(2), 1670 clean_lines, linenum)) 1671 line = class_decl_match.group(5) 1672 1673 # If we have not yet seen the opening brace for the innermost block, 1674 # run checks here. 1675 if not self.SeenOpenBrace(): 1676 self.stack[-1].CheckBegin(filename, clean_lines, linenum, error) 1677 1678 # Update access control if we are inside a class/struct 1679 if self.stack and isinstance(self.stack[-1], _ClassInfo): 1680 access_match = Match(r'\s*(public|private|protected)\s*:', line) 1681 if access_match: 1682 self.stack[-1].access = access_match.group(1) 1683 1684 # Consume braces or semicolons from what's left of the line 1685 while True: 1686 # Match first brace, semicolon, or closed parenthesis. 1687 matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line) 1688 if not matched: 1689 break 1690 1691 token = matched.group(1) 1692 if token == '{': 1693 # If namespace or class hasn't seen a opening brace yet, mark 1694 # namespace/class head as complete. Push a new block onto the 1695 # stack otherwise. 
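# For illustration (hypothetical input): on a line such as "class Bar {", the
# '{' here completes the _ClassInfo head that was just pushed, whereas a bare
# '{' that merely opens a new scope pushes a fresh _BlockInfo instead.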
1696 if not self.SeenOpenBrace():
1697 self.stack[-1].seen_open_brace = True
1698 else:
1699 self.stack.append(_BlockInfo(True))
1700 if _MATCH_ASM.match(line):
1701 self.stack[-1].inline_asm = _BLOCK_ASM
1702 elif token == ';' or token == ')':
1703 # If we haven't seen an opening brace yet, but we already saw
1704 # a semicolon, this is probably a forward declaration. Pop
1705 # the stack for these.
1706 #
1707 # Similarly, if we haven't seen an opening brace yet, but we
1708 # already saw a closing parenthesis, then these are probably
1709 # function arguments with extra "class" or "struct" keywords.
1710 # Also pop the stack for these.
1711 if not self.SeenOpenBrace():
1712 self.stack.pop()
1713 else: # token == '}'
1714 # Perform end of block checks and pop the stack.
1715 if self.stack:
1716 self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
1717 self.stack.pop()
1718 line = matched.group(2)
1719
1720 def InnermostClass(self):
1721 """Get class info on the top of the stack.
1722
1723 Returns:
1724 A _ClassInfo object if we are inside a class, or None otherwise.
1725 """
1726 for i in range(len(self.stack), 0, -1):
1727 classinfo = self.stack[i - 1]
1728 if isinstance(classinfo, _ClassInfo):
1729 return classinfo
1730 return None
1731
1732 def CheckClassFinished(self, filename, error):
1733 """Checks that all classes have been completely parsed.
1734
1735 Call this when all lines in a file have been processed.
1736 Args:
1737 filename: The name of the current file.
1738 error: The function to call with any errors found.
1739 """
1740 # Note: This test can result in false positives if #ifdef constructs
1741 # get in the way of brace matching. See the testBuildClass test in
1742 # cpplint_unittest.py for an example of this.
1743 for obj in self.stack:
1744 if isinstance(obj, _ClassInfo):
1745 error(filename, obj.starting_linenum, 'build/class', 5,
1746 'Failed to find complete declaration of class %s' %
1747 obj.name)
1748
1749
1750 def CheckForNonStandardConstructs(filename, clean_lines, linenum,
1751 nesting_state, error):
1752 """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
1753
1754 Complain about several constructs which gcc-2 accepts, but which are
1755 not standard C++. Warning about these in lint is one way to ease the
1756 transition to new compilers.
1757 - put storage class first (e.g. "static const" instead of "const static").
1758 - "%lld" instead of "%qd" in printf-type functions.
1759 - "%1$d" is non-standard in printf-type functions.
1760 - "\%" is an undefined character escape sequence.
1761 - text after #endif is not allowed.
1762 - invalid inner-style forward declaration.
1763 - >? and <? operators, and their >?= and <?= cousins.
1764
1765 Additionally, check for constructor/destructor style violations and reference
1766 members, as it is very convenient to do so while checking for
1767 gcc-2 compliance.
1768
1769 Args:
1770 filename: The name of the current file.
1771 clean_lines: A CleansedLines instance containing the file.
1772 linenum: The number of the line to check.
1773 nesting_state: A _NestingState instance which maintains information about
1774 the current stack of nested blocks being parsed.
1775 error: A callable to which errors are reported, which takes 4 arguments:
1776 filename, line number, error level, and message.
1777 """
1778
1779 # Remove comments from the line, but leave in strings for now.
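# Keeping string contents matters here: for a hypothetical line such as
#   printf("%qd\n", value);
# the format-string checks just below need to see the "%qd" inside the quotes
# in order to fire.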
1780 line = clean_lines.lines[linenum] 1781 1782 if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line): 1783 error(filename, linenum, 'runtime/printf_format', 3, 1784 '%q in format strings is deprecated. Use %ll instead.') 1785 1786 if Search(r'printf\s*\(.*".*%\d+\$', line): 1787 error(filename, linenum, 'runtime/printf_format', 2, 1788 '%N$ formats are unconventional. Try rewriting to avoid them.') 1789 1790 # Remove escaped backslashes before looking for undefined escapes. 1791 line = line.replace('\\\\', '') 1792 1793 if Search(r'("|\').*\\(%|\[|\(|{)', line): 1794 error(filename, linenum, 'build/printf_format', 3, 1795 '%, [, (, and { are undefined character escapes. Unescape them.') 1796 1797 # For the rest, work with both comments and strings removed. 1798 line = clean_lines.elided[linenum] 1799 1800 if Search(r'\b(const|volatile|void|char|short|int|long' 1801 r'|float|double|signed|unsigned' 1802 r'|schar|u?int8|u?int16|u?int32|u?int64)' 1803 r'\s+(register|static|extern|typedef)\b', 1804 line): 1805 error(filename, linenum, 'build/storage_class', 5, 1806 'Storage class (static, extern, typedef, etc) should be first.') 1807 1808 if Match(r'\s*#\s*endif\s*[^/\s]+', line): 1809 error(filename, linenum, 'build/endif_comment', 5, 1810 'Uncommented text after #endif is non-standard. Use a comment.') 1811 1812 if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line): 1813 error(filename, linenum, 'build/forward_decl', 5, 1814 'Inner-style forward declarations are invalid. Remove this line.') 1815 1816 if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?', 1817 line): 1818 error(filename, linenum, 'build/deprecated', 3, 1819 '>? and <? (max and min) operators are non-standard and deprecated.') 1820 1821 if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line): 1822 # TODO(unknown): Could it be expanded safely to arbitrary references, 1823 # without triggering too many false positives? The first 1824 # attempt triggered 5 warnings for mostly benign code in the regtest, hence 1825 # the restriction. 1826 # Here's the original regexp, for the reference: 1827 # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?' 1828 # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;' 1829 error(filename, linenum, 'runtime/member_string_references', 2, 1830 'const string& members are dangerous. It is much better to use ' 1831 'alternatives, such as pointers or simple constants.') 1832 1833 # Everything else in this function operates on class declarations. 1834 # Return early if the top of the nesting stack is not a class, or if 1835 # the class head is not completed yet. 1836 classinfo = nesting_state.InnermostClass() 1837 if not classinfo or not classinfo.seen_open_brace: 1838 return 1839 1840 # The class may have been declared with namespace or classname qualifiers. 1841 # The constructor and destructor will not have those qualifiers. 1842 base_classname = classinfo.name.split('::')[-1] 1843 1844 # Look for single-argument constructors that aren't marked explicit. 1845 # Technically a valid construct, but against style. 
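# For example (hypothetical code inside "class Foo { ... };"):
#   Foo(int x);             // flagged: should be marked explicit
#   Foo(const Foo& other);  // copy constructor, not flagged
#   Foo(void);              // no real argument, not flagged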
1846 args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)' 1847 % re.escape(base_classname), 1848 line) 1849 if (args and 1850 args.group(1) != 'void' and 1851 not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname), 1852 args.group(1).strip())): 1853 error(filename, linenum, 'runtime/explicit', 5, 1854 'Single-argument constructors should be marked explicit.') 1855 1856 1857 def CheckSpacingForFunctionCall(filename, line, linenum, error): 1858 """Checks for the correctness of various spacing around function calls. 1859 1860 Args: 1861 filename: The name of the current file. 1862 line: The text of the line to check. 1863 linenum: The number of the line to check. 1864 error: The function to call with any errors found. 1865 """ 1866 1867 # Since function calls often occur inside if/for/while/switch 1868 # expressions - which have their own, more liberal conventions - we 1869 # first see if we should be looking inside such an expression for a 1870 # function call, to which we can apply more strict standards. 1871 fncall = line # if there's no control flow construct, look at whole line 1872 for pattern in (r'\bif\s*\((.*)\)\s*{', 1873 r'\bfor\s*\((.*)\)\s*{', 1874 r'\bwhile\s*\((.*)\)\s*[{;]', 1875 r'\bswitch\s*\((.*)\)\s*{'): 1876 match = Search(pattern, line) 1877 if match: 1878 fncall = match.group(1) # look inside the parens for function calls 1879 break 1880 1881 # Except in if/for/while/switch, there should never be space 1882 # immediately inside parens (eg "f( 3, 4 )"). We make an exception 1883 # for nested parens ( (a+b) + c ). Likewise, there should never be 1884 # a space before a ( when it's a function argument. I assume it's a 1885 # function argument when the char before the whitespace is legal in 1886 # a function name (alnum + _) and we're not starting a macro. Also ignore 1887 # pointers and references to arrays and functions coz they're too tricky: 1888 # we use a very simple way to recognize these: 1889 # " (something)(maybe-something)" or 1890 # " (something)(maybe-something," or 1891 # " (something)[something]" 1892 # Note that we assume the contents of [] to be short enough that 1893 # they'll never need to wrap. 1894 if ( # Ignore control structures. 1895 not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and 1896 # Ignore pointers/references to functions. 1897 not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and 1898 # Ignore pointers/references to arrays. 1899 not Search(r' \([^)]+\)\[[^\]]+\]', fncall)): 1900 if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call 1901 error(filename, linenum, 'whitespace/parens', 4, 1902 'Extra space after ( in function call') 1903 elif Search(r'\(\s+(?!(\s*\\)|\()', fncall): 1904 error(filename, linenum, 'whitespace/parens', 2, 1905 'Extra space after (') 1906 if (Search(r'\w\s+\(', fncall) and 1907 not Search(r'#\s*define|typedef', fncall) and 1908 not Search(r'\w\s+\((\w+::)?\*\w+\)\(', fncall)): 1909 error(filename, linenum, 'whitespace/parens', 4, 1910 'Extra space before ( in function call') 1911 # If the ) is followed only by a newline or a { + newline, assume it's 1912 # part of a control statement (if/while/etc), and don't complain 1913 if Search(r'[^)]\s+\)\s*[^{\s]', fncall): 1914 # If the closing parenthesis is preceded by only whitespaces, 1915 # try to give a more descriptive error message. 
1916 if Search(r'^\s+\)', fncall): 1917 error(filename, linenum, 'whitespace/parens', 2, 1918 'Closing ) should be moved to the previous line') 1919 else: 1920 error(filename, linenum, 'whitespace/parens', 2, 1921 'Extra space before )') 1922 1923 1924 def IsBlankLine(line): 1925 """Returns true if the given line is blank. 1926 1927 We consider a line to be blank if the line is empty or consists of 1928 only white spaces. 1929 1930 Args: 1931 line: A line of a string. 1932 1933 Returns: 1934 True, if the given line is blank. 1935 """ 1936 return not line or line.isspace() 1937 1938 1939 def CheckForFunctionLengths(filename, clean_lines, linenum, 1940 function_state, error): 1941 """Reports for long function bodies. 1942 1943 For an overview why this is done, see: 1944 http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions 1945 1946 Uses a simplistic algorithm assuming other style guidelines 1947 (especially spacing) are followed. 1948 Only checks unindented functions, so class members are unchecked. 1949 Trivial bodies are unchecked, so constructors with huge initializer lists 1950 may be missed. 1951 Blank/comment lines are not counted so as to avoid encouraging the removal 1952 of vertical space and comments just to get through a lint check. 1953 NOLINT *on the last line of a function* disables this check. 1954 1955 Args: 1956 filename: The name of the current file. 1957 clean_lines: A CleansedLines instance containing the file. 1958 linenum: The number of the line to check. 1959 function_state: Current function name and lines in body so far. 1960 error: The function to call with any errors found. 1961 """ 1962 lines = clean_lines.lines 1963 line = lines[linenum] 1964 raw = clean_lines.raw_lines 1965 raw_line = raw[linenum] 1966 joined_line = '' 1967 1968 starting_func = False 1969 regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ... 1970 match_result = Match(regexp, line) 1971 if match_result: 1972 # If the name is all caps and underscores, figure it's a macro and 1973 # ignore it, unless it's TEST or TEST_F. 1974 function_name = match_result.group(1).split()[-1] 1975 if function_name == 'TEST' or function_name == 'TEST_F' or ( 1976 not Match(r'[A-Z_]+$', function_name)): 1977 starting_func = True 1978 1979 if starting_func: 1980 body_found = False 1981 for start_linenum in xrange(linenum, clean_lines.NumLines()): 1982 start_line = lines[start_linenum] 1983 joined_line += ' ' + start_line.lstrip() 1984 if Search(r'(;|})', start_line): # Declarations and trivial functions 1985 body_found = True 1986 break # ... ignore 1987 elif Search(r'{', start_line): 1988 body_found = True 1989 function = Search(r'((\w|:)*)\(', line).group(1) 1990 if Match(r'TEST', function): # Handle TEST... macros 1991 parameter_regexp = Search(r'(\(.*\))', joined_line) 1992 if parameter_regexp: # Ignore bad syntax 1993 function += parameter_regexp.group(1) 1994 else: 1995 function += '()' 1996 function_state.Begin(function) 1997 break 1998 if not body_found: 1999 # No body for the function (or evidence of a non-function) was found. 2000 error(filename, linenum, 'readability/fn_size', 5, 2001 'Lint failed to find start of function body.') 2002 elif Match(r'^\}\s*$', line): # function end 2003 function_state.Check(error, filename, linenum) 2004 function_state.End() 2005 elif not Match(r'^\s*$', line): 2006 function_state.Count() # Count non-blank/non-comment lines. 
2007 2008 2009 _RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') 2010 2011 2012 def CheckComment(comment, filename, linenum, error): 2013 """Checks for common mistakes in TODO comments. 2014 2015 Args: 2016 comment: The text of the comment from the line in question. 2017 filename: The name of the current file. 2018 linenum: The number of the line to check. 2019 error: The function to call with any errors found. 2020 """ 2021 match = _RE_PATTERN_TODO.match(comment) 2022 if match: 2023 # One whitespace is correct; zero whitespace is handled elsewhere. 2024 leading_whitespace = match.group(1) 2025 if len(leading_whitespace) > 1: 2026 error(filename, linenum, 'whitespace/todo', 2, 2027 'Too many spaces before TODO') 2028 2029 username = match.group(2) 2030 if not username: 2031 error(filename, linenum, 'readability/todo', 2, 2032 'Missing username in TODO; it should look like ' 2033 '"// TODO(my_username): Stuff."') 2034 2035 middle_whitespace = match.group(3) 2036 # Comparisons made explicit for correctness -- pylint: disable-msg=C6403 2037 if middle_whitespace != ' ' and middle_whitespace != '': 2038 error(filename, linenum, 'whitespace/todo', 2, 2039 'TODO(my_username) should be followed by a space') 2040 2041 def CheckAccess(filename, clean_lines, linenum, nesting_state, error): 2042 """Checks for improper use of DISALLOW* macros. 2043 2044 Args: 2045 filename: The name of the current file. 2046 clean_lines: A CleansedLines instance containing the file. 2047 linenum: The number of the line to check. 2048 nesting_state: A _NestingState instance which maintains information about 2049 the current stack of nested blocks being parsed. 2050 error: The function to call with any errors found. 2051 """ 2052 line = clean_lines.elided[linenum] # get rid of comments and strings 2053 2054 matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' 2055 r'DISALLOW_EVIL_CONSTRUCTORS|' 2056 r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) 2057 if not matched: 2058 return 2059 if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): 2060 if nesting_state.stack[-1].access != 'private': 2061 error(filename, linenum, 'readability/constructors', 3, 2062 '%s must be in the private: section' % matched.group(1)) 2063 2064 else: 2065 # Found DISALLOW* macro outside a class declaration, or perhaps it 2066 # was used inside a function when it should have been part of the 2067 # class declaration. We could issue a warning here, but it 2068 # probably resulted in a compiler error already. 2069 pass 2070 2071 2072 def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix): 2073 """Find the corresponding > to close a template. 2074 2075 Args: 2076 clean_lines: A CleansedLines instance containing the file. 2077 linenum: Current line number. 2078 init_suffix: Remainder of the current line after the initial <. 2079 2080 Returns: 2081 True if a matching bracket exists. 2082 """ 2083 line = init_suffix 2084 nesting_stack = ['<'] 2085 while True: 2086 # Find the next operator that can tell us whether < is used as an 2087 # opening bracket or as a less-than operator. We only want to 2088 # warn on the latter case. 2089 # 2090 # We could also check all other operators and terminate the search 2091 # early, e.g. if we got something like this "a<b+c", the "<" is 2092 # most likely a less-than operator, but then we will get false 2093 # positives for default arguments (e.g. http://go/prccd) and 2094 # other template expressions (e.g. http://go/oxcjq). 
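# Illustrative (hypothetical) inputs for the scan below: after "set<", a
# suffix of "int> s;" reaches the matching '>' and returns True (template),
# while after "a<", a suffix of "b) {" hits ')' first and returns False
# (plain less-than comparison).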
2095 match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line) 2096 if match: 2097 # Found an operator, update nesting stack 2098 operator = match.group(1) 2099 line = match.group(2) 2100 2101 if nesting_stack[-1] == '<': 2102 # Expecting closing angle bracket 2103 if operator in ('<', '(', '['): 2104 nesting_stack.append(operator) 2105 elif operator == '>': 2106 nesting_stack.pop() 2107 if not nesting_stack: 2108 # Found matching angle bracket 2109 return True 2110 elif operator == ',': 2111 # Got a comma after a bracket, this is most likely a template 2112 # argument. We have not seen a closing angle bracket yet, but 2113 # it's probably a few lines later if we look for it, so just 2114 # return early here. 2115 return True 2116 else: 2117 # Got some other operator. 2118 return False 2119 2120 else: 2121 # Expecting closing parenthesis or closing bracket 2122 if operator in ('<', '(', '['): 2123 nesting_stack.append(operator) 2124 elif operator in (')', ']'): 2125 # We don't bother checking for matching () or []. If we got 2126 # something like (] or [), it would have been a syntax error. 2127 nesting_stack.pop() 2128 2129 else: 2130 # Scan the next line 2131 linenum += 1 2132 if linenum >= len(clean_lines.elided): 2133 break 2134 line = clean_lines.elided[linenum] 2135 2136 # Exhausted all remaining lines and still no matching angle bracket. 2137 # Most likely the input was incomplete, otherwise we should have 2138 # seen a semicolon and returned early. 2139 return True 2140 2141 2142 def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix): 2143 """Find the corresponding < that started a template. 2144 2145 Args: 2146 clean_lines: A CleansedLines instance containing the file. 2147 linenum: Current line number. 2148 init_prefix: Part of the current line before the initial >. 2149 2150 Returns: 2151 True if a matching bracket exists. 2152 """ 2153 line = init_prefix 2154 nesting_stack = ['>'] 2155 while True: 2156 # Find the previous operator 2157 match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line) 2158 if match: 2159 # Found an operator, update nesting stack 2160 operator = match.group(2) 2161 line = match.group(1) 2162 2163 if nesting_stack[-1] == '>': 2164 # Expecting opening angle bracket 2165 if operator in ('>', ')', ']'): 2166 nesting_stack.append(operator) 2167 elif operator == '<': 2168 nesting_stack.pop() 2169 if not nesting_stack: 2170 # Found matching angle bracket 2171 return True 2172 elif operator == ',': 2173 # Got a comma before a bracket, this is most likely a 2174 # template argument. The opening angle bracket is probably 2175 # there if we look for it, so just return early here. 2176 return True 2177 else: 2178 # Got some other operator. 2179 return False 2180 2181 else: 2182 # Expecting opening parenthesis or opening bracket 2183 if operator in ('>', ')', ']'): 2184 nesting_stack.append(operator) 2185 elif operator in ('(', '['): 2186 nesting_stack.pop() 2187 2188 else: 2189 # Scan the previous line 2190 linenum -= 1 2191 if linenum < 0: 2192 break 2193 line = clean_lines.elided[linenum] 2194 2195 # Exhausted all earlier lines and still no matching angle bracket. 2196 return False 2197 2198 2199 def CheckSpacing(filename, clean_lines, linenum, nesting_state, error): 2200 """Checks for the correctness of various spacing issues in the code. 
2201 2202 Things we check for: spaces around operators, spaces after 2203 if/for/while/switch, no spaces around parens in function calls, two 2204 spaces between code and comment, don't start a block with a blank 2205 line, don't end a function with a blank line, don't add a blank line 2206 after public/protected/private, don't have too many blank lines in a row. 2207 2208 Args: 2209 filename: The name of the current file. 2210 clean_lines: A CleansedLines instance containing the file. 2211 linenum: The number of the line to check. 2212 nesting_state: A _NestingState instance which maintains information about 2213 the current stack of nested blocks being parsed. 2214 error: The function to call with any errors found. 2215 """ 2216 2217 raw = clean_lines.raw_lines 2218 line = raw[linenum] 2219 2220 # Before nixing comments, check if the line is blank for no good 2221 # reason. This includes the first line after a block is opened, and 2222 # blank lines at the end of a function (ie, right before a line like '}' 2223 # 2224 # Skip all the blank line checks if we are immediately inside a 2225 # namespace body. In other words, don't issue blank line warnings 2226 # for this block: 2227 # namespace { 2228 # 2229 # } 2230 # 2231 # A warning about missing end of namespace comments will be issued instead. 2232 if IsBlankLine(line) and not nesting_state.InNamespaceBody(): 2233 elided = clean_lines.elided 2234 prev_line = elided[linenum - 1] 2235 prevbrace = prev_line.rfind('{') 2236 # TODO(unknown): Don't complain if line before blank line, and line after, 2237 # both start with alnums and are indented the same amount. 2238 # This ignores whitespace at the start of a namespace block 2239 # because those are not usually indented. 2240 if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1: 2241 # OK, we have a blank line at the start of a code block. Before we 2242 # complain, we check if it is an exception to the rule: The previous 2243 # non-empty line has the parameters of a function header that are indented 2244 # 4 spaces (because they did not fit in a 80 column line when placed on 2245 # the same line as the function name). We also check for the case where 2246 # the previous line is indented 6 spaces, which may happen when the 2247 # initializers of a constructor do not fit into a 80 column line. 2248 exception = False 2249 if Match(r' {6}\w', prev_line): # Initializer list? 2250 # We are looking for the opening column of initializer list, which 2251 # should be indented 4 spaces to cause 6 space indentation afterwards. 2252 search_position = linenum-2 2253 while (search_position >= 0 2254 and Match(r' {6}\w', elided[search_position])): 2255 search_position -= 1 2256 exception = (search_position >= 0 2257 and elided[search_position][:5] == ' :') 2258 else: 2259 # Search for the function arguments or an initializer list. We use a 2260 # simple heuristic here: If the line is indented 4 spaces; and we have a 2261 # closing paren, without the opening paren, followed by an opening brace 2262 # or colon (for initializer lists) we assume that it is the last line of 2263 # a function header. If we have a colon indented 4 spaces, it is an 2264 # initializer list. 2265 exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)', 2266 prev_line) 2267 or Match(r' {4}:', prev_line)) 2268 2269 if not exception: 2270 error(filename, linenum, 'whitespace/blank_line', 2, 2271 'Blank line at the start of a code block. 
Is this needed?') 2272 # Ignore blank lines at the end of a block in a long if-else 2273 # chain, like this: 2274 # if (condition1) { 2275 # // Something followed by a blank line 2276 # 2277 # } else if (condition2) { 2278 # // Something else 2279 # } 2280 if linenum + 1 < clean_lines.NumLines(): 2281 next_line = raw[linenum + 1] 2282 if (next_line 2283 and Match(r'\s*}', next_line) 2284 and next_line.find('} else ') == -1): 2285 error(filename, linenum, 'whitespace/blank_line', 3, 2286 'Blank line at the end of a code block. Is this needed?') 2287 2288 matched = Match(r'\s*(public|protected|private):', prev_line) 2289 if matched: 2290 error(filename, linenum, 'whitespace/blank_line', 3, 2291 'Do not leave a blank line after "%s:"' % matched.group(1)) 2292 2293 # Next, we complain if there's a comment too near the text 2294 commentpos = line.find('//') 2295 if commentpos != -1: 2296 # Check if the // may be in quotes. If so, ignore it 2297 # Comparisons made explicit for clarity -- pylint: disable-msg=C6403 2298 if (line.count('"', 0, commentpos) - 2299 line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes 2300 # Allow one space for new scopes, two spaces otherwise: 2301 if (not Match(r'^\s*{ //', line) and 2302 ((commentpos >= 1 and 2303 line[commentpos-1] not in string.whitespace) or 2304 (commentpos >= 2 and 2305 line[commentpos-2] not in string.whitespace))): 2306 error(filename, linenum, 'whitespace/comments', 2, 2307 'At least two spaces is best between code and comments') 2308 # There should always be a space between the // and the comment 2309 commentend = commentpos + 2 2310 if commentend < len(line) and not line[commentend] == ' ': 2311 # but some lines are exceptions -- e.g. if they're big 2312 # comment delimiters like: 2313 # //---------------------------------------------------------- 2314 # or are an empty C++ style Doxygen comment, like: 2315 # /// 2316 # or they begin with multiple slashes followed by a space: 2317 # //////// Header comment 2318 match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or 2319 Search(r'^/$', line[commentend:]) or 2320 Search(r'^/+ ', line[commentend:])) 2321 if not match: 2322 error(filename, linenum, 'whitespace/comments', 4, 2323 'Should have a space between // and comment') 2324 CheckComment(line[commentpos:], filename, linenum, error) 2325 2326 line = clean_lines.elided[linenum] # get rid of comments and strings 2327 2328 # Don't try to do spacing checks for operator methods 2329 line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line) 2330 2331 # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )". 2332 # Otherwise not. Note we only check for non-spaces on *both* sides; 2333 # sometimes people put non-spaces on one side when aligning ='s among 2334 # many lines (not that this is behavior that I approve of...) 2335 if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line): 2336 error(filename, linenum, 'whitespace/operators', 4, 2337 'Missing spaces around =') 2338 2339 # It's ok not to have spaces around binary operators like + - * /, but if 2340 # there's too little whitespace, we get concerned. It's hard to tell, 2341 # though, so we punt on this one for now. TODO. 2342 2343 # You should always have whitespace around binary operators. 2344 # 2345 # Check <= and >= first to avoid false positives with < and >, then 2346 # check non-include lines for spacing around < and >. 
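# For example (hypothetical): "if (a<=b)" is flagged for missing spaces around
# <=, while "if (a <= b)" is not; the < and > checks further below are skipped
# on #include lines, so "#include <map>" is left alone.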
2347 match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
2348 if match:
2349 error(filename, linenum, 'whitespace/operators', 3,
2350 'Missing spaces around %s' % match.group(1))
2351 # We allow no-spaces around << when used like this: 10<<20, but
2352 # not otherwise (particularly, not when used as streams)
2353 match = Search(r'(\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
2354 if match and not (match.group(1).isdigit() and match.group(2).isdigit()):
2355 error(filename, linenum, 'whitespace/operators', 3,
2356 'Missing spaces around <<')
2357 elif not Match(r'#.*include', line):
2358 # Avoid false positives on ->
2359 reduced_line = line.replace('->', '')
2360
2361 # Look for < that is not surrounded by spaces. This is only
2362 # triggered if both sides are missing spaces, even though
2363 # technically we should flag it if at least one side is missing a
2364 # space. This is done to avoid some false positives with shifts.
2365 match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
2366 if (match and
2367 not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
2368 error(filename, linenum, 'whitespace/operators', 3,
2369 'Missing spaces around <')
2370
2371 # Look for > that is not surrounded by spaces. Similar to the
2372 # above, we only trigger if both sides are missing spaces to avoid
2373 # false positives with shifts.
2374 match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
2375 if (match and
2376 not FindPreviousMatchingAngleBracket(clean_lines, linenum,
2377 match.group(1))):
2378 error(filename, linenum, 'whitespace/operators', 3,
2379 'Missing spaces around >')
2380
2381 # We allow no-spaces around >> for almost anything. This is because
2382 # C++11 allows ">>" to close nested templates, which accounts for
2383 # most cases when ">>" is not followed by a space.
2384 #
2385 # We still warn on ">>" followed by an alpha character, because that is
2386 # likely due to ">>" being used for right shifts, e.g.:
2387 # value >> alpha
2388 #
2389 # When ">>" is used to close templates, the alphanumeric letter that
2390 # follows would be part of an identifier, and there should still be
2391 # a space separating the template type and the identifier.
2392 # type<type<type>> alpha
2393 match = Search(r'>>[a-zA-Z_]', line)
2394 if match:
2395 error(filename, linenum, 'whitespace/operators', 3,
2396 'Missing spaces around >>')
2397
2398 # There shouldn't be space around unary operators
2399 match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
2400 if match:
2401 error(filename, linenum, 'whitespace/operators', 4,
2402 'Extra space for operator %s' % match.group(1))
2403
2404 # A pet peeve of mine: no spaces after an if, while, switch, or for
2405 match = Search(r' (if\(|for\(|while\(|switch\()', line)
2406 if match:
2407 error(filename, linenum, 'whitespace/parens', 5,
2408 'Missing space before ( in %s' % match.group(1))
2409
2410 # For if/for/while/switch, the left and right parens should be
2411 # consistent about how many spaces are inside the parens, and
2412 # there should either be zero or one spaces inside the parens.
2413 # We don't want: "if ( foo)" or "if (foo )".
2414 # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
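# Hypothetical examples for the check below:
#   "if ( foo) {"     -> mismatching spaces inside ()
#   "if (  foo  ) {"  -> should have zero or one spaces inside ( and )
#   "if (foo) {" and "if ( foo ) {" are both accepted.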
2415 match = Search(r'\b(if|for|while|switch)\s*'
2416 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
2417 line)
2418 if match:
2419 if len(match.group(2)) != len(match.group(4)):
2420 if not (match.group(3) == ';' and
2421 len(match.group(2)) == 1 + len(match.group(4)) or
2422 not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
2423 error(filename, linenum, 'whitespace/parens', 5,
2424 'Mismatching spaces inside () in %s' % match.group(1))
2425 if not len(match.group(2)) in [0, 1]:
2426 error(filename, linenum, 'whitespace/parens', 5,
2427 'Should have zero or one spaces inside ( and ) in %s' %
2428 match.group(1))
2429
2430 # You should always have a space after a comma (either as fn arg or operator)
2431 if Search(r',[^\s]', line):
2432 error(filename, linenum, 'whitespace/comma', 3,
2433 'Missing space after ,')
2434
2435 # You should always have a space after a semicolon
2436 # except for a few corner cases.
2437 # TODO(unknown): clarify whether 'if (1) { return 1;}' requires one more
2438 # space after ;
2439 if Search(r';[^\s};\\)/]', line):
2440 error(filename, linenum, 'whitespace/semicolon', 3,
2441 'Missing space after ;')
2442
2443 # Next we will look for issues with function calls.
2444 CheckSpacingForFunctionCall(filename, line, linenum, error)
2445
2446 # Except after an opening paren, or after another opening brace (in case of
2447 # an initializer list, for instance), you should have spaces before your
2448 # braces. And since you should never have braces at the beginning of a line,
2449 # this is an easy test.
2450 if Search(r'[^ ({]{', line):
2451 error(filename, linenum, 'whitespace/braces', 5,
2452 'Missing space before {')
2453
2454 # Make sure '} else {' has spaces.
2455 if Search(r'}else', line):
2456 error(filename, linenum, 'whitespace/braces', 5,
2457 'Missing space before else')
2458
2459 # You shouldn't have spaces before your brackets, except maybe after
2460 # 'delete []' or 'new char * []'.
2461 if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
2462 error(filename, linenum, 'whitespace/braces', 5,
2463 'Extra space before [')
2464
2465 # You shouldn't have a space before a semicolon at the end of the line.
2466 # There's a special case for "for" since the style guide allows space before
2467 # the semicolon there.
2468 if Search(r':\s*;\s*$', line):
2469 error(filename, linenum, 'whitespace/semicolon', 5,
2470 'Semicolon defining empty statement. Use {} instead.')
2471 elif Search(r'^\s*;\s*$', line):
2472 error(filename, linenum, 'whitespace/semicolon', 5,
2473 'Line contains only semicolon. If this should be an empty statement, '
2474 'use {} instead.')
2475 elif (Search(r'\s+;\s*$', line) and
2476 not Search(r'\bfor\b', line)):
2477 error(filename, linenum, 'whitespace/semicolon', 5,
2478 'Extra space before last semicolon. If this should be an empty '
2479 'statement, use {} instead.')
2480
2481 # In a range-based for, we want spaces before and after the colon, but
2482 # not around "::" tokens that might appear.
2483 if (Search(r'for *\(.*[^:]:[^: ]', line) or
2484 Search(r'for *\(.*[^: ]:[^:]', line)):
2485 error(filename, linenum, 'whitespace/forcolon', 2,
2486 'Missing space around colon in range-based for loop')
2487
2488
2489 def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
2490 """Checks for additional blank line issues related to sections.
2491
2492 Currently the only thing checked here is blank line before protected/private.
2493
2494 Args:
2495 filename: The name of the current file.
2496 clean_lines: A CleansedLines instance containing the file. 2497 class_info: A _ClassInfo objects. 2498 linenum: The number of the line to check. 2499 error: The function to call with any errors found. 2500 """ 2501 # Skip checks if the class is small, where small means 25 lines or less. 2502 # 25 lines seems like a good cutoff since that's the usual height of 2503 # terminals, and any class that can't fit in one screen can't really 2504 # be considered "small". 2505 # 2506 # Also skip checks if we are on the first line. This accounts for 2507 # classes that look like 2508 # class Foo { public: ... }; 2509 # 2510 # If we didn't find the end of the class, last_line would be zero, 2511 # and the check will be skipped by the first condition. 2512 if (class_info.last_line - class_info.starting_linenum <= 24 or 2513 linenum <= class_info.starting_linenum): 2514 return 2515 2516 matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) 2517 if matched: 2518 # Issue warning if the line before public/protected/private was 2519 # not a blank line, but don't do this if the previous line contains 2520 # "class" or "struct". This can happen two ways: 2521 # - We are at the beginning of the class. 2522 # - We are forward-declaring an inner class that is semantically 2523 # private, but needed to be public for implementation reasons. 2524 # Also ignores cases where the previous line ends with a backslash as can be 2525 # common when defining classes in C macros. 2526 prev_line = clean_lines.lines[linenum - 1] 2527 if (not IsBlankLine(prev_line) and 2528 not Search(r'\b(class|struct)\b', prev_line) and 2529 not Search(r'\\$', prev_line)): 2530 # Try a bit harder to find the beginning of the class. This is to 2531 # account for multi-line base-specifier lists, e.g.: 2532 # class Derived 2533 # : public Base { 2534 end_class_head = class_info.starting_linenum 2535 for i in range(class_info.starting_linenum, linenum): 2536 if Search(r'\{\s*$', clean_lines.lines[i]): 2537 end_class_head = i 2538 break 2539 if end_class_head < linenum - 1: 2540 error(filename, linenum, 'whitespace/blank_line', 3, 2541 '"%s:" should be preceded by a blank line' % matched.group(1)) 2542 2543 2544 def GetPreviousNonBlankLine(clean_lines, linenum): 2545 """Return the most recent non-blank line and its line number. 2546 2547 Args: 2548 clean_lines: A CleansedLines instance containing the file contents. 2549 linenum: The number of the line to check. 2550 2551 Returns: 2552 A tuple with two elements. The first element is the contents of the last 2553 non-blank line before the current line, or the empty string if this is the 2554 first non-blank line. The second is the line number of that line, or -1 2555 if this is the first non-blank line. 2556 """ 2557 2558 prevlinenum = linenum - 1 2559 while prevlinenum >= 0: 2560 prevline = clean_lines.elided[prevlinenum] 2561 if not IsBlankLine(prevline): # if not a blank line... 2562 return (prevline, prevlinenum) 2563 prevlinenum -= 1 2564 return ('', -1) 2565 2566 2567 def CheckBraces(filename, clean_lines, linenum, error): 2568 """Looks for misplaced braces (e.g. at the end of line). 2569 2570 Args: 2571 filename: The name of the current file. 2572 clean_lines: A CleansedLines instance containing the file. 2573 linenum: The number of the line to check. 2574 error: The function to call with any errors found. 
2575 """ 2576 2577 line = clean_lines.elided[linenum] # get rid of comments and strings 2578 2579 if Match(r'\s*{\s*$', line): 2580 # We allow an open brace to start a line in the case where someone 2581 # is using braces in a block to explicitly create a new scope, 2582 # which is commonly used to control the lifetime of 2583 # stack-allocated variables. We don't detect this perfectly: we 2584 # just don't complain if the last non-whitespace character on the 2585 # previous non-blank line is ';', ':', '{', or '}', or if the previous 2586 # line starts a preprocessor block. 2587 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] 2588 if (not Search(r'[;:}{]\s*$', prevline) and 2589 not Match(r'\s*#', prevline)): 2590 error(filename, linenum, 'whitespace/braces', 4, 2591 '{ should almost always be at the end of the previous line') 2592 2593 # An else clause should be on the same line as the preceding closing brace. 2594 if Match(r'\s*else\s*', line): 2595 prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] 2596 if Match(r'\s*}\s*$', prevline): 2597 error(filename, linenum, 'whitespace/newline', 4, 2598 'An else should appear on the same line as the preceding }') 2599 2600 # If braces come on one side of an else, they should be on both. 2601 # However, we have to worry about "else if" that spans multiple lines! 2602 if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): 2603 if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if 2604 # find the ( after the if 2605 pos = line.find('else if') 2606 pos = line.find('(', pos) 2607 if pos > 0: 2608 (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) 2609 if endline[endpos:].find('{') == -1: # must be brace after if 2610 error(filename, linenum, 'readability/braces', 5, 2611 'If an else has a brace on one side, it should have it on both') 2612 else: # common case: else not followed by a multi-line if 2613 error(filename, linenum, 'readability/braces', 5, 2614 'If an else has a brace on one side, it should have it on both') 2615 2616 # Likewise, an else should never have the else clause on the same line 2617 if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): 2618 error(filename, linenum, 'whitespace/newline', 4, 2619 'Else clause should never be on same line as else (use 2 lines)') 2620 2621 # In the same way, a do/while should never be on one line 2622 if Match(r'\s*do [^\s{]', line): 2623 error(filename, linenum, 'whitespace/newline', 4, 2624 'do/while clauses should not be on a single line') 2625 2626 # Braces shouldn't be followed by a ; unless they're defining a struct 2627 # or initializing an array. 2628 # We can't tell in general, but we can for some common cases. 2629 prevlinenum = linenum 2630 while True: 2631 (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum) 2632 if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'): 2633 line = prevline + line 2634 else: 2635 break 2636 if (Search(r'{.*}\s*;', line) and 2637 line.count('{') == line.count('}') and 2638 not Search(r'struct|class|enum|\s*=\s*{', line)): 2639 error(filename, linenum, 'readability/braces', 4, 2640 "You don't need a ; after a }") 2641 2642 2643 def CheckEmptyLoopBody(filename, clean_lines, linenum, error): 2644 """Loop for empty loop body with only a single semicolon. 2645 2646 Args: 2647 filename: The name of the current file. 2648 clean_lines: A CleansedLines instance containing the file. 2649 linenum: The number of the line to check. 
2650 error: The function to call with any errors found. 2651 """ 2652 2653 # Search for loop keywords at the beginning of the line. Because only 2654 # whitespaces are allowed before the keywords, this will also ignore most 2655 # do-while-loops, since those lines should start with closing brace. 2656 line = clean_lines.elided[linenum] 2657 if Match(r'\s*(for|while)\s*\(', line): 2658 # Find the end of the conditional expression 2659 (end_line, end_linenum, end_pos) = CloseExpression( 2660 clean_lines, linenum, line.find('(')) 2661 2662 # Output warning if what follows the condition expression is a semicolon. 2663 # No warning for all other cases, including whitespace or newline, since we 2664 # have a separate check for semicolons preceded by whitespace. 2665 if end_pos >= 0 and Match(r';', end_line[end_pos:]): 2666 error(filename, end_linenum, 'whitespace/empty_loop_body', 5, 2667 'Empty loop bodies should use {} or continue') 2668 2669 2670 def ReplaceableCheck(operator, macro, line): 2671 """Determine whether a basic CHECK can be replaced with a more specific one. 2672 2673 For example suggest using CHECK_EQ instead of CHECK(a == b) and 2674 similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE. 2675 2676 Args: 2677 operator: The C++ operator used in the CHECK. 2678 macro: The CHECK or EXPECT macro being called. 2679 line: The current source line. 2680 2681 Returns: 2682 True if the CHECK can be replaced with a more specific one. 2683 """ 2684 2685 # This matches decimal and hex integers, strings, and chars (in that order). 2686 match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')' 2687 2688 # Expression to match two sides of the operator with something that 2689 # looks like a literal, since CHECK(x == iterator) won't compile. 2690 # This means we can't catch all the cases where a more specific 2691 # CHECK is possible, but it's less annoying than dealing with 2692 # extraneous warnings. 2693 match_this = (r'\s*' + macro + r'\((\s*' + 2694 match_constant + r'\s*' + operator + r'[^<>].*|' 2695 r'.*[^<>]' + operator + r'\s*' + match_constant + 2696 r'\s*\))') 2697 2698 # Don't complain about CHECK(x == NULL) or similar because 2699 # CHECK_EQ(x, NULL) won't compile (requires a cast). 2700 # Also, don't complain about more complex boolean expressions 2701 # involving && or || such as CHECK(a == b || c == d). 2702 return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line) 2703 2704 2705 def CheckCheck(filename, clean_lines, linenum, error): 2706 """Checks the use of CHECK and EXPECT macros. 2707 2708 Args: 2709 filename: The name of the current file. 2710 clean_lines: A CleansedLines instance containing the file. 2711 linenum: The number of the line to check. 2712 error: The function to call with any errors found. 2713 """ 2714 2715 # Decide the set of replacement macros that should be suggested 2716 raw_lines = clean_lines.raw_lines 2717 current_macro = '' 2718 for macro in _CHECK_MACROS: 2719 if raw_lines[linenum].find(macro) >= 0: 2720 current_macro = macro 2721 break 2722 if not current_macro: 2723 # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' 2724 return 2725 2726 line = clean_lines.elided[linenum] # get rid of comments and strings 2727 2728 # Encourage replacing plain CHECKs with CHECK_EQ/CHECK_NE/etc. 
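# For example (hypothetical): CHECK(x == 42) draws a suggestion to use
# CHECK_EQ(x, 42) instead, while CHECK(p == NULL) and CHECK(a == b || c == d)
# are left alone, per the NULL/&&/|| exclusions in ReplaceableCheck above.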
2729 for operator in ['==', '!=', '>=', '>', '<=', '<']: 2730 if ReplaceableCheck(operator, current_macro, line): 2731 error(filename, linenum, 'readability/check', 2, 2732 'Consider using %s instead of %s(a %s b)' % ( 2733 _CHECK_REPLACEMENT[current_macro][operator], 2734 current_macro, operator)) 2735 break 2736 2737 2738 def CheckAltTokens(filename, clean_lines, linenum, error): 2739 """Check alternative keywords being used in boolean expressions. 2740 2741 Args: 2742 filename: The name of the current file. 2743 clean_lines: A CleansedLines instance containing the file. 2744 linenum: The number of the line to check. 2745 error: The function to call with any errors found. 2746 """ 2747 line = clean_lines.elided[linenum] 2748 2749 # Avoid preprocessor lines 2750 if Match(r'^\s*#', line): 2751 return 2752 2753 # Last ditch effort to avoid multi-line comments. This will not help 2754 # if the comment started before the current line or ended after the 2755 # current line, but it catches most of the false positives. At least, 2756 # it provides a way to workaround this warning for people who use 2757 # multi-line comments in preprocessor macros. 2758 # 2759 # TODO(unknown): remove this once cpplint has better support for 2760 # multi-line comments. 2761 if line.find('/*') >= 0 or line.find('*/') >= 0: 2762 return 2763 2764 for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): 2765 error(filename, linenum, 'readability/alt_tokens', 2, 2766 'Use operator %s instead of %s' % ( 2767 _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) 2768 2769 2770 def GetLineWidth(line): 2771 """Determines the width of the line in column positions. 2772 2773 Args: 2774 line: A string, which may be a Unicode string. 2775 2776 Returns: 2777 The width of the line in column positions, accounting for Unicode 2778 combining characters and wide characters. 2779 """ 2780 if isinstance(line, unicode): 2781 width = 0 2782 for uc in unicodedata.normalize('NFC', line): 2783 if unicodedata.east_asian_width(uc) in ('W', 'F'): 2784 width += 2 2785 elif not unicodedata.combining(uc): 2786 width += 1 2787 return width 2788 else: 2789 return len(line) 2790 2791 2792 def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, 2793 error): 2794 """Checks rules from the 'C++ style rules' section of cppguide.html. 2795 2796 Most of these rules are hard to test (naming, comment style), but we 2797 do what we can. In particular we check for 2-space indents, line lengths, 2798 tab usage, spaces inside code, etc. 2799 2800 Args: 2801 filename: The name of the current file. 2802 clean_lines: A CleansedLines instance containing the file. 2803 linenum: The number of the line to check. 2804 file_extension: The extension (without the dot) of the filename. 2805 nesting_state: A _NestingState instance which maintains information about 2806 the current stack of nested blocks being parsed. 2807 error: The function to call with any errors found. 2808 """ 2809 2810 raw_lines = clean_lines.raw_lines 2811 line = raw_lines[linenum] 2812 2813 if line.find('\t') != -1: 2814 error(filename, linenum, 'whitespace/tab', 1, 2815 'Tab found; better to use spaces') 2816 2817 # One or three blank spaces at the beginning of the line is weird; it's 2818 # hard to reconcile that with 2-space indents. 2819 # NOTE: here are the conditions rob pike used for his tests. 
Mine aren't 2820 # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces 2821 # if(RLENGTH > 20) complain = 0; 2822 # if(match($0, " +(error|private|public|protected):")) complain = 0; 2823 # if(match(prev, "&& *$")) complain = 0; 2824 # if(match(prev, "\\|\\| *$")) complain = 0; 2825 # if(match(prev, "[\",=><] *$")) complain = 0; 2826 # if(match($0, " <<")) complain = 0; 2827 # if(match(prev, " +for \\(")) complain = 0; 2828 # if(prevodd && match(prevprev, " +for \\(")) complain = 0; 2829 initial_spaces = 0 2830 cleansed_line = clean_lines.elided[linenum] 2831 while initial_spaces < len(line) and line[initial_spaces] == ' ': 2832 initial_spaces += 1 2833 if line and line[-1].isspace(): 2834 error(filename, linenum, 'whitespace/end_of_line', 4, 2835 'Line ends in whitespace. Consider deleting these extra spaces.') 2836 # There are certain situations we allow one space, notably for labels 2837 elif ((initial_spaces == 1 or initial_spaces == 3) and 2838 not Match(r'\s*\w+\s*:\s*$', cleansed_line)): 2839 error(filename, linenum, 'whitespace/indent', 3, 2840 'Weird number of spaces at line-start. ' 2841 'Are you using a 2-space indent?') 2842 # Labels should always be indented at least one space. 2843 elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$', 2844 line): 2845 error(filename, linenum, 'whitespace/labels', 4, 2846 'Labels should always be indented at least one space. ' 2847 'If this is a member-initializer list in a constructor or ' 2848 'the base class list in a class definition, the colon should ' 2849 'be on the following line.') 2850 2851 2852 # Check if the line is a header guard. 2853 is_header_guard = False 2854 if file_extension == 'h': 2855 cppvar = GetHeaderGuardCPPVariable(filename) 2856 if (line.startswith('#ifndef %s' % cppvar) or 2857 line.startswith('#define %s' % cppvar) or 2858 line.startswith('#endif // %s' % cppvar)): 2859 is_header_guard = True 2860 # #include lines and header guards can be long, since there's no clean way to 2861 # split them. 2862 # 2863 # URLs can be long too. It's possible to split these, but it makes them 2864 # harder to cut&paste. 2865 # 2866 # The "$Id:...$" comment may also get very long without it being the 2867 # developers fault. 2868 if (not line.startswith('#include') and not is_header_guard and 2869 not Match(r'^\s*//.*http(s?)://\S*$', line) and 2870 not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): 2871 line_width = GetLineWidth(line) 2872 if line_width > 100: 2873 error(filename, linenum, 'whitespace/line_length', 4, 2874 'Lines should very rarely be longer than 100 characters') 2875 elif line_width > 80: 2876 error(filename, linenum, 'whitespace/line_length', 2, 2877 'Lines should be <= 80 characters long') 2878 2879 if (cleansed_line.count(';') > 1 and 2880 # for loops are allowed two ;'s (and may run over two lines). 
2881 cleansed_line.find('for') == -1 and 2882 (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or 2883 GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and 2884 # It's ok to have many commands in a switch case that fits in 1 line 2885 not ((cleansed_line.find('case ') != -1 or 2886 cleansed_line.find('default:') != -1) and 2887 cleansed_line.find('break;') != -1)): 2888 error(filename, linenum, 'whitespace/newline', 0, 2889 'More than one command on the same line') 2890 2891 # Some more style checks 2892 CheckBraces(filename, clean_lines, linenum, error) 2893 CheckEmptyLoopBody(filename, clean_lines, linenum, error) 2894 CheckAccess(filename, clean_lines, linenum, nesting_state, error) 2895 CheckSpacing(filename, clean_lines, linenum, nesting_state, error) 2896 CheckCheck(filename, clean_lines, linenum, error) 2897 CheckAltTokens(filename, clean_lines, linenum, error) 2898 classinfo = nesting_state.InnermostClass() 2899 if classinfo: 2900 CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) 2901 2902 2903 _RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') 2904 _RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') 2905 # Matches the first component of a filename delimited by -s and _s. That is: 2906 # _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' 2907 # _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' 2908 # _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' 2909 # _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' 2910 _RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') 2911 2912 2913 def _DropCommonSuffixes(filename): 2914 """Drops common suffixes like _test.cc or -inl.h from filename. 2915 2916 For example: 2917 >>> _DropCommonSuffixes('foo/foo-inl.h') 2918 'foo/foo' 2919 >>> _DropCommonSuffixes('foo/bar/foo.cc') 2920 'foo/bar/foo' 2921 >>> _DropCommonSuffixes('foo/foo_internal.h') 2922 'foo/foo' 2923 >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') 2924 'foo/foo_unusualinternal' 2925 2926 Args: 2927 filename: The input filename. 2928 2929 Returns: 2930 The filename with the common suffix removed. 2931 """ 2932 for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', 2933 'inl.h', 'impl.h', 'internal.h'): 2934 if (filename.endswith(suffix) and len(filename) > len(suffix) and 2935 filename[-len(suffix) - 1] in ('-', '_')): 2936 return filename[:-len(suffix) - 1] 2937 return os.path.splitext(filename)[0] 2938 2939 2940 def _IsTestFilename(filename): 2941 """Determines if the given filename has a suffix that identifies it as a test. 2942 2943 Args: 2944 filename: The input filename. 2945 2946 Returns: 2947 True if 'filename' looks like a test, False otherwise. 2948 """ 2949 if (filename.endswith('_test.cc') or 2950 filename.endswith('_unittest.cc') or 2951 filename.endswith('_regtest.cc')): 2952 return True 2953 else: 2954 return False 2955 2956 2957 def _ClassifyInclude(fileinfo, include, is_system): 2958 """Figures out what kind of header 'include' is. 2959 2960 Args: 2961 fileinfo: The current file cpplint is running over. A FileInfo instance. 2962 include: The path to a #included file. 2963 is_system: True if the #include used <> rather than "". 2964 2965 Returns: 2966 One of the _XXX_HEADER constants. 
2967 2968 For example: 2969 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) 2970 _C_SYS_HEADER 2971 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) 2972 _CPP_SYS_HEADER 2973 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) 2974 _LIKELY_MY_HEADER 2975 >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), 2976 ... 'bar/foo_other_ext.h', False) 2977 _POSSIBLE_MY_HEADER 2978 >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) 2979 _OTHER_HEADER 2980 """ 2981 # This is a list of all standard c++ header files, except 2982 # those already checked for above. 2983 is_stl_h = include in _STL_HEADERS 2984 is_cpp_h = is_stl_h or include in _CPP_HEADERS 2985 2986 if is_system: 2987 if is_cpp_h: 2988 return _CPP_SYS_HEADER 2989 else: 2990 return _C_SYS_HEADER 2991 2992 # If the target file and the include we're checking share a 2993 # basename when we drop common extensions, and the include 2994 # lives in . , then it's likely to be owned by the target file. 2995 target_dir, target_base = ( 2996 os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) 2997 include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) 2998 if target_base == include_base and ( 2999 include_dir == target_dir or 3000 include_dir == os.path.normpath(target_dir + '/../public')): 3001 return _LIKELY_MY_HEADER 3002 3003 # If the target and include share some initial basename 3004 # component, it's possible the target is implementing the 3005 # include, so it's allowed to be first, but we'll never 3006 # complain if it's not there. 3007 target_first_component = _RE_FIRST_COMPONENT.match(target_base) 3008 include_first_component = _RE_FIRST_COMPONENT.match(include_base) 3009 if (target_first_component and include_first_component and 3010 target_first_component.group(0) == 3011 include_first_component.group(0)): 3012 return _POSSIBLE_MY_HEADER 3013 3014 return _OTHER_HEADER 3015 3016 3017 3018 def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): 3019 """Check rules that are applicable to #include lines. 3020 3021 Strings on #include lines are NOT removed from elided line, to make 3022 certain tasks easier. However, to prevent false positives, checks 3023 applicable to #include lines in CheckLanguage must be put here. 3024 3025 Args: 3026 filename: The name of the current file. 3027 clean_lines: A CleansedLines instance containing the file. 3028 linenum: The number of the line to check. 3029 include_state: An _IncludeState instance in which the headers are inserted. 3030 error: The function to call with any errors found. 3031 """ 3032 fileinfo = FileInfo(filename) 3033 3034 line = clean_lines.lines[linenum] 3035 3036 # "include" should use the new style "foo/bar.h" instead of just "bar.h" 3037 if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): 3038 error(filename, linenum, 'build/include', 4, 3039 'Include the directory when naming .h files') 3040 3041 # we shouldn't include a file more than once. actually, there are a 3042 # handful of instances where doing so is okay, but in general it's 3043 # not. 
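# For example (hypothetical): if "foo/bar.h" was first seen earlier in this
# file, a second '#include "foo/bar.h"' is reported below along with the line
# number of that first occurrence.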
3044 match = _RE_PATTERN_INCLUDE.search(line) 3045 if match: 3046 include = match.group(2) 3047 is_system = (match.group(1) == '<') 3048 if include in include_state: 3049 error(filename, linenum, 'build/include', 4, 3050 '"%s" already included at %s:%s' % 3051 (include, filename, include_state[include])) 3052 else: 3053 include_state[include] = linenum 3054 3055 # We want to ensure that headers appear in the right order: 3056 # 1) for foo.cc, foo.h (preferred location) 3057 # 2) c system files 3058 # 3) cpp system files 3059 # 4) for foo.cc, foo.h (deprecated location) 3060 # 5) other google headers 3061 # 3062 # We classify each include statement as one of those 5 types 3063 # using a number of techniques. The include_state object keeps 3064 # track of the highest type seen, and complains if we see a 3065 # lower type after that. 3066 error_message = include_state.CheckNextIncludeOrder( 3067 _ClassifyInclude(fileinfo, include, is_system)) 3068 if error_message: 3069 error(filename, linenum, 'build/include_order', 4, 3070 '%s. Should be: %s.h, c system, c++ system, other.' % 3071 (error_message, fileinfo.BaseName())) 3072 if not include_state.IsInAlphabeticalOrder(include): 3073 error(filename, linenum, 'build/include_alpha', 4, 3074 'Include "%s" not in alphabetical order' % include) 3075 3076 # Look for any of the stream classes that are part of standard C++. 3077 match = _RE_PATTERN_INCLUDE.match(line) 3078 if match: 3079 include = match.group(2) 3080 if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include): 3081 # Many unit tests use cout, so we exempt them. 3082 if not _IsTestFilename(filename): 3083 error(filename, linenum, 'readability/streams', 3, 3084 'Streams are highly discouraged.') 3085 3086 3087 def _GetTextInside(text, start_pattern): 3088 """Retrieves all the text between matching open and close parentheses. 3089 3090 Given a string of lines and a regular expression string, retrieve all the text 3091 following the expression and between opening punctuation symbols like 3092 (, [, or {, and the matching close-punctuation symbol. This properly handles 3093 nested occurrences of the punctuation, so for a text like 3094 printf(a(), b(c())); 3095 a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'. 3096 start_pattern must match a string ending with an opening punctuation symbol. 3097 3098 Args: 3099 text: The text to extract from. Its comments and strings must be elided. 3100 It can be a single line or span multiple lines. 3101 start_pattern: The regexp string indicating where to start extracting 3102 the text. 3103 Returns: 3104 The extracted text. 3105 None if either the opening string or ending punctuation could not be found. 3106 """ 3107 # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably 3108 # rewritten to use _GetTextInside (and currently use inferior regexp matching). 3109 3110 # Maps each opening punctuation character to its matching closing character. 3111 matching_punctuation = {'(': ')', '{': '}', '[': ']'} 3112 closing_punctuation = set(matching_punctuation.itervalues()) 3113 3114 # Find the position to start extracting text. 3115 match = re.search(start_pattern, text, re.M) 3116 if not match: # start_pattern not found in text.
3117 return None 3118 start_position = match.end(0) 3119 3120 assert start_position > 0, ( 3121 'start_pattern must end with an opening punctuation.') 3122 assert text[start_position - 1] in matching_punctuation, ( 3123 'start_pattern must end with an opening punctuation.') 3124 # Stack of closing punctuations we expect to have in text after position. 3125 punctuation_stack = [matching_punctuation[text[start_position - 1]]] 3126 position = start_position 3127 while punctuation_stack and position < len(text): 3128 if text[position] == punctuation_stack[-1]: 3129 punctuation_stack.pop() 3130 elif text[position] in closing_punctuation: 3131 # A closing punctuation without matching opening punctuations. 3132 return None 3133 elif text[position] in matching_punctuation: 3134 punctuation_stack.append(matching_punctuation[text[position]]) 3135 position += 1 3136 if punctuation_stack: 3137 # Opening punctuations left without matching close-punctuations. 3138 return None 3139 # All punctuation matched. 3140 return text[start_position:position - 1] 3141 3142 3143 def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state, 3144 error): 3145 """Checks rules from the 'C++ language rules' section of cppguide.html. 3146 3147 Some of these rules are hard to test (function overloading, using 3148 uint32 inappropriately), but we do the best we can. 3149 3150 Args: 3151 filename: The name of the current file. 3152 clean_lines: A CleansedLines instance containing the file. 3153 linenum: The number of the line to check. 3154 file_extension: The extension (without the dot) of the filename. 3155 include_state: An _IncludeState instance in which the headers are inserted. 3156 error: The function to call with any errors found. 3157 """ 3158 # If the line is empty or consists entirely of a comment, no need to 3159 # check it. 3160 line = clean_lines.elided[linenum] 3161 if not line: 3162 return 3163 3164 match = _RE_PATTERN_INCLUDE.search(line) 3165 if match: 3166 CheckIncludeLine(filename, clean_lines, linenum, include_state, error) 3167 return 3168 3169 # Create an extended_line, which is the concatenation of the current and 3170 # next lines, for more effective checking of code that may span more than one 3171 # line. 3172 if linenum + 1 < clean_lines.NumLines(): 3173 extended_line = line + clean_lines.elided[linenum + 1] 3174 else: 3175 extended_line = line 3176 3177 # Make Windows paths like Unix. 3178 fullname = os.path.abspath(filename).replace('\\', '/') 3179 3180 # TODO(unknown): figure out if they're using default arguments in fn proto. 3181 3182 # Check for non-const references in functions. This is tricky because & 3183 # is also used to take the address of something. We allow <> for templates 3184 # (ignoring whatever is between the angle brackets) and : for classes. 3185 # These are complicated re's. They try to capture the following: 3186 # paren (for fn-prototype start), typename, &, varname. For the const 3187 # version, we're willing for const to be before the typename or after it. 3188 # Don't check the implementation on the same line. 3189 fnline = line.split('{', 1)[0] 3190 if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) > 3191 len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
3192 r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) + 3193 len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+', 3194 fnline))): 3195 3196 # We allow non-const references in a few standard places, like functions 3197 # called "swap()" or iostream operators like "<<" or ">>". We also filter 3198 # out for loops, which lint otherwise mistakenly thinks are functions. 3199 if not Search( 3200 r'(for|swap|Swap|operator[<>][<>])\s*\(\s*' 3201 r'(?:(?:typename\s*)?[\w:]|<.*>)+\s*&', 3202 fnline): 3203 error(filename, linenum, 'runtime/references', 2, 3204 'Is this a non-const reference? ' 3205 'If so, make const or use a pointer.') 3206 3207 # Check to see if they're using an conversion function cast. 3208 # I just try to capture the most common basic types, though there are more. 3209 # Parameterless conversion functions, such as bool(), are allowed as they are 3210 # probably a member operator declaration or default constructor. 3211 match = Search( 3212 r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there 3213 r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line) 3214 if match: 3215 # gMock methods are defined using some variant of MOCK_METHODx(name, type) 3216 # where type may be float(), int(string), etc. Without context they are 3217 # virtually indistinguishable from int(x) casts. Likewise, gMock's 3218 # MockCallback takes a template parameter of the form return_type(arg_type), 3219 # which looks much like the cast we're trying to detect. 3220 if (match.group(1) is None and # If new operator, then this isn't a cast 3221 not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or 3222 Match(r'^\s*MockCallback<.*>', line))): 3223 # Try a bit harder to catch gmock lines: the only place where 3224 # something looks like an old-style cast is where we declare the 3225 # return type of the mocked method, and the only time when we 3226 # are missing context is if MOCK_METHOD was split across 3227 # multiple lines (for example http://go/hrfhr ), so we only need 3228 # to check the previous line for MOCK_METHOD. 3229 if (linenum == 0 or 3230 not Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(\S+,\s*$', 3231 clean_lines.elided[linenum - 1])): 3232 error(filename, linenum, 'readability/casting', 4, 3233 'Using deprecated casting style. ' 3234 'Use static_cast<%s>(...) instead' % 3235 match.group(2)) 3236 3237 CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 3238 'static_cast', 3239 r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) 3240 3241 # This doesn't catch all cases. Consider (const char * const)"hello". 3242 # 3243 # (char *) "foo" should always be a const_cast (reinterpret_cast won't 3244 # compile). 3245 if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 3246 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): 3247 pass 3248 else: 3249 # Check pointer casts for other than string constants 3250 CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], 3251 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) 3252 3253 # In addition, we look for people taking the address of a cast. This 3254 # is dangerous -- casts can assign to temporaries, so the pointer doesn't 3255 # point where you think. 3256 if Search( 3257 r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line): 3258 error(filename, linenum, 'runtime/casting', 4, 3259 ('Are you taking an address of a cast? ' 3260 'This is dangerous: could be a temp var. 
' 3261 'Take the address before doing the cast, rather than after')) 3262 3263 # Check for people declaring static/global STL strings at the top level. 3264 # This is dangerous because the C++ language does not guarantee that 3265 # globals with constructors are initialized before the first access. 3266 match = Match( 3267 r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', 3268 line) 3269 # Make sure it's not a function. 3270 # Function template specialization looks like: "string foo<Type>(...". 3271 # Class template definitions look like: "string Foo<Type>::Method(...". 3272 if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', 3273 match.group(3)): 3274 error(filename, linenum, 'runtime/string', 4, 3275 'For a static/global string constant, use a C style string instead: ' 3276 '"%schar %s[]".' % 3277 (match.group(1), match.group(2))) 3278 3279 # Check that we're not using RTTI outside of testing code. 3280 if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename): 3281 error(filename, linenum, 'runtime/rtti', 5, 3282 'Do not use dynamic_cast<>. If you need to cast within a class ' 3283 "hierarchy, use static_cast<> to upcast. Google doesn't support " 3284 'RTTI.') 3285 3286 if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): 3287 error(filename, linenum, 'runtime/init', 4, 3288 'You seem to be initializing a member variable with itself.') 3289 3290 if file_extension == 'h': 3291 # TODO(unknown): check that 1-arg constructors are explicit. 3292 # How to tell it's a constructor? 3293 # (handled in CheckForNonStandardConstructs for now) 3294 # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS 3295 # (level 1 error) 3296 pass 3297 3298 # Check if people are using the verboten C basic types. The only exception 3299 # we regularly allow is "unsigned short port" for port. 3300 if Search(r'\bshort port\b', line): 3301 if not Search(r'\bunsigned short port\b', line): 3302 error(filename, linenum, 'runtime/int', 4, 3303 'Use "unsigned short" for ports, not "short"') 3304 else: 3305 match = Search(r'\b(short|long(?! +double)|long long)\b', line) 3306 if match: 3307 error(filename, linenum, 'runtime/int', 4, 3308 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) 3309 3310 # When snprintf is used, the second argument shouldn't be a literal. 3311 match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) 3312 if match and match.group(2) != '0': 3313 # If 2nd arg is zero, snprintf is used to calculate size. 3314 error(filename, linenum, 'runtime/printf', 3, 3315 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' 3316 'to snprintf.' % (match.group(1), match.group(2))) 3317 3318 # Check if some verboten C functions are being used. 3319 if Search(r'\bsprintf\b', line): 3320 error(filename, linenum, 'runtime/printf', 5, 3321 'Never use sprintf. 
Use snprintf instead.') 3322 match = Search(r'\b(strcpy|strcat)\b', line) 3323 if match: 3324 error(filename, linenum, 'runtime/printf', 4, 3325 'Almost always, snprintf is better than %s' % match.group(1)) 3326 3327 if Search(r'\bsscanf\b', line): 3328 error(filename, linenum, 'runtime/printf', 1, 3329 'sscanf can be ok, but is slow and can overflow buffers.') 3330 3331 # Check if some verboten operator overloading is going on 3332 # TODO(unknown): catch out-of-line unary operator&: 3333 # class X {}; 3334 # int operator&(const X& x) { return 42; } // unary operator& 3335 # The trick is it's hard to tell apart from binary operator&: 3336 # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& 3337 if Search(r'\boperator\s*&\s*\(\s*\)', line): 3338 error(filename, linenum, 'runtime/operator', 4, 3339 'Unary operator& is dangerous. Do not use it.') 3340 3341 # Check for suspicious usage of "if" like 3342 # } if (a == b) { 3343 if Search(r'\}\s*if\s*\(', line): 3344 error(filename, linenum, 'readability/braces', 4, 3345 'Did you mean "else if"? If not, start a new line for "if".') 3346 3347 # Check for potential format string bugs like printf(foo). 3348 # We constrain the pattern not to pick things like DocidForPrintf(foo). 3349 # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) 3350 # TODO(sugawarayu): Catch the following case. Need to change the calling 3351 # convention of the whole function to process multiple line to handle it. 3352 # printf( 3353 # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); 3354 printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') 3355 if printf_args: 3356 match = Match(r'([\w.\->()]+)$', printf_args) 3357 if match and match.group(1) != '__VA_ARGS__': 3358 function_name = re.search(r'\b((?:string)?printf)\s*\(', 3359 line, re.I).group(1) 3360 error(filename, linenum, 'runtime/printf', 4, 3361 'Potential format string bug. Do %s("%%s", %s) instead.' 3362 % (function_name, match.group(1))) 3363 3364 # Check for potential memset bugs like memset(buf, sizeof(buf), 0). 3365 match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) 3366 if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): 3367 error(filename, linenum, 'runtime/memset', 4, 3368 'Did you mean "memset(%s, 0, %s)"?' 3369 % (match.group(1), match.group(2))) 3370 3371 if Search(r'\busing namespace\b', line): 3372 error(filename, linenum, 'build/namespaces', 5, 3373 'Do not use namespace using-directives. ' 3374 'Use using-declarations instead.') 3375 3376 # Detect variable-length arrays. 3377 match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) 3378 if (match and match.group(2) != 'return' and match.group(2) != 'delete' and 3379 match.group(3).find(']') == -1): 3380 # Split the size using space and arithmetic operators as delimiters. 3381 # If any of the resulting tokens are not compile time constants then 3382 # report the error. 
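  # For illustration (a sketch, not executed): a declaration like
  #   int foo[len + 1];
  # yields the size expression 'len + 1', which the split below turns into
  # the tokens 'len' and '1' (plus empty strings that are skipped). 'len'
  # matches none of the compile-time-constant patterns, so the runtime/arrays
  # warning fires, whereas 'int bar[kMaxSize];' stays silent because
  # 'kMaxSize' matches the kConstant pattern.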
3383 tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) 3384 is_const = True 3385 skip_next = False 3386 for tok in tokens: 3387 if skip_next: 3388 skip_next = False 3389 continue 3390 3391 if Search(r'sizeof\(.+\)', tok): continue 3392 if Search(r'arraysize\(\w+\)', tok): continue 3393 3394 tok = tok.lstrip('(') 3395 tok = tok.rstrip(')') 3396 if not tok: continue 3397 if Match(r'\d+', tok): continue 3398 if Match(r'0[xX][0-9a-fA-F]+', tok): continue 3399 if Match(r'k[A-Z0-9]\w*', tok): continue 3400 if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue 3401 if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue 3402 # A catch all for tricky sizeof cases, including 'sizeof expression', 3403 # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' 3404 # requires skipping the next token because we split on ' ' and '*'. 3405 if tok.startswith('sizeof'): 3406 skip_next = True 3407 continue 3408 is_const = False 3409 break 3410 if not is_const: 3411 error(filename, linenum, 'runtime/arrays', 1, 3412 'Do not use variable-length arrays. Use an appropriately named ' 3413 "('k' followed by CamelCase) compile-time constant for the size.") 3414 3415 # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or 3416 # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing 3417 # in the class declaration. 3418 match = Match( 3419 (r'\s*' 3420 r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' 3421 r'\(.*\);$'), 3422 line) 3423 if match and linenum + 1 < clean_lines.NumLines(): 3424 next_line = clean_lines.elided[linenum + 1] 3425 # We allow some, but not all, declarations of variables to be present 3426 # in the statement that defines the class. The [\w\*,\s]* fragment of 3427 # the regular expression below allows users to declare instances of 3428 # the class or pointers to instances, but not less common types such 3429 # as function pointers or arrays. It's a tradeoff between allowing 3430 # reasonable code and avoiding trying to parse more C++ using regexps. 3431 if not Search(r'^\s*}[\w\*,\s]*;', next_line): 3432 error(filename, linenum, 'readability/constructors', 3, 3433 match.group(1) + ' should be the last thing in the class') 3434 3435 # Check for use of unnamed namespaces in header files. Registration 3436 # macros are typically OK, so we allow use of "namespace {" on lines 3437 # that end with backslashes. 3438 if (file_extension == 'h' 3439 and Search(r'\bnamespace\s*{', line) 3440 and line[-1] != '\\'): 3441 error(filename, linenum, 'build/namespaces', 4, 3442 'Do not use unnamed namespaces in header files. See ' 3443 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces' 3444 ' for more information.') 3445 3446 3447 def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, 3448 error): 3449 """Checks for a C-style cast by looking for the pattern. 3450 3451 This also handles sizeof(type) warnings, due to similarity of content. 3452 3453 Args: 3454 filename: The name of the current file. 3455 linenum: The number of the line to check. 3456 line: The line of code to check. 3457 raw_line: The raw line of code to check, with comments. 3458 cast_type: The string for the C++ cast to recommend. This is either 3459 reinterpret_cast, static_cast, or const_cast, depending. 3460 pattern: The regular expression used to find C-style casts. 3461 error: The function to call with any errors found. 3462 3463 Returns: 3464 True if an error was emitted. 3465 False otherwise. 
3466 """ 3467 match = Search(pattern, line) 3468 if not match: 3469 return False 3470 3471 # e.g., sizeof(int) 3472 sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1]) 3473 if sizeof_match: 3474 error(filename, linenum, 'runtime/sizeof', 1, 3475 'Using sizeof(type). Use sizeof(varname) instead if possible') 3476 return True 3477 3478 # operator++(int) and operator--(int) 3479 if (line[0:match.start(1) - 1].endswith(' operator++') or 3480 line[0:match.start(1) - 1].endswith(' operator--')): 3481 return False 3482 3483 remainder = line[match.end(0):] 3484 3485 # The close paren is for function pointers as arguments to a function. 3486 # eg, void foo(void (*bar)(int)); 3487 # The semicolon check is a more basic function check; also possibly a 3488 # function pointer typedef. 3489 # eg, void foo(int); or void foo(int) const; 3490 # The equals check is for function pointer assignment. 3491 # eg, void *(*foo)(int) = ... 3492 # The > is for MockCallback<...> ... 3493 # 3494 # Right now, this will only catch cases where there's a single argument, and 3495 # it's unnamed. It should probably be expanded to check for multiple 3496 # arguments with some unnamed. 3497 function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder) 3498 if function_match: 3499 if (not function_match.group(3) or 3500 function_match.group(3) == ';' or 3501 ('MockCallback<' not in raw_line and 3502 '/*' not in raw_line)): 3503 error(filename, linenum, 'readability/function', 3, 3504 'All parameters should be named in a function') 3505 return True 3506 3507 # At this point, all that should be left is actual casts. 3508 error(filename, linenum, 'readability/casting', 4, 3509 'Using C-style cast. Use %s<%s>(...) instead' % 3510 (cast_type, match.group(1))) 3511 3512 return True 3513 3514 3515 _HEADERS_CONTAINING_TEMPLATES = ( 3516 ('<deque>', ('deque',)), 3517 ('<functional>', ('unary_function', 'binary_function', 3518 'plus', 'minus', 'multiplies', 'divides', 'modulus', 3519 'negate', 3520 'equal_to', 'not_equal_to', 'greater', 'less', 3521 'greater_equal', 'less_equal', 3522 'logical_and', 'logical_or', 'logical_not', 3523 'unary_negate', 'not1', 'binary_negate', 'not2', 3524 'bind1st', 'bind2nd', 3525 'pointer_to_unary_function', 3526 'pointer_to_binary_function', 3527 'ptr_fun', 3528 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t', 3529 'mem_fun_ref_t', 3530 'const_mem_fun_t', 'const_mem_fun1_t', 3531 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t', 3532 'mem_fun_ref', 3533 )), 3534 ('<limits>', ('numeric_limits',)), 3535 ('<list>', ('list',)), 3536 ('<map>', ('map', 'multimap',)), 3537 ('<memory>', ('allocator',)), 3538 ('<queue>', ('queue', 'priority_queue',)), 3539 ('<set>', ('set', 'multiset',)), 3540 ('<stack>', ('stack',)), 3541 ('<string>', ('char_traits', 'basic_string',)), 3542 ('<utility>', ('pair',)), 3543 ('<vector>', ('vector',)), 3544 3545 # gcc extensions. 3546 # Note: std::hash is their hash, ::hash is our hash 3547 ('<hash_map>', ('hash_map', 'hash_multimap',)), 3548 ('<hash_set>', ('hash_set', 'hash_multiset',)), 3549 ('<slist>', ('slist',)), 3550 ) 3551 3552 _RE_PATTERN_STRING = re.compile(r'\bstring\b') 3553 3554 _re_pattern_algorithm_header = [] 3555 for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap', 3556 'transform'): 3557 # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or 3558 # type::max(). 
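  # For illustration (a sketch, not executed): for _template == 'min', the
  # tuple appended below is equivalent to
  #   (re.compile(r'[^>.]\bmin(<.*?>)?\([^\)]'), 'min', '<algorithm>'),
  # i.e. the pattern to search for, the display name, and the header to
  # suggest when the pattern matches.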
3559 _re_pattern_algorithm_header.append( 3560 (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'), 3561 _template, 3562 '<algorithm>')) 3563 3564 _re_pattern_templates = [] 3565 for _header, _templates in _HEADERS_CONTAINING_TEMPLATES: 3566 for _template in _templates: 3567 _re_pattern_templates.append( 3568 (re.compile(r'(\<|\b)' + _template + r'\s*\<'), 3569 _template + '<>', 3570 _header)) 3571 3572 3573 def FilesBelongToSameModule(filename_cc, filename_h): 3574 """Check if these two filenames belong to the same module. 3575 3576 The concept of a 'module' here is as follows: 3577 foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the 3578 same 'module' if they are in the same directory. 3579 some/path/public/xyzzy and some/path/internal/xyzzy are also considered 3580 to belong to the same module here. 3581 3582 If the filename_cc contains a longer path than the filename_h, for example, 3583 '/absolute/path/to/base/sysinfo.cc', and this file would include 3584 'base/sysinfo.h', this function also produces the prefix needed to open the 3585 header. This is used by the caller of this function to more robustly open the 3586 header file. We don't have access to the real include paths in this context, 3587 so we need this guesswork here. 3588 3589 Known bugs: tools/base/bar.cc and base/bar.h belong to the same module 3590 according to this implementation. Because of this, this function gives 3591 some false positives. This should be sufficiently rare in practice. 3592 3593 Args: 3594 filename_cc: The path of the .cc file. 3595 filename_h: The path of the header file. 3596 3597 Returns: 3598 Tuple with a bool and a string: 3599 bool: True if filename_cc and filename_h belong to the same module. 3600 string: the additional prefix needed to open the header file. 3601 """ 3602 3603 if not filename_cc.endswith('.cc'): 3604 return (False, '') 3605 filename_cc = filename_cc[:-len('.cc')] 3606 if filename_cc.endswith('_unittest'): 3607 filename_cc = filename_cc[:-len('_unittest')] 3608 elif filename_cc.endswith('_test'): 3609 filename_cc = filename_cc[:-len('_test')] 3610 filename_cc = filename_cc.replace('/public/', '/') 3611 filename_cc = filename_cc.replace('/internal/', '/') 3612 3613 if not filename_h.endswith('.h'): 3614 return (False, '') 3615 filename_h = filename_h[:-len('.h')] 3616 if filename_h.endswith('-inl'): 3617 filename_h = filename_h[:-len('-inl')] 3618 filename_h = filename_h.replace('/public/', '/') 3619 filename_h = filename_h.replace('/internal/', '/') 3620 3621 files_belong_to_same_module = filename_cc.endswith(filename_h) 3622 common_path = '' 3623 if files_belong_to_same_module: 3624 common_path = filename_cc[:-len(filename_h)] 3625 return files_belong_to_same_module, common_path 3626 3627 3628 def UpdateIncludeState(filename, include_state, io=codecs): 3629 """Fill up the include_state with new includes found in the file. 3630 3631 Args: 3632 filename: the name of the header to read. 3633 include_state: an _IncludeState instance in which the headers are inserted. 3634 io: The io factory to use to read the file. Provided for testability. 3635 3636 Returns: 3637 True if a header was successfully added. False otherwise.
3638 """ 3639 headerfile = None 3640 try: 3641 headerfile = io.open(filename, 'r', 'utf8', 'replace') 3642 except IOError: 3643 return False 3644 linenum = 0 3645 for line in headerfile: 3646 linenum += 1 3647 clean_line = CleanseComments(line) 3648 match = _RE_PATTERN_INCLUDE.search(clean_line) 3649 if match: 3650 include = match.group(2) 3651 # The value formatting is cute, but not really used right now. 3652 # What matters here is that the key is in include_state. 3653 include_state.setdefault(include, '%s:%d' % (filename, linenum)) 3654 return True 3655 3656 3657 def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error, 3658 io=codecs): 3659 """Reports missing STL includes. 3660 3661 This function will output warnings to make sure you are including the headers 3662 necessary for the STL containers and functions that you use. We only give one 3663 reason to include a header. For example, if you use both equal_to<> and 3664 less<> in a .h file, only one of them (whichever is used later in the file) 3665 will be reported as a reason to include <functional>. 3666 3667 Args: 3668 filename: The name of the current file. 3669 clean_lines: A CleansedLines instance containing the file. 3670 include_state: An _IncludeState instance. 3671 error: The function to call with any errors found. 3672 io: The IO factory to use to read the header file. Provided for unittest 3673 injection. 3674 """ 3675 required = {} # A map of header name to linenumber and the template entity. 3676 # Example of required: { '<functional>': (1219, 'less<>') } 3677 3678 for linenum in xrange(clean_lines.NumLines()): 3679 line = clean_lines.elided[linenum] 3680 if not line or line[0] == '#': 3681 continue 3682 3683 # String is special -- it is a non-templatized type in STL. 3684 matched = _RE_PATTERN_STRING.search(line) 3685 if matched: 3686 # Don't warn about strings in non-STL namespaces: 3687 # (We check only the first match per line; good enough.) 3688 prefix = line[:matched.start()] 3689 if prefix.endswith('std::') or not prefix.endswith('::'): 3690 required['<string>'] = (linenum, 'string') 3691 3692 for pattern, template, header in _re_pattern_algorithm_header: 3693 if pattern.search(line): 3694 required[header] = (linenum, template) 3695 3696 # The following check is just a speed-up; it does not change the semantics. 3697 if '<' not in line: # Reduces the CPU time usage by skipping lines. 3698 continue 3699 3700 for pattern, template, header in _re_pattern_templates: 3701 if pattern.search(line): 3702 required[header] = (linenum, template) 3703 3704 # The policy is that if you #include something in foo.h you don't need to 3705 # include it again in foo.cc. Here, we will look at possible includes. 3706 # Let's copy the include_state so it is only messed up within this function. 3707 include_state = include_state.copy() 3708 3709 # Did we find the header for this file (if any) and successfully load it? 3710 header_found = False 3711 3712 # Use the absolute path so that matching works properly. 3713 abs_filename = FileInfo(filename).FullName() 3714 3715 # For Emacs's flymake. 3716 # If cpplint is invoked from Emacs's flymake, a temporary file is generated 3717 # by flymake and that file name might end with '_flymake.cc'. In that case, 3718 # restore the original file name here so that the corresponding header file 3719 # can be found. 3720 # e.g.
If the file name is 'foo_flymake.cc', we should search for 'foo.h' 3721 # instead of 'foo_flymake.h' 3722 abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename) 3723 3724 # include_state is modified during iteration, so we iterate over a copy of 3725 # the keys. 3726 header_keys = include_state.keys() 3727 for header in header_keys: 3728 (same_module, common_path) = FilesBelongToSameModule(abs_filename, header) 3729 fullpath = common_path + header 3730 if same_module and UpdateIncludeState(fullpath, include_state, io): 3731 header_found = True 3732 3733 # If we can't find the header file for a .cc, assume it's because we don't 3734 # know where to look. In that case we'll give up as we're not sure they 3735 # didn't include it in the .h file. 3736 # TODO(unknown): Do a better job of finding .h files so we are confident that 3737 # not having the .h file means there isn't one. 3738 if filename.endswith('.cc') and not header_found: 3739 return 3740 3741 # All the lines have been processed, report the errors found. 3742 for required_header_unstripped in required: 3743 template = required[required_header_unstripped][1] 3744 if required_header_unstripped.strip('<>"') not in include_state: 3745 error(filename, required[required_header_unstripped][0], 3746 'build/include_what_you_use', 4, 3747 'Add #include ' + required_header_unstripped + ' for ' + template) 3748 3749 3750 _RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<') 3751 3752 3753 def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error): 3754 """Check that make_pair's template arguments are deduced. 3755 3756 G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are 3757 specified explicitly, and such use isn't intended in any case. 3758 3759 Args: 3760 filename: The name of the current file. 3761 clean_lines: A CleansedLines instance containing the file. 3762 linenum: The number of the line to check. 3763 error: The function to call with any errors found. 3764 """ 3765 raw = clean_lines.raw_lines 3766 line = raw[linenum] 3767 match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) 3768 if match: 3769 error(filename, linenum, 'build/explicit_make_pair', 3770 4, # 4 = high confidence 3771 'For C++11-compatibility, omit template arguments from make_pair' 3772 ' OR use pair directly OR if appropriate, construct a pair directly') 3773 3774 3775 def ProcessLine(filename, file_extension, clean_lines, line, 3776 include_state, function_state, nesting_state, error, 3777 extra_check_functions=[]): 3778 """Processes a single line in the file. 3779 3780 Args: 3781 filename: Filename of the file that is being processed. 3782 file_extension: The extension (dot not included) of the file. 3783 clean_lines: An array of strings, each representing a line of the file, 3784 with comments stripped. 3785 line: Number of line being processed. 3786 include_state: An _IncludeState instance in which the headers are inserted. 3787 function_state: A _FunctionState instance which counts function lines, etc. 3788 nesting_state: A _NestingState instance which maintains information about 3789 the current stack of nested blocks being parsed. 3790 error: A callable to which errors are reported, which takes 4 arguments: 3791 filename, line number, error level, and message 3792 extra_check_functions: An array of additional check functions that will be 3793 run on each source line. 
Each function takes 4 3794 arguments: filename, clean_lines, line, error 3795 """ 3796 raw_lines = clean_lines.raw_lines 3797 ParseNolintSuppressions(filename, raw_lines[line], line, error) 3798 nesting_state.Update(filename, clean_lines, line, error) 3799 if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: 3800 return 3801 CheckForFunctionLengths(filename, clean_lines, line, function_state, error) 3802 CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) 3803 CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) 3804 CheckLanguage(filename, clean_lines, line, file_extension, include_state, 3805 error) 3806 CheckForNonStandardConstructs(filename, clean_lines, line, 3807 nesting_state, error) 3808 CheckPosixThreading(filename, clean_lines, line, error) 3809 CheckInvalidIncrement(filename, clean_lines, line, error) 3810 CheckMakePairUsesDeduction(filename, clean_lines, line, error) 3811 for check_fn in extra_check_functions: 3812 check_fn(filename, clean_lines, line, error) 3813 3814 def ProcessFileData(filename, file_extension, lines, error, 3815 extra_check_functions=[]): 3816 """Performs lint checks and reports any errors to the given error function. 3817 3818 Args: 3819 filename: Filename of the file that is being processed. 3820 file_extension: The extension (dot not included) of the file. 3821 lines: An array of strings, each representing a line of the file, with the 3822 last element being empty if the file is terminated with a newline. 3823 error: A callable to which errors are reported, which takes 4 arguments: 3824 filename, line number, error level, and message 3825 extra_check_functions: An array of additional check functions that will be 3826 run on each source line. Each function takes 4 3827 arguments: filename, clean_lines, line, error 3828 """ 3829 lines = (['// marker so line numbers and indices both start at 1'] + lines + 3830 ['// marker so line numbers end in a known way']) 3831 3832 include_state = _IncludeState() 3833 function_state = _FunctionState() 3834 nesting_state = _NestingState() 3835 3836 ResetNolintSuppressions() 3837 3838 CheckForCopyright(filename, lines, error) 3839 3840 if file_extension == 'h': 3841 CheckForHeaderGuard(filename, lines, error) 3842 3843 RemoveMultiLineComments(filename, lines, error) 3844 clean_lines = CleansedLines(lines) 3845 for line in xrange(clean_lines.NumLines()): 3846 ProcessLine(filename, file_extension, clean_lines, line, 3847 include_state, function_state, nesting_state, error, 3848 extra_check_functions) 3849 nesting_state.CheckClassFinished(filename, error) 3850 3851 CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) 3852 3853 # We check here rather than inside ProcessLine so that we see raw 3854 # lines rather than "cleaned" lines. 3855 CheckForUnicodeReplacementCharacters(filename, lines, error) 3856 3857 CheckForNewlineAtEOF(filename, lines, error) 3858 3859 def ProcessFile(filename, vlevel, extra_check_functions=[]): 3860 """Does google-lint on a single file. 3861 3862 Args: 3863 filename: The name of the file to parse. 3864 3865 vlevel: The level of errors to report. Every error of confidence 3866 >= verbose_level will be reported. 0 is a good default. 3867 3868 extra_check_functions: An array of additional check functions that will be 3869 run on each source line. 
Each function takes 4 3870 arguments: filename, clean_lines, line, error 3871 """ 3872 3873 _SetVerboseLevel(vlevel) 3874 3875 try: 3876 # Support the UNIX convention of using "-" for stdin. Note that 3877 # we are not opening the file with universal newline support 3878 # (which codecs doesn't support anyway), so the resulting lines do 3879 # contain trailing '\r' characters if we are reading a file that 3880 # has CRLF endings. 3881 # If after the split a trailing '\r' is present, it is removed 3882 # below. If it is not expected to be present (i.e. os.linesep != 3883 # '\r\n' as in Windows), a warning is issued below if this file 3884 # is processed. 3885 3886 if filename == '-': 3887 lines = codecs.StreamReaderWriter(sys.stdin, 3888 codecs.getreader('utf8'), 3889 codecs.getwriter('utf8'), 3890 'replace').read().split('\n') 3891 else: 3892 lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') 3893 3894 carriage_return_found = False 3895 # Remove trailing '\r'. 3896 for linenum in range(len(lines)): 3897 if lines[linenum].endswith('\r'): 3898 lines[linenum] = lines[linenum].rstrip('\r') 3899 carriage_return_found = True 3900 3901 except IOError: 3902 sys.stderr.write( 3903 "Skipping input '%s': Can't open for reading\n" % filename) 3904 return 3905 3906 # Note, if no dot is found, this will give the entire filename as the ext. 3907 file_extension = filename[filename.rfind('.') + 1:] 3908 3909 # When reading from stdin, the extension is unknown, so no cpplint tests 3910 # should rely on the extension. 3911 if (filename != '-' and file_extension != 'cc' and file_extension != 'h' 3912 and file_extension != 'cpp'): 3913 sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename) 3914 else: 3915 ProcessFileData(filename, file_extension, lines, Error, 3916 extra_check_functions) 3917 if carriage_return_found and os.linesep != '\r\n': 3918 # Use 0 for linenum since outputting only one error for potentially 3919 # several lines. 3920 Error(filename, 0, 'whitespace/newline', 1, 3921 'One or more unexpected \\r (^M) found;' 3922 'better to use only a \\n') 3923 3924 sys.stderr.write('Done processing %s\n' % filename) 3925 3926 3927 def PrintUsage(message): 3928 """Prints a brief usage string and exits, optionally with an error message. 3929 3930 Args: 3931 message: The optional error message. 3932 """ 3933 sys.stderr.write(_USAGE) 3934 if message: 3935 sys.exit('\nFATAL ERROR: ' + message) 3936 else: 3937 sys.exit(1) 3938 3939 3940 def PrintCategories(): 3941 """Prints a list of all the error-categories used by error messages. 3942 3943 These are the categories used to filter messages via --filter. 3944 """ 3945 sys.stderr.write(''.join(' %s\n' % cat for cat in _ERROR_CATEGORIES)) 3946 sys.exit(0) 3947 3948 3949 def ParseArguments(args): 3950 """Parses the command line arguments. 3951 3952 This may set the output format and verbosity level as side-effects. 3953 3954 Args: 3955 args: The command line arguments: 3956 3957 Returns: 3958 The list of filenames to lint. 
3959 """ 3960 try: 3961 (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=', 3962 'counting=', 3963 'filter=', 3964 'root=']) 3965 except getopt.GetoptError: 3966 PrintUsage('Invalid arguments.') 3967 3968 verbosity = _VerboseLevel() 3969 output_format = _OutputFormat() 3970 filters = '' 3971 counting_style = '' 3972 3973 for (opt, val) in opts: 3974 if opt == '--help': 3975 PrintUsage(None) 3976 elif opt == '--output': 3977 if not val in ('emacs', 'vs7', 'eclipse'): 3978 PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.') 3979 output_format = val 3980 elif opt == '--verbose': 3981 verbosity = int(val) 3982 elif opt == '--filter': 3983 filters = val 3984 if not filters: 3985 PrintCategories() 3986 elif opt == '--counting': 3987 if val not in ('total', 'toplevel', 'detailed'): 3988 PrintUsage('Valid counting options are total, toplevel, and detailed') 3989 counting_style = val 3990 elif opt == '--root': 3991 global _root 3992 _root = val 3993 3994 if not filenames: 3995 PrintUsage('No files were specified.') 3996 3997 _SetOutputFormat(output_format) 3998 _SetVerboseLevel(verbosity) 3999 _SetFilters(filters) 4000 _SetCountingStyle(counting_style) 4001 4002 return filenames 4003 4004 4005 def main(): 4006 filenames = ParseArguments(sys.argv[1:]) 4007 4008 # Change stderr to write with replacement characters so we don't die 4009 # if we try to print something containing non-ASCII characters. 4010 sys.stderr = codecs.StreamReaderWriter(sys.stderr, 4011 codecs.getreader('utf8'), 4012 codecs.getwriter('utf8'), 4013 'replace') 4014 4015 _cpplint_state.ResetErrorCounts() 4016 for filename in filenames: 4017 ProcessFile(filename, _cpplint_state.verbose_level) 4018 _cpplint_state.PrintErrorCounts() 4019 4020 sys.exit(_cpplint_state.error_count > 0) 4021 4022 4023 if __name__ == '__main__': 4024 main() 4025
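# Example invocations (illustrative only; see _USAGE above for the
# authoritative flag descriptions):
#   ./cpplint.py --counting=detailed --filter=-readability/streams \
#       foo/bar.cc foo/bar.h
#   cat foo/bar.cc | ./cpplint.py --verbose=3 -
# The process exits with a non-zero status when any error was reported, so it
# can be wired directly into a presubmit check or a continuous-integration
# step.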