Statistics
| Branch: | Tag: | Revision:

amiro-os / tools / cpplint / python / cpplint.py @ e545e620

History | View | Annotate | Download (236 KB)

1 e545e620 Thomas Schöpping
#!/usr/bin/env python
2
#
3
# Copyright (c) 2009 Google Inc. All rights reserved.
4
#
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions are
7
# met:
8
#
9
#    * Redistributions of source code must retain the above copyright
10
# notice, this list of conditions and the following disclaimer.
11
#    * Redistributions in binary form must reproduce the above
12
# copyright notice, this list of conditions and the following disclaimer
13
# in the documentation and/or other materials provided with the
14
# distribution.
15
#    * Neither the name of Google Inc. nor the names of its
16
# contributors may be used to endorse or promote products derived from
17
# this software without specific prior written permission.
18
#
19
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30
31
"""Does google-lint on c++ files.
32

33
The goal of this script is to identify places in the code that *may*
34
be in non-compliance with google style.  It does not attempt to fix
35
up these problems -- the point is to educate.  It also does not
36
attempt to find all problems, or to ensure that everything it does
37
find is legitimately a problem.
38

39
In particular, we can get very confused by /* and // inside strings!
40
We do a small hack, which is to ignore //'s with "'s after them on the
41
same line, but it is far from perfect (in either direction).
42
"""
43
44
import codecs
45
import copy
46
import getopt
47
import math  # for log
48
import os
49
import re
50
import sre_compile
51
import string
52
import sys
53
import unicodedata
54
55
56
# Help text printed for command-line usage.  NOTE(review): fixed the typo
# "through liner." -> "through the linter." in the CPPLINT.cfg section.
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
                   [--counting=total|toplevel|detailed] [--root=subdir]
                   [--linelength=digits]
        <file> [file] ...

  The style guidelines this tries to follow are those in
    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml

  Every problem is given a confidence score from 1-5, with 5 meaning we are
  certain of the problem, and 1 meaning it could be a legitimate construct.
  This will miss some errors, and is not a substitute for a code review.

  To suppress false-positive errors of a certain category, add a
  'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
  suppresses errors of all categories on that line.

  The files passed in will be linted; at least one file must be provided.
  Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
  extensions with the --extensions flag.

  Flags:

    output=vs7
      By default, the output is formatted to ease emacs parsing.  Visual Studio
      compatible output (vs7) may also be used.  Other formats are unsupported.

    verbose=#
      Specify a number 0-5 to restrict errors to certain verbosity levels.

    filter=-x,+y,...
      Specify a comma-separated list of category-filters to apply: only
      error messages whose category names pass the filters will be printed.
      (Category names are printed with the message and look like
      "[whitespace/indent]".)  Filters are evaluated left to right.
      "-FOO" and "FOO" means "do not print categories that start with FOO".
      "+FOO" means "do print categories that start with FOO".

      Examples: --filter=-whitespace,+whitespace/braces
                --filter=whitespace,runtime/printf,+runtime/printf_format
                --filter=-,+build/include_what_you_use

      To see a list of all the categories used in cpplint, pass no arg:
         --filter=

    counting=total|toplevel|detailed
      The total number of errors found is always printed. If
      'toplevel' is provided, then the count of errors in each of
      the top-level categories like 'build' and 'whitespace' will
      also be printed. If 'detailed' is provided, then a count
      is provided for each category like 'build/class'.

    root=subdir
      The root directory used for deriving header guard CPP variable.
      By default, the header guard CPP variable is calculated as the relative
      path to the directory that contains .git, .hg, or .svn.  When this flag
      is specified, the relative path is calculated from the specified
      directory. If the specified directory does not exist, this flag is
      ignored.

      Examples:
        Assuming that src/.git exists, the header guard CPP variables for
        src/chrome/browser/ui/browser.h are:

        No flag => CHROME_BROWSER_UI_BROWSER_H_
        --root=chrome => BROWSER_UI_BROWSER_H_
        --root=chrome/browser => UI_BROWSER_H_

    linelength=digits
      This is the allowed line length for the project. The default value is
      80 characters.

      Examples:
        --linelength=120

    extensions=extension,extension,...
      The allowed file extensions that cpplint will check

      Examples:
        --extensions=hpp,cpp

    cpplint.py supports per-directory configurations specified in CPPLINT.cfg
    files. CPPLINT.cfg file can contain a number of key=value pairs.
    Currently the following options are supported:

      set noparent
      filter=+filter1,-filter2,...
      exclude_files=regex
      linelength=80

    "set noparent" option prevents cpplint from traversing directory tree
    upwards looking for more .cfg files in parent directories. This option
    is usually placed in the top-level project directory.

    The "filter" option is similar in function to --filter flag. It specifies
    message filters in addition to the |_DEFAULT_FILTERS| and those specified
    through --filter command-line flag.

    "exclude_files" allows to specify a regular expression to be matched against
    a file name. If the expression matches, the file is skipped and not run
    through the linter.

    "linelength" allows to specify the allowed line length for the project.

    CPPLINT.cfg has an effect on files in the same directory and all
    sub-directories, unless overridden by a nested configuration file.

      Example file:
        filter=-build/include_order,+build/include_alpha
        exclude_files=.*\.cc

    The above example disables build/include_order warning and enables
    build/include_alpha as well as excludes all .cc from being
    processed by linter, in the current directory (where the .cfg
    file is located) and all sub-directories.
"""
172
173
# We categorize each error message we print.  Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here!  cpplint_unittest.py should tell you if you forget to do this.
_ERROR_CATEGORIES = [
    'build/class',
    'build/c++11',
    'build/deprecated',
    'build/endif_comment',
    'build/explicit_make_pair',
    'build/forward_decl',
    'build/header_guard',
    'build/include',
    'build/include_alpha',
    'build/include_order',
    'build/include_what_you_use',
    'build/namespaces',
    'build/printf_format',
    'build/storage_class',
    'legal/copyright',
    'readability/alt_tokens',
    'readability/braces',
    'readability/casting',
    'readability/check',
    'readability/constructors',
    'readability/fn_size',
    'readability/function',
    'readability/inheritance',
    'readability/multiline_comment',
    'readability/multiline_string',
    'readability/namespace',
    'readability/nolint',
    'readability/nul',
    'readability/strings',
    'readability/todo',
    'readability/utf8',
    'runtime/arrays',
    'runtime/casting',
    'runtime/explicit',
    'runtime/int',
    'runtime/init',
    'runtime/invalid_increment',
    'runtime/member_string_references',
    'runtime/memset',
    'runtime/indentation_namespace',
    'runtime/operator',
    'runtime/printf',
    'runtime/printf_format',
    'runtime/references',
    'runtime/string',
    'runtime/threadsafe_fn',
    'runtime/vlog',
    'whitespace/blank_line',
    'whitespace/braces',
    'whitespace/comma',
    'whitespace/comments',
    'whitespace/empty_conditional_body',
    'whitespace/empty_loop_body',
    'whitespace/end_of_line',
    'whitespace/ending_newline',
    'whitespace/forcolon',
    'whitespace/indent',
    'whitespace/line_length',
    'whitespace/newline',
    'whitespace/operators',
    'whitespace/parens',
    'whitespace/semicolon',
    'whitespace/tab',
    'whitespace/todo',
    ]

# These error categories are no longer enforced by cpplint, but for backwards-
# compatibility they may still appear in NOLINT comments.
_LEGACY_ERROR_CATEGORIES = [
    'readability/streams',
    ]

# The default state of the category filter. This is overridden by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = ['-build/include_alpha']
255
256
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.

# C++ headers: names recognized as C++ system headers for include-order checks.
_CPP_HEADERS = frozenset([
    # Legacy
    'algobase.h',
    'algo.h',
    'alloc.h',
    'builtinbuf.h',
    'bvector.h',
    'complex.h',
    'defalloc.h',
    'deque.h',
    'editbuf.h',
    'fstream.h',
    'function.h',
    'hash_map',
    'hash_map.h',
    'hash_set',
    'hash_set.h',
    'hashtable.h',
    'heap.h',
    'indstream.h',
    'iomanip.h',
    'iostream.h',
    'istream.h',
    'iterator.h',
    'list.h',
    'map.h',
    'multimap.h',
    'multiset.h',
    'ostream.h',
    'pair.h',
    'parsestream.h',
    'pfstream.h',
    'procbuf.h',
    'pthread_alloc',
    'pthread_alloc.h',
    'rope',
    'rope.h',
    'ropeimpl.h',
    'set.h',
    'slist',
    'slist.h',
    'stack.h',
    'stdiostream.h',
    'stl_alloc.h',
    'stl_relops.h',
    'streambuf.h',
    'stream.h',
    'strfile.h',
    'strstream.h',
    'tempbuf.h',
    'tree.h',
    'type_traits.h',
    'vector.h',
    # 17.6.1.2 C++ library headers
    'algorithm',
    'array',
    'atomic',
    'bitset',
    'chrono',
    'codecvt',
    'complex',
    'condition_variable',
    'deque',
    'exception',
    'forward_list',
    'fstream',
    'functional',
    'future',
    'initializer_list',
    'iomanip',
    'ios',
    'iosfwd',
    'iostream',
    'istream',
    'iterator',
    'limits',
    'list',
    'locale',
    'map',
    'memory',
    'mutex',
    'new',
    'numeric',
    'ostream',
    'queue',
    'random',
    'ratio',
    'regex',
    'set',
    'sstream',
    'stack',
    'stdexcept',
    'streambuf',
    'string',
    'strstream',
    'system_error',
    'thread',
    'tuple',
    'typeindex',
    'typeinfo',
    'type_traits',
    'unordered_map',
    'unordered_set',
    'utility',
    'valarray',
    'vector',
    # 17.6.1.2 C++ headers for C library facilities
    'cassert',
    'ccomplex',
    'cctype',
    'cerrno',
    'cfenv',
    'cfloat',
    'cinttypes',
    'ciso646',
    'climits',
    'clocale',
    'cmath',
    'csetjmp',
    'csignal',
    'cstdalign',
    'cstdarg',
    'cstdbool',
    'cstddef',
    'cstdint',
    'cstdio',
    'cstdlib',
    'cstring',
    'ctgmath',
    'ctime',
    'cuchar',
    'cwchar',
    'cwctype',
    ])
395
396
397
# These headers are excluded from [build/include] and [build/include_order]
# checks:
# - Anything not following google file name conventions (containing an
#   uppercase character, such as Python.h or nsStringAPI.h, for example).
# - Lua headers.
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
    r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
404
405
406
# Assertion macros.  These are defined in base/logging.h and
# testing/base/gunit.h.  Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
    'DCHECK', 'CHECK',
    'EXPECT_TRUE_M', 'EXPECT_TRUE',
    'ASSERT_TRUE_M', 'ASSERT_TRUE',
    'EXPECT_FALSE_M', 'EXPECT_FALSE',
    'ASSERT_FALSE_M', 'ASSERT_FALSE',
    ]

# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
# Maps macro name -> {operator -> suggested replacement macro}, e.g.
# _CHECK_REPLACEMENT['CHECK']['=='] == 'CHECK_EQ'.
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])

# The "TRUE" family keeps the operator's sense: CHECK(a == b) -> CHECK_EQ.
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
                        ('>=', 'GE'), ('>', 'GT'),
                        ('<=', 'LE'), ('<', 'LT')]:
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement

# The "FALSE" family inverts the operator: EXPECT_FALSE(a == b) -> EXPECT_NE.
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
                            ('>=', 'LT'), ('>', 'LE'),
                            ('<=', 'GT'), ('<', 'GE')]:
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
437
438
# Alternative tokens and their replacements.  For full list, see section 2.5
# Alternative tokens [lex.digraph] in the C++ standard.
#
# Digraphs (such as '%:') are not included here since it's a mess to
# match those on a word boundary.
_ALT_TOKEN_REPLACEMENT = {
    'and': '&&',
    'bitor': '|',
    'or': '||',
    'xor': '^',
    'compl': '~',
    'bitand': '&',
    'and_eq': '&=',
    'or_eq': '|=',
    'xor_eq': '^=',
    'not': '!',
    'not_eq': '!='
    }

# Compile regular expression that matches all the above keywords.  The "[ =()]"
# bit is meant to avoid matching these keywords outside of boolean expressions.
#
# False positives include C-style multi-line comments and multi-line strings
# but those have always been troublesome for cpplint.
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
464
465
466
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5

# These constants define the current inline assembly state
_NO_ASM = 0       # Outside of inline assembly block
_INSIDE_ASM = 1   # Inside inline assembly block
_END_ASM = 2      # Last line of inline assembly block
_BLOCK_ASM = 3    # The whole block is an inline assembly block

# Match start of assembly blocks
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
                        r'(?:\s+(volatile|__volatile__))?'
                        r'\s*[{(]')


# Cache of compiled regexps, shared by Match/Search/ReplaceAll below.
_regexp_compile_cache = {}

# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}

# The root directory used for deriving header guard CPP variable.
# This is set by --root flag.
_root = None

# The allowed line length of files.
# This is set by --linelength flag.
_line_length = 80

# The allowed extensions for file names
# This is set by --extensions flag.
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
503
504
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Records any NOLINT suppression found on the current line.

  Scans raw_line for a NOLINT / NOLINTNEXTLINE comment and updates the
  module-global _error_suppressions map accordingly.  A malformed or
  unknown category is reported through the error handler.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  match = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
  if not match:
    return

  # NOLINTNEXTLINE suppresses the line after the comment, not the line
  # the comment sits on.
  target_line = linenum + 1 if match.group(1) else linenum

  category = match.group(2)
  if category in (None, '(*)'):  # => "suppress all"
    _error_suppressions.setdefault(None, set()).add(target_line)
    return

  if not (category.startswith('(') and category.endswith(')')):
    return
  category = category[1:-1]
  if category in _ERROR_CATEGORIES:
    _error_suppressions.setdefault(category, set()).add(target_line)
  elif category not in _LEGACY_ERROR_CATEGORIES:
    error(filename, linenum, 'readability/nolint', 5,
          'Unknown NOLINT error category: %s' % category)
534
535
536
def ResetNolintSuppressions():
  """Resets the set of NOLINT suppressions to empty.

  Clears the module-global _error_suppressions map in place.
  """
  _error_suppressions.clear()
539
540
541
def IsErrorSuppressedByNolint(category, linenum):
  """Returns true if the specified error category is suppressed on this line.

  Consults the global error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A line is suppressed either under its own category or under the
  # None key, which holds bare NOLINT / NOLINT(*) suppressions.
  for key in (category, None):
    if linenum in _error_suppressions.get(key, set()):
      return True
  return False
555
556
557
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp.

  Args:
    pattern: regex pattern string (match is anchored at the start of s).
    s: string to match.

  Returns:
    A match object, or None if the beginning of s does not match.
  """
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile instead of the internal sre_compile module:
    # sre_compile is deprecated and removed in Python 3.13.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
565
566
567
def ReplaceAll(pattern, rep, s):
  """Replaces instances of pattern in a string with a replacement.

  The compiled regex is kept in a cache shared by Match and Search.

  Args:
    pattern: regex pattern
    rep: replacement text
    s: search string

  Returns:
    string with replacements made (or original string if no replacements)
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile instead of the internal sre_compile module:
    # sre_compile is deprecated and removed in Python 3.13.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].sub(rep, s)
583
584
585
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp.

  Args:
    pattern: regex pattern string (unanchored; may match anywhere in s).
    s: string to search.

  Returns:
    A match object for the first occurrence, or None if there is none.
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile instead of the internal sre_compile module:
    # sre_compile is deprecated and removed in Python 3.13.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
590
591
592
class _IncludeState(object):
  """Tracks line numbers for includes, and the order in which includes appear.

  include_list contains list of lists of (header, line number) pairs.
  It's a lists of lists rather than just one flat list to make it
  easier to update across preprocessor boundaries.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above. Calls in an illegal order will
  raise an _IncludeError with an appropriate error message.

  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  # Human-readable names for the header-type constants, used in messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the section constants, used in messages.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    # Start with one (empty) section of includes.
    self.include_list = [[]]
    self.ResetSection('')

  def FindHeader(self, header):
    """Check if a header has already been included.

    Args:
      header: header to check.
    Returns:
      Line number of previous occurrence, or -1 if the header has not
      been seen before.
    """
    for section_list in self.include_list:
      for f in section_list:
        if f[0] == header:
          return f[1]
    return -1

  def ResetSection(self, directive):
    """Reset section checking for preprocessor directive.

    Args:
      directive: preprocessor directive (e.g. "if", "else").
    """
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

    # Update list of includes.  Note that we never pop from the
    # include list.
    if directive in ('if', 'ifdef', 'ifndef'):
      self.include_list.append([])
    elif directive in ('else', 'elif'):
      self.include_list[-1] = []

  def SetLastHeader(self, header_path):
    # Record the most recently seen header (used for alphabetical checks).
    self._last_header = header_path

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.

    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      header_path: Canonicalized header to be checked.

    Returns:
      Returns true if the header is in alphabetical order.
    """
    # If previous section is different from current section, _last_header will
    # be reset to empty string, so it's always less than current header.
    #
    # If previous line was a blank line, assume that the headers are
    # intentionally sorted the way they are.
    if (self._last_header > header_path and
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
      return False
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.

    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.

    """
    # Built up front so every failing branch can return the same message.
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    # Crossing into a new section invalidates the alphabetical-order state.
    if last_section != self._section:
      self._last_header = ''

    return ''
755
756
757
class _CppLintState(object):
  """Maintains module-wide state: verbosity, filters, and error counts."""

  def __init__(self):
    self.verbose_level = 1  # global setting.
    self.error_count = 0    # global count of reported errors
    # filters to apply when emitting error messages
    self.filters = _DEFAULT_FILTERS[:]
    # backup of filter list. Used to restore the state after each file.
    self._filters_backup = self.filters[:]
    self.counting = 'total'  # In what way are we counting errors?
    self.errors_by_category = {}  # string to int dict storing error counts

    # output format:
    # "emacs" - format that emacs can parse (default)
    # "vs7" - format that Microsoft Visual Studio 7 can parse
    self.output_format = 'emacs'

  def SetOutputFormat(self, output_format):
    """Sets the output format for errors."""
    self.output_format = output_format

  def SetVerboseLevel(self, level):
    """Sets the module's verbosity, and returns the previous setting."""
    last_verbose_level = self.verbose_level
    self.verbose_level = level
    return last_verbose_level

  def SetCountingStyle(self, counting_style):
    """Sets the module's counting options."""
    self.counting = counting_style

  def SetFilters(self, filters):
    """Sets the error-message filters.

    These filters are applied when deciding whether to emit a given
    error message.

    Args:
      filters: A string of comma-separated filters (eg "+whitespace/indent").
               Each filter should start with + or -; else we die.

    Raises:
      ValueError: The comma-separated filters did not all start with '+' or '-'.
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
    """
    # Default filters always have less priority than the flag ones.
    self.filters = _DEFAULT_FILTERS[:]
    self.AddFilters(filters)

  def AddFilters(self, filters):
    """Adds more filters to the existing list of error-message filters.

    Args:
      filters: A string of comma-separated filters; empty entries are ignored.

    Raises:
      ValueError: If any resulting filter does not start with '+' or '-'.
    """
    for filt in filters.split(','):
      clean_filt = filt.strip()
      if clean_filt:
        self.filters.append(clean_filt)
    # Validate the whole list (including pre-existing entries) every time.
    for filt in self.filters:
      if not (filt.startswith('+') or filt.startswith('-')):
        raise ValueError('Every filter in --filters must start with + or -'
                         ' (%s does not)' % filt)

  def BackupFilters(self):
    """Saves the current filter list to backup storage."""
    self._filters_backup = self.filters[:]

  def RestoreFilters(self):
    """Restores filters previously backed up."""
    self.filters = self._filters_backup[:]

  def ResetErrorCounts(self):
    """Sets the module's error statistic back to zero."""
    self.error_count = 0
    self.errors_by_category = {}

  def IncrementErrorCount(self, category):
    """Bumps the module's error statistic."""
    self.error_count += 1
    if self.counting in ('toplevel', 'detailed'):
      if self.counting != 'detailed':
        # 'toplevel' counting buckets by the part before the first '/'.
        category = category.split('/')[0]
      if category not in self.errors_by_category:
        self.errors_by_category[category] = 0
      self.errors_by_category[category] += 1

  def PrintErrorCounts(self):
    """Print a summary of errors by category, and the total."""
    # items() instead of the Python 2-only iteritems(), so this works on
    # both Python 2 and Python 3.
    for category, count in self.errors_by_category.items():
      sys.stderr.write('Category \'%s\' errors found: %d\n' %
                       (category, count))
    sys.stderr.write('Total errors found: %d\n' % self.error_count)
847
848
# The single module-wide state instance, shared by the _-prefixed helper
# functions below.
_cpplint_state = _CppLintState()
849
850
851
def _OutputFormat():
  """Gets the module's output format.

  Returns:
    The output_format string held by the module-wide _cpplint_state.
  """
  return _cpplint_state.output_format
854
855
856
def _SetOutputFormat(output_format):
  """Sets the module's output format.

  Delegates to the module-wide _cpplint_state.
  """
  _cpplint_state.SetOutputFormat(output_format)
859
860
861
def _VerboseLevel():
  """Returns the module's verbosity setting.

  Reads verbose_level from the module-wide _cpplint_state.
  """
  return _cpplint_state.verbose_level
864
865
866
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting.

  Delegates to the module-wide _cpplint_state.
  """
  return _cpplint_state.SetVerboseLevel(level)
869
870
871
def _SetCountingStyle(level):
  """Sets the module's counting options.

  Delegates to the module-wide _cpplint_state.
  """
  _cpplint_state.SetCountingStyle(level)
874
875
876
def _Filters():
  """Returns the module's active list of output filters."""
  return _cpplint_state.filters
879
880
881
def _SetFilters(filters):
  """Replaces the module's error-message filters.

  The resulting filters decide whether a given error message is emitted.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.SetFilters(filters)
892
893
def _AddFilters(filters):
  """Appends filter overrides without clearing the current filter list.

  Unlike _SetFilters, the filters already in effect stay available.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  _cpplint_state.AddFilters(filters)
904
905
def _BackupFilters():
  """Snapshots the module's current filter list for later restoration."""
  _cpplint_state.BackupFilters()
908
909
def _RestoreFilters():
  """Reinstates the filter list saved by _BackupFilters."""
  _cpplint_state.RestoreFilters()
912
913
class _FunctionState(object):
914
  """Tracks current function name and the number of lines in its body."""
915
916
  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
917
  _TEST_TRIGGER = 400    # about 50% more than _NORMAL_TRIGGER.
918
919
  def __init__(self):
920
    self.in_a_function = False
921
    self.lines_in_function = 0
922
    self.current_function = ''
923
924
  def Begin(self, function_name):
925
    """Start analyzing function body.
926

927
    Args:
928
      function_name: The name of the function being tracked.
929
    """
930
    self.in_a_function = True
931
    self.lines_in_function = 0
932
    self.current_function = function_name
933
934
  def Count(self):
935
    """Count line in current function body."""
936
    if self.in_a_function:
937
      self.lines_in_function += 1
938
939
  def Check(self, error, filename, linenum):
940
    """Report if too many lines in function body.
941

942
    Args:
943
      error: The function to call with any errors found.
944
      filename: The name of the current file.
945
      linenum: The number of the line to check.
946
    """
947
    if Match(r'T(EST|est)', self.current_function):
948
      base_trigger = self._TEST_TRIGGER
949
    else:
950
      base_trigger = self._NORMAL_TRIGGER
951
    trigger = base_trigger * 2**_VerboseLevel()
952
953
    if self.lines_in_function > trigger:
954
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
955
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
956
      if error_level > 5:
957
        error_level = 5
958
      error(filename, linenum, 'readability/fn_size', error_level,
959
            'Small and focused functions are preferred:'
960
            ' %s has %d non-comment lines'
961
            ' (error triggered by exceeding %d lines).'  % (
962
                self.current_function, self.lines_in_function, trigger))
963
964
  def End(self):
965
    """Stop analyzing function body."""
966
    self.in_a_function = False
967
968
969
class _IncludeError(Exception):
970
  """Indicates a problem with the include order in a file."""
971
  pass
972
973
974
class FileInfo(object):
  """Utility accessors for the components of a file's path.

  Paths are reported relative to the project root when a root can be
  detected (git/hg/svn checkout), and normalized to forward slashes.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Absolute path with Windows backslashes converted to '/'."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if not os.path.exists(fullname):
      # Don't know what to do; header guard warnings may be wrong...
      return fullname

    project_dir = os.path.dirname(fullname)

    if os.path.exists(os.path.join(project_dir, ".svn")):
      # SVN <= 1.6 keeps a .svn entry in every directory, so walk up
      # until the parent no longer has one -- that is the checkout root.
      root_dir = project_dir
      one_up_dir = os.path.dirname(root_dir)
      while os.path.exists(os.path.join(one_up_dir, ".svn")):
        root_dir = os.path.dirname(root_dir)
        one_up_dir = os.path.dirname(one_up_dir)

      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

    # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
    # searching up from the current path.
    root_dir = os.path.dirname(fullname)
    while (root_dir != os.path.dirname(root_dir) and
           not os.path.exists(os.path.join(root_dir, ".git")) and
           not os.path.exists(os.path.join(root_dir, ".hg")) and
           not os.path.exists(os.path.join(root_dir, ".svn"))):
      root_dir = os.path.dirname(root_dir)

    if (os.path.exists(os.path.join(root_dir, ".git")) or
        os.path.exists(os.path.join(root_dir, ".hg")) or
        os.path.exists(os.path.join(root_dir, ".svn"))):
      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """True if the extension marks this as a C/C++ implementation file."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
1062
1063
1064
def _ShouldPrintError(category, confidence, linenum):
  """Decide whether an error passes suppression, verbosity and filters."""
  # Three ways to silence a message: a NOLINT(category) comment on the
  # line, a confidence below the verbosity threshold, or a matching
  # '-' filter that no later '+' filter overrides.
  if IsErrorSuppressedByNolint(category, linenum):
    return False

  if confidence < _cpplint_state.verbose_level:
    return False

  is_filtered = False
  for one_filter in _Filters():
    sign, prefix = one_filter[0], one_filter[1:]
    if sign == '-':
      if category.startswith(prefix):
        is_filtered = True
    elif sign == '+':
      if category.startswith(prefix):
        is_filtered = False
    else:
      assert False  # should have been checked for in SetFilter.
  return not is_filtered
1090
1091
1092
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of
  "cpplint(category)"  comments on the offending line.  These are
  parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Select the line format expected by the configured consumer
  # (Visual Studio, Eclipse, or plain emacs-style output).
  fmt = '%s:%s:  %s  [%s] [%d]\n'
  if _cpplint_state.output_format == 'vs7':
    fmt = '%s(%s):  %s  [%s] [%d]\n'
  elif _cpplint_state.output_format == 'eclipse':
    fmt = '%s:%s: warning: %s  [%s] [%d]\n'
  sys.stderr.write(fmt % (filename, linenum, message, category, confidence))
1125
1126
1127
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Match a single C style comment on the same line.
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
# Matches multi-line C style comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of space removals tools so we can handle comments inside
# statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
# Note the alternatives are tried in order: trailing comment first, then
# comment followed by whitespace, then whitespace-preceded comment, then a
# bare comment.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
    _RE_PATTERN_C_COMMENTS + r')')
1145
1146
1147
def IsCppString(line):
  """Tell whether a string constant is still open at the end of line.

  Single-line and multi-line comments are not taken into account.

  Args:
    line: is a partial line of code starting from the 0..n.

  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so that \\" is not mistaken
  # for an escaped quote.
  sanitized = line.replace(r'\\', 'XX')
  # Count quotes that are neither escaped nor character literals ('"').
  unescaped_quotes = (sanitized.count('"') - sanitized.count(r'\"') -
                      sanitized.count("'\"'"))
  # An odd count means a double-quoted string is still open.
  return unescaped_quotes % 2 == 1
1162
1163
1164
def CleanseRawStrings(raw_lines):
  """Replaces C++11 raw string literals with empty "" strings.

    Before:
      static const char kData[] = R"(
          multi-line string
          )";

    After:
      static const char kData[] = ""
          (replaced by blank line)
          "";

  Args:
    raw_lines: list of raw lines.

  Returns:
    list of lines with C++11 raw strings replaced by empty strings.
  """
  end_marker = None  # closing sequence of the raw string we are inside
  cleansed = []
  for line in raw_lines:
    if end_marker:
      # We are inside a raw string; look for its terminator on this line.
      end = line.find(end_marker)
      if end >= 0:
        # Terminator found: keep the leading whitespace, substitute ""
        # for the string body, and resume normal processing.
        leading_space = Match(r'^(\s*)\S', line)
        line = leading_space.group(1) + '""' + line[end + len(end_marker):]
        end_marker = None
      else:
        # Still inside the raw string; emit a "" placeholder line.
        line = '""'

    # Handle raw string openers.  This loops because several raw strings
    # may begin (and end) on the same line.
    while end_marker is None:
      # See 2.14.15 [lex.string] for the raw string syntax.
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
      if not matched:
        break
      end_marker = ')' + matched.group(2) + '"'

      end = matched.group(3).find(end_marker)
      if end >= 0:
        # Raw string ended on same line
        line = (matched.group(1) + '""' +
                matched.group(3)[end + len(end_marker):])
        end_marker = None
      else:
        # Start of a multi-line raw string
        line = matched.group(1) + '""'

    cleansed.append(line)

  # TODO(unknown): if end_marker is not None here, we might want to
  # emit a warning for unterminated string.
  return cleansed
1228
1229
1230
def FindNextMultiLineCommentStart(lines, lineix):
  """Return the index of the next line that opens an unterminated /* comment."""
  ix = lineix
  total = len(lines)
  while ix < total:
    stripped = lines[ix].strip()
    # Only a '/*' that is not closed on the same line starts a
    # multi-line comment.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
    ix += 1
  return total
1239
1240
1241
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment; return the index of the line closing it."""
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
1248
1249
1250
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Replace lines [begin, end) with dummy '/**/' comments, in place.

  Keeping the lines non-empty avoids spurious blank-line warnings later.
  """
  for index in range(begin, end):
    lines[index] = '/**/'
1256
1257
1258
def RemoveMultiLineComments(filename, lines, error):
  """Blank out all multi-line (C-style) comments in lines, in place."""
  lineix = 0
  total = len(lines)
  while lineix < total:
    begin = FindNextMultiLineCommentStart(lines, lineix)
    if begin >= total:
      return
    end = FindNextMultiLineCommentEnd(lines, begin)
    if end >= total:
      # Unterminated comment: report it and stop scanning.
      error(filename, begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, begin, end + 1)
    lineix = end + 1
1272
1273
1274
def CleanseComments(line):
  """Strips //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  slashes = line.find('//')
  # Ignore '//' that occurs inside a string literal.
  if slashes != -1 and not IsCppString(line[:slashes]):
    line = line[:slashes].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
1288
1289
1290
class CleansedLines(object):
  """Holds 4 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments.
  2) lines member contains lines without comments.
  3) raw_lines member contains all the lines without processing.
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
     strings removed.
  All these members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.elided = []
    self.lines = []
    self.raw_lines = lines
    self.num_lines = len(lines)
    # Raw strings are removed first, then comments; 'elided' additionally
    # collapses string/char literals to empty "" / '' placeholders.
    self.lines_without_raw_strings = CleanseRawStrings(lines)
    for linenum in range(len(self.lines_without_raw_strings)):
      self.lines.append(CleanseComments(
          self.lines_without_raw_strings[linenum]))
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
      self.elided.append(CleanseComments(elided))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    # #include lines keep their <...> / "..." contents untouched.
    if _RE_PATTERN_INCLUDE.match(elided):
      return elided

    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)

    # Replace quoted strings and digit separators.  Both single quotes
    # and double quotes are processed in the same loop, otherwise
    # nested quotes wouldn't work.
    collapsed = ''
    while True:
      # Find the first quote character
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
      if not match:
        collapsed += elided
        break
      head, quote, tail = match.groups()

      if quote == '"':
        # Collapse double quoted strings
        second_quote = tail.find('"')
        if second_quote >= 0:
          collapsed += head + '""'
          elided = tail[second_quote + 1:]
        else:
          # Unmatched double quote, don't bother processing the rest
          # of the line since this is probably a multiline string.
          collapsed += elided
          break
      else:
        # Found single quote, check nearby text to eliminate digit separators.
        #
        # There is no special handling for floating point here, because
        # the integer/fractional/exponent parts would all be parsed
        # correctly as long as there are digits on both sides of the
        # separator.  So we are fine as long as we don't see something
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
          # C++14 digit separator (e.g. 1'000'000): strip the quotes and
          # keep the digits.
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
          collapsed += head + match_literal.group(1).replace("'", '')
          elided = match_literal.group(2)
        else:
          second_quote = tail.find('\'')
          if second_quote >= 0:
            collapsed += head + "''"
            elided = tail[second_quote + 1:]
          else:
            # Unmatched single quote
            collapsed += elided
            break

    return collapsed
1383
1384
1385
def FindEndOfExpressionInLine(line, startpos, stack):
  """Find the position just after the end of current parenthesized expression.

  Args:
    line: a CleansedLines line.
    startpos: start searching at this position.
    stack: nesting stack at startpos.

  Returns:
    On finding matching end: (index just after matching end, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at end of this line)
  """
  # range() replaces the Python 2-only xrange(); when driving a for loop
  # the two behave identically, so this also works on Python 3.
  for i in range(startpos, len(line)):
    char = line[i]
    if char in '([{':
      # Found start of parenthesized expression, push to expression stack
      stack.append(char)
    elif char == '<':
      # Found potential start of template argument list
      if i > 0 and line[i - 1] == '<':
        # Left shift operator
        if stack and stack[-1] == '<':
          stack.pop()
          if not stack:
            return (-1, None)
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
        # operator<, don't add to stack
        continue
      else:
        # Tentative start of template argument list
        stack.append('<')
    elif char in ')]}':
      # Found end of parenthesized expression.
      #
      # If we are currently expecting a matching '>', the pending '<'
      # must have been an operator.  Remove them from expression stack.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((stack[-1] == '(' and char == ')') or
          (stack[-1] == '[' and char == ']') or
          (stack[-1] == '{' and char == '}')):
        stack.pop()
        if not stack:
          return (i + 1, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == '>':
      # Found potential end of template argument list.

      # Ignore "->" and operator functions
      if (i > 0 and
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
        continue

      # Pop the stack if there is a matching '<'.  Otherwise, ignore
      # this '>' since it must be an operator.
      if stack:
        if stack[-1] == '<':
          stack.pop()
          if not stack:
            return (i + 1, None)
    elif char == ';':
      # Found something that look like end of statements.  If we are currently
      # expecting a '>', the matching '<' must have been an operator, since
      # template argument list should not contain statements.
      while stack and stack[-1] == '<':
        stack.pop()
      if not stack:
        return (-1, None)

  # Did not find end of expression or unbalanced parentheses on this line
  return (-1, stack)
1461
1462
1463
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [ or <, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
  linenum/pos that correspond to the closing of the expression.

  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
  Ideally we would want to index all opening and closing parentheses once
  and have CloseExpression be just a simple lookup, but due to preprocessor
  tricks, this is not so easy.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  # '<<' and '<=' can never open a template argument list; bail out early.
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
    return (line, clean_lines.NumLines(), -1)

  # Try to close the expression on the starting line first.
  (close_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
  if close_pos > -1:
    return (line, linenum, close_pos)

  # Otherwise keep scanning forward, carrying the nesting stack along.
  while stack and linenum < clean_lines.NumLines() - 1:
    linenum += 1
    line = clean_lines.elided[linenum]
    (close_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
    if close_pos > -1:
      return (line, linenum, close_pos)

  # Ran out of lines without closing the expression; give up.
  return (line, clean_lines.NumLines(), -1)
1505
1506
1507
def FindStartOfExpressionInLine(line, endpos, stack):
  """Find position at the matching start of current expression.

  This is almost the reverse of FindEndOfExpressionInLine, but note
  that the input position and returned position differs by 1.

  Args:
    line: a CleansedLines line.
    endpos: start searching at this position.
    stack: nesting stack at endpos.

  Returns:
    On finding matching start: (index at matching start, None)
    On finding an unclosed expression: (-1, None)
    Otherwise: (-1, new stack at beginning of this line)
  """
  pos = endpos
  while pos >= 0:
    char = line[pos]
    if char in ')]}':
      # A closer seen while scanning backwards opens a new nesting level.
      stack.append(char)
    elif char == '>':
      # A '>' ends a template list unless it is part of '->', '>=', or an
      # operator function name -- those are skipped as operators.
      if (pos > 0 and
          (line[pos - 1] == '-' or
           Match(r'\s>=\s', line[pos - 1:]) or
           Search(r'\boperator\s*$', line[0:pos]))):
        pos -= 1
      else:
        stack.append('>')
    elif char == '<':
      if pos > 0 and line[pos - 1] == '<':
        # Left shift operator
        pos -= 1
      elif stack and stack[-1] == '>':
        # Matches a pending '>': close one template nesting level.
        stack.pop()
        if not stack:
          return (pos, None)
      # A lone '<' with no pending '>' must be a comparison operator.
    elif char in '([{':
      # Found start of expression.
      #
      # Any '>' still pending must have been comparison operators;
      # discard them.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)
      if ((char == '(' and stack[-1] == ')') or
          (char == '[' and stack[-1] == ']') or
          (char == '{' and stack[-1] == '}')):
        stack.pop()
        if not stack:
          return (pos, None)
      else:
        # Mismatched parentheses
        return (-1, None)
    elif char == ';':
      # Statements never occur inside template argument lists, so any
      # pending '>' must have been an operator.
      while stack and stack[-1] == '>':
        stack.pop()
      if not stack:
        return (-1, None)

    pos -= 1

  return (-1, stack)
1582
1583
1584
def ReverseCloseExpression(clean_lines, linenum, pos):
  """If input points to ) or } or ] or >, finds the position that opens it.

  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
  linenum/pos that correspond to the opening of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
    (line, 0, -1) if we never find the matching opening brace.  Note
    we ignore strings and comments when matching; and the line we
    return is the 'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  if line[pos] not in ')}]>':
    return (line, 0, -1)

  # Try to locate the opener on the same line first.
  (open_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
  if open_pos > -1:
    return (line, linenum, open_pos)

  # Walk backwards through earlier lines, carrying the nesting stack.
  while stack and linenum > 0:
    linenum -= 1
    line = clean_lines.elided[linenum]
    (open_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
    if open_pos > -1:
      return (line, linenum, open_pos)

  # Reached the top of the file without finding the opener; give up.
  return (line, 0, -1)
1620
1621
1622
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: The file's lines; index 0 is a dummy entry, so the notice is
      searched for in the first 10 real lines.
    error: The function to call with any errors found.
  """
  # range() replaces the Python 2-only xrange(); iteration is identical,
  # so this also works on Python 3.
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I):
      break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found.  '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
1633
1634
1635
def GetIndentLevel(line):
  """Return the number of leading spaces in line.

  Args:
    line: A string to check.

  Returns:
    An integer count of leading spaces, possibly zero.
  """
  indent = Match(r'^( *)\S', line)
  # Blank / all-whitespace lines have no indent level.
  return len(indent.group(1)) if indent else 0
1649
1650
1651
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.

  """
  # Restore the original name if cpplint was invoked from Emacs's
  # flymake, which renames files to foo_flymake.h or .flymake/foo.h.
  filename = re.sub(r'_flymake\.h$', '.h', filename)
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
  # Replace 'c++' with 'cpp'.
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')

  file_path_from_root = FileInfo(filename).RepositoryName()
  if _root:
    # Strip the user-configured root prefix, if any.
    file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
1675
1676
1677
def CheckForHeaderGuard(filename, clean_lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    clean_lines: A CleansedLines instance containing the file.
    error: The function to call with any errors found.
  """

  # Don't check for header guards if there are error suppression
  # comments somewhere in this file.
  #
  # Because this is silencing a warning for a nonexistent line, we
  # only support the very specific NOLINT(build/header_guard) syntax,
  # and not the general NOLINT or NOLINT(*) syntax.
  raw_lines = clean_lines.lines_without_raw_strings
  for i in raw_lines:
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
      return

  # Expected guard name, e.g. PATH_TO_FILE_H_
  cppvar = GetHeaderGuardCPPVariable(filename)

  # One pass over the file: record the FIRST #ifndef and #define
  # arguments, and the LAST #endif line (with its line numbers).
  ifndef = ''
  ifndef_linenum = 0
  define = ''
  endif = ''
  endif_linenum = 0
  for linenum, line in enumerate(raw_lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  # A guard requires both directives naming the same symbol.
  if not ifndef or not define or ifndef != define:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  # Check for "//" comments on endif line.
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
                          error)
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
  if match:
    if match.group(1) == '_':
      # Issue low severity warning for deprecated double trailing underscore
      error(filename, endif_linenum, 'build/header_guard', 0,
            '#endif line should be "#endif  // %s"' % cppvar)
    return

  # Didn't find the corresponding "//" comment.  If this file does not
  # contain any "//" comments at all, it could be that the compiler
  # only wants "/**/" comments, look for those instead.
  no_single_line_comments = True
  for i in xrange(1, len(raw_lines) - 1):
    line = raw_lines[i]
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
      no_single_line_comments = False
      break

  if no_single_line_comments:
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
    if match:
      if match.group(1) == '_':
        # Low severity warning for double trailing underscore
        error(filename, endif_linenum, 'build/header_guard', 0,
              '#endif line should be "#endif  /* %s */"' % cppvar)
      return

  # Didn't find anything
  error(filename, endif_linenum, 'build/header_guard', 5,
        '#endif line should be "#endif  // %s"' % cppvar)
1773
1774
1775
def CheckHeaderFileIncluded(filename, include_state, error):
  """Logs an error if a .cc file does not include its header."""

  # Test files are exempt from this check.
  if filename.endswith(('_test.cc', '_unittest.cc')):
    return

  fileinfo = FileInfo(filename)
  # Expected header name: same path with the trailing "cc" turned into "h".
  headerfile = filename[:len(filename) - 2] + 'h'
  if not os.path.exists(headerfile):
    return
  headername = FileInfo(headerfile).RepositoryName()

  # Remember the line of the very first include so the error message can
  # point at a sensible location.
  first_include = 0
  for section_list in include_state.include_list:
    for included in section_list:
      # Substring match in either direction counts as "includes its header".
      if headername in included[0] or included[0] in headername:
        return
      if not first_include:
        first_include = included[1]

  error(filename, first_include, 'build/include', 5,
        '%s should include its header file %s' % (fileinfo.RepositoryName(),
                                                  headername))
1798
1799
1800
def CheckForBadCharacters(filename, lines, error):
  """Reports every line containing characters no source file should have.

  Two problems are flagged per line:

  1. The Unicode replacement character (U+FFFD), which usually means the
  file held invalid UTF-8 (or, less likely, a literal replacement
  character).  Invalid UTF-8 adjacent to a newline can also throw off
  line numbering.

  2. NUL bytes, which are problematic for some tools.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for idx, text in enumerate(lines):
    if u'\ufffd' in text:
      error(filename, idx, 'readability/utf8', 5,
            'Line contains invalid UTF-8 (or Unicode replacement character).')
    if '\0' in text:
      error(filename, idx, 'readability/nul', 5, 'Line contains NUL byte.')
1823
1824
1825
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if the file lacks a terminating newline character.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """

  # The lines array was built by appending two newlines to the file's
  # text and splitting on '\n', so a file that properly ends in '\n'
  # yields an empty second-to-last element.  A short array or a
  # non-empty second-to-last element means the final newline is missing.
  if len(lines) < 3 or lines[-2]:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
1841
1842
1843
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Warns about /* ... */ comments and "..." strings that span lines.

  Single-line /* ... */ comments are legitimate (e.g. inside macros),
  but this lint program handles multi-line C-style comments and
  multi-line string constants poorly, so both are reported even though
  the style guide does not strictly prohibit them.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  text = clean_lines.elided[linenum]

  # Escaped backslashes are harmless, but their second slash could later
  # be misread as the start of an escaped quote, so strip them first.
  text = text.replace('\\\\', '')

  if text.count('/*') > text.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings.  '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')

  # An odd number of unescaped double quotes means the string literal
  # continues onto the next line.
  if (text.count('"') - text.count('\\"')) % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found.  This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings.  '
          'Use C++11 raw strings or concatenation instead.')
1879
1880
1881
# (non-threadsafe name, thread-safe alternative, validation pattern)
#
# The validation pattern is used to eliminate false positives such as:
#  _rand();               // false positive due to substring match.
#  ->rand();              // some member function rand().
#  ACMRandom rand(seed);  // some variable named rand.
#  ISAACRandom rand();    // another variable named rand.
#
# Basically we require the return value of these functions to be used
# in some expression context on the same line by matching on some
# operator before the function name.  This eliminates constructors and
# member function calls.

# Matches an operator, '(' or '<' (optionally followed by whitespace),
# or '>' followed by whitespace, immediately before the function name.
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
# Consumed by CheckPosixThreading below.
_THREADING_LIST = (
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
    ('strtok(', 'strtok_r(',
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
    )
1909
1910
1911
def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  A lot of code predates the POSIX threading extensions, and engineers
  relying on older experience still reach for the classic non-reentrant
  functions.  When such a call is spotted, steer them toward the
  corresponding thread-safe (_r) variant.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  text = clean_lines.elided[linenum]
  for unsafe_func, safe_func, validation_pattern in _THREADING_LIST:
    # The validation pattern weeds out substring matches, member
    # functions and variables that merely share the name.
    if Search(validation_pattern, text):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
1935
1936
1937
def CheckVlogArguments(filename, clean_lines, linenum, error):
  """Checks that VLOG() is only used for defining a logging level.

  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
  VLOG(FATAL) are not.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # A symbolic severity inside VLOG() is the giveaway; numeric levels
  # do not match this pattern.
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)',
            clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/vlog', 5,
          'VLOG() should be used with numeric verbosity level.  '
          'Use LOG() if you want symbolic severity levels.')
1954
1955
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
# Consumed by CheckInvalidIncrement below.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')
1959
1960
1961
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  In a function such as:
    void increment_counter(int* count) {
      *count++;
    }
  the statement advances the pointer rather than the pointed-to value,
  because ++ binds tighter than *.  The intended forms are ++*count,
  (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
1981
1982
1983
def IsMacroDefinition(clean_lines, linenum):
  """True if this line starts or continues a #define.

  A line is part of a macro definition when it begins with #define, or
  when the previous line ends with a backslash continuation.
  """
  if Search(r'^#define', clean_lines[linenum]):
    return True
  # A trailing backslash on the previous line continues the macro.
  return bool(linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]))
1991
1992
1993
def IsForwardClassDeclaration(clean_lines, linenum):
  """Matches forward declarations like "class Foo;" (possibly templated)."""
  line = clean_lines[linenum]
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', line)
1995
1996
1997
class _BlockInfo(object):
1998
  """Stores information about a generic block of code."""
1999
2000
  def __init__(self, seen_open_brace):
2001
    self.seen_open_brace = seen_open_brace
2002
    self.open_parentheses = 0
2003
    self.inline_asm = _NO_ASM
2004
    self.check_namespace_indentation = False
2005
2006
  def CheckBegin(self, filename, clean_lines, linenum, error):
2007
    """Run checks that applies to text up to the opening brace.
2008

2009
    This is mostly for checking the text after the class identifier
2010
    and the "{", usually where the base class is specified.  For other
2011
    blocks, there isn't much to check, so we always pass.
2012

2013
    Args:
2014
      filename: The name of the current file.
2015
      clean_lines: A CleansedLines instance containing the file.
2016
      linenum: The number of the line to check.
2017
      error: The function to call with any errors found.
2018
    """
2019
    pass
2020
2021
  def CheckEnd(self, filename, clean_lines, linenum, error):
2022
    """Run checks that applies to text after the closing brace.
2023

2024
    This is mostly used for checking end of namespace comments.
2025

2026
    Args:
2027
      filename: The name of the current file.
2028
      clean_lines: A CleansedLines instance containing the file.
2029
      linenum: The number of the line to check.
2030
      error: The function to call with any errors found.
2031
    """
2032
    pass
2033
2034
  def IsBlockInfo(self):
2035
    """Returns true if this block is a _BlockInfo.
2036

2037
    This is convenient for verifying that an object is an instance of
2038
    a _BlockInfo, but not an instance of any of the derived classes.
2039

2040
    Returns:
2041
      True for this class, False for derived classes.
2042
    """
2043
    return self.__class__ == _BlockInfo
2044
2045
2046
class _ExternCInfo(_BlockInfo):
  """Stores information about an 'extern "C"' block."""

  def __init__(self):
    # seen_open_brace starts True for extern "C" blocks.
    _BlockInfo.__init__(self, True)
2051
2052
2053
class _ClassInfo(_BlockInfo):
  """Stores information about a class."""

  def __init__(self, name, class_or_struct, clean_lines, linenum):
    _BlockInfo.__init__(self, False)
    self.name = name
    self.starting_linenum = linenum
    self.is_derived = False
    self.check_namespace_indentation = True
    # Struct members default to public access, class members to private.
    if class_or_struct == 'struct':
      self.access = 'public'
      self.is_struct = True
    else:
      self.access = 'private'
      self.is_struct = False

    # Remember initial indentation level for this class.  Using raw_lines here
    # instead of elided to account for leading comments.
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])

    # Try to find the end of the class.  This will be confused by things like:
    #   class A {
    #   } *x = { ...
    #
    # But it's still good enough for CheckSectionSpacing.
    self.last_line = 0
    depth = 0
    for i in range(linenum, clean_lines.NumLines()):
      line = clean_lines.elided[i]
      depth += line.count('{') - line.count('}')
      if not depth:
        self.last_line = i
        break

  def CheckBegin(self, filename, clean_lines, linenum, error):
    """Record whether this class derives from a base class."""
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
      self.is_derived = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check DISALLOW macro placement and closing-brace alignment."""
    # If there is a DISALLOW macro, it should appear near the end of
    # the class.  Scan backwards from the closing brace to find it.
    seen_last_thing_in_class = False
    for i in xrange(linenum - 1, self.starting_linenum, -1):
      match = Search(
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
          self.name + r'\)',
          clean_lines.elided[i])
      if match:
        if seen_last_thing_in_class:
          error(filename, i, 'readability/constructors', 3,
                match.group(1) + ' should be the last thing in the class')
        break

      # Any non-blank line below the macro means it was not last.
      if not Match(r'^\s*$', clean_lines.elided[i]):
        seen_last_thing_in_class = True

    # Check that closing brace is aligned with beginning of the class.
    # Only do this if the closing brace is indented by only whitespaces.
    # This means we will not check single-line class definitions.
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
    if indent and len(indent.group(1)) != self.class_indent:
      if self.is_struct:
        parent = 'struct ' + self.name
      else:
        parent = 'class ' + self.name
      error(filename, linenum, 'whitespace/indent', 3,
            'Closing brace should be aligned with beginning of %s' % parent)
2121
2122
2123
class _NamespaceInfo(_BlockInfo):
  """Stores information about a namespace."""

  def __init__(self, name, linenum):
    _BlockInfo.__init__(self, False)
    # Anonymous namespaces are tracked with an empty name.
    self.name = name or ''
    self.starting_linenum = linenum
    self.check_namespace_indentation = True

  def CheckEnd(self, filename, clean_lines, linenum, error):
    """Check end of namespace comments."""
    line = clean_lines.raw_lines[linenum]

    # Check how many lines is enclosed in this namespace.  Don't issue
    # warning for missing namespace comments if there aren't enough
    # lines.  However, do apply checks if there is already an end of
    # namespace comment and it's incorrect.
    #
    # TODO(unknown): We always want to check end of namespace comments
    # if a namespace is large, but sometimes we also want to apply the
    # check if a short namespace contained nontrivial things (something
    # other than forward declarations).  There is currently no logic on
    # deciding what these nontrivial things are, so this check is
    # triggered by namespace size only, which works most of the time.
    if (linenum - self.starting_linenum < 10
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
      return

    # Look for matching comment at end of namespace.
    #
    # Note that we accept C style "/* */" comments for terminating
    # namespaces, so that code that terminate namespaces inside
    # preprocessor macros can be cpplint clean.
    #
    # We also accept stuff like "// end of namespace <name>." with the
    # period at the end.
    #
    # Besides these, we don't accept anything else, otherwise we might
    # get false negatives when existing comment is a substring of the
    # expected namespace.
    if self.name:
      # Named namespace
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
                    r'[\*/\.\\\s]*$'),
                   line):
        error(filename, linenum, 'readability/namespace', 5,
              'Namespace should be terminated with "// namespace %s"' %
              self.name)
    else:
      # Anonymous namespace
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
        # mention "// anonymous namespace" as an acceptable form
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"'
                ' or "// anonymous namespace"')
        else:
          error(filename, linenum, 'readability/namespace', 5,
                'Anonymous namespace should be terminated with "// namespace"')
2183
2184
2185
class _PreprocessorInfo(object):
2186
  """Stores checkpoints of nesting stacks when #if/#else is seen."""
2187
2188
  def __init__(self, stack_before_if):
2189
    # The entire nesting stack before #if
2190
    self.stack_before_if = stack_before_if
2191
2192
    # The entire nesting stack up to #else
2193
    self.stack_before_else = []
2194
2195
    # Whether we have already seen #else or #elif
2196
    self.seen_else = False
2197
2198
2199
class NestingState(object):
2200
  """Holds states related to parsing braces."""
2201
2202
  def __init__(self):
    # Stack for tracking all braces.  An object is pushed whenever we
    # see a "{", and popped when we see a "}".  Only 3 types of
    # objects are possible:
    # - _ClassInfo: a class or struct.
    # - _NamespaceInfo: a namespace.
    # - _BlockInfo: some other type of block.
    self.stack = []

    # Top of the previous stack before each Update().
    #
    # Because the nesting_stack is updated at the end of each line, we
    # had to do some convoluted checks to find out what is the current
    # scope at the beginning of the line.  This check is simplified by
    # saving the previous top of nesting stack.
    #
    # We could save the full stack, but we only need the top.  Copying
    # the full nesting stack would slow down cpplint by ~10%.
    self.previous_stack_top = []

    # Stack of _PreprocessorInfo objects, one checkpoint per currently
    # open #if block; maintained by UpdatePreprocessor().
    self.pp_stack = []
2224
2225
  def SeenOpenBrace(self):
    """Check if we have seen the opening brace for the innermost block.

    Returns:
      True if we have seen the opening brace, False if the innermost
      block is still expecting an opening brace.
    """
    if not self.stack:
      return True
    return self.stack[-1].seen_open_brace
2233
2234
  def InNamespaceBody(self):
    """Check if we are currently one level inside a namespace body.

    Returns:
      True if top of the stack is a namespace block, False otherwise.
    """
    if not self.stack:
      return self.stack
    return isinstance(self.stack[-1], _NamespaceInfo)
2241
2242
  def InExternC(self):
    """Check if we are currently one level inside an 'extern "C"' block.

    Returns:
      True if top of the stack is an extern block, False otherwise.
    """
    if not self.stack:
      return self.stack
    return isinstance(self.stack[-1], _ExternCInfo)
2249
2250
  def InClassDeclaration(self):
    """Check if we are currently one level inside a class or struct declaration.

    Returns:
      True if top of the stack is a class/struct, False otherwise.
    """
    if not self.stack:
      return self.stack
    return isinstance(self.stack[-1], _ClassInfo)
2257
2258
  def InAsmBlock(self):
    """Check if we are currently one level inside an inline ASM block.

    Returns:
      True if the top of the stack is a block containing inline ASM.
    """
    if not self.stack:
      return self.stack
    return self.stack[-1].inline_asm != _NO_ASM
2265
2266
  def InTemplateArgumentList(self, clean_lines, linenum, pos):
    """Check if current position is inside template argument list.

    Args:
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      pos: position just after the suspected template argument.
    Returns:
      True if (linenum, pos) is inside template arguments.
    """
    # Scan forward (possibly across lines) for the first character that
    # disambiguates between a template argument list and something else.
    while linenum < clean_lines.NumLines():
      # Find the earliest character that might indicate a template argument
      line = clean_lines.elided[linenum]
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
      if not match:
        # Nothing decisive on this line; continue on the next one.
        linenum += 1
        pos = 0
        continue
      token = match.group(1)
      pos += len(match.group(0))

      # These things do not look like template argument list:
      #   class Suspect {
      #   class Suspect x; }
      if token in ('{', '}', ';'): return False

      # These things look like template argument list:
      #   template <class Suspect>
      #   template <class Suspect = default_value>
      #   template <class Suspect[]>
      #   template <class Suspect...>
      if token in ('>', '=', '[', ']', '.'): return True

      # Check if token is an unmatched '<'.
      # If not, move on to the next character.
      if token != '<':
        pos += 1
        if pos >= len(line):
          linenum += 1
          pos = 0
        continue

      # We can't be sure if we just find a single '<', and need to
      # find the matching '>'.
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
      if end_pos < 0:
        # Not sure if template argument list or syntax error in file
        return False
      # Resume scanning just past the matched '>'.
      linenum = end_line
      pos = end_pos
    # Ran off the end of the file without a decisive token.
    return False
2317
2318
  def UpdatePreprocessor(self, line):
    """Update preprocessor stack.

    We need to handle preprocessors due to classes like this:
      #ifdef SWIG
      struct ResultDetailsPageElementExtensionPoint {
      #else
      struct ResultDetailsPageElementExtensionPoint : public Extension {
      #endif

    We make the following assumptions (good enough for most files):
    - Preprocessor condition evaluates to true from #if up to first
      #else/#elif/#endif.

    - Preprocessor condition evaluates to false from #else/#elif up
      to #endif.  We still perform lint checks on these lines, but
      these do not affect nesting stack.

    Args:
      line: current line to check.
    """
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
      # Beginning of #if block, save the nesting stack here.  The saved
      # stack will allow us to restore the parsing state in the #else case.
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
    elif Match(r'^\s*#\s*(else|elif)\b', line):
      # Beginning of #else block
      if self.pp_stack:
        if not self.pp_stack[-1].seen_else:
          # This is the first #else or #elif block.  Remember the
          # whole nesting stack up to this point.  This is what we
          # keep after the #endif.
          self.pp_stack[-1].seen_else = True
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)

        # Restore the stack to how it was before the #if
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
      else:
        # TODO(unknown): unexpected #else, issue warning?
        pass
    elif Match(r'^\s*#\s*endif\b', line):
      # End of #if or #else blocks.
      if self.pp_stack:
        # If we saw an #else, we will need to restore the nesting
        # stack to its former state before the #else, otherwise we
        # will just continue from where we left off.
        if self.pp_stack[-1].seen_else:
          # Here we can just use a shallow copy since we are the last
          # reference to it.
          self.stack = self.pp_stack[-1].stack_before_else
        # Drop the corresponding #if
        self.pp_stack.pop()
      else:
        # TODO(unknown): unexpected #endif, issue warning?
        pass
2373
2374
  # TODO(unknown): Update() is too long, but we will refactor later.
2375
  def Update(self, filename, clean_lines, linenum, error):
2376
    """Update nesting state with current line.
2377

2378
    Args:
2379
      filename: The name of the current file.
2380
      clean_lines: A CleansedLines instance containing the file.
2381
      linenum: The number of the line to check.
2382
      error: The function to call with any errors found.
2383
    """
2384
    line = clean_lines.elided[linenum]
2385
2386
    # Remember top of the previous nesting stack.
2387
    #
2388
    # The stack is always pushed/popped and not modified in place, so
2389
    # we can just do a shallow copy instead of copy.deepcopy.  Using
2390
    # deepcopy would slow down cpplint by ~28%.
2391
    if self.stack:
2392
      self.previous_stack_top = self.stack[-1]
2393
    else:
2394
      self.previous_stack_top = None
2395
2396
    # Update pp_stack
2397
    self.UpdatePreprocessor(line)
2398
2399
    # Count parentheses.  This is to avoid adding struct arguments to
2400
    # the nesting stack.
2401
    if self.stack:
2402
      inner_block = self.stack[-1]
2403
      depth_change = line.count('(') - line.count(')')
2404
      inner_block.open_parentheses += depth_change
2405
2406
      # Also check if we are starting or ending an inline assembly block.
2407
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
2408
        if (depth_change != 0 and
2409
            inner_block.open_parentheses == 1 and
2410
            _MATCH_ASM.match(line)):
2411
          # Enter assembly block
2412
          inner_block.inline_asm = _INSIDE_ASM
2413
        else:
2414
          # Not entering assembly block.  If previous line was _END_ASM,
2415
          # we will now shift to _NO_ASM state.
2416
          inner_block.inline_asm = _NO_ASM
2417
      elif (inner_block.inline_asm == _INSIDE_ASM and
2418
            inner_block.open_parentheses == 0):
2419
        # Exit assembly block
2420
        inner_block.inline_asm = _END_ASM
2421
2422
    # Consume namespace declaration at the beginning of the line.  Do
2423
    # this in a loop so that we catch same line declarations like this:
2424
    #   namespace proto2 { namespace bridge { class MessageSet; } }
2425
    while True:
2426
      # Match start of namespace.  The "\b\s*" below catches namespace
2427
      # declarations even if it weren't followed by a whitespace, this
2428
      # is so that we don't confuse our namespace checker.  The
2429
      # missing spaces will be flagged by CheckSpacing.
2430
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
2431
      if not namespace_decl_match:
2432
        break
2433
2434
      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
2435
      self.stack.append(new_namespace)
2436
2437
      line = namespace_decl_match.group(2)
2438
      if line.find('{') != -1:
2439
        new_namespace.seen_open_brace = True
2440
        line = line[line.find('{') + 1:]
2441
2442
    # Look for a class declaration in whatever is left of the line
2443
    # after parsing namespaces.  The regexp accounts for decorated classes
2444
    # such as in:
2445
    #   class LOCKABLE API Object {
2446
    #   };
2447
    class_decl_match = Match(
2448
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
2449
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
2450
        r'(.*)$', line)
2451
    if (class_decl_match and
2452
        (not self.stack or self.stack[-1].open_parentheses == 0)):
2453
      # We do not want to accept classes that are actually template arguments:
2454
      #   template <class Ignore1,
2455
      #             class Ignore2 = Default<Args>,
2456
      #             template <Args> class Ignore3>
2457
      #   void Function() {};
2458
      #
2459
      # To avoid template argument cases, we scan forward and look for
2460
      # an unmatched '>'.  If we see one, assume we are inside a
2461
      # template argument list.
2462
      end_declaration = len(class_decl_match.group(1))
2463
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
2464
        self.stack.append(_ClassInfo(
2465
            class_decl_match.group(3), class_decl_match.group(2),
2466
            clean_lines, linenum))
2467
        line = class_decl_match.group(4)
2468
2469
    # If we have not yet seen the opening brace for the innermost block,
2470
    # run checks here.
2471
    if not self.SeenOpenBrace():
2472
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
2473
2474
    # Update access control if we are inside a class/struct
2475
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
2476
      classinfo = self.stack[-1]
2477
      access_match = Match(
2478
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
2479
          r':(?:[^:]|$)',
2480
          line)
2481
      if access_match:
2482
        classinfo.access = access_match.group(2)
2483
2484
        # Check that access keywords are indented +1 space.  Skip this
2485
        # check if the keywords are not preceded by whitespaces.
2486
        indent = access_match.group(1)
2487
        if (len(indent) != classinfo.class_indent + 1 and
2488
            Match(r'^\s*$', indent)):
2489
          if classinfo.is_struct:
2490
            parent = 'struct ' + classinfo.name
2491
          else:
2492
            parent = 'class ' + classinfo.name
2493
          slots = ''
2494
          if access_match.group(3):
2495
            slots = access_match.group(3)
2496
          error(filename, linenum, 'whitespace/indent', 3,
2497
                '%s%s: should be indented +1 space inside %s' % (
2498
                    access_match.group(2), slots, parent))
2499
2500
    # Consume braces or semicolons from what's left of the line
2501
    while True:
2502
      # Match first brace, semicolon, or closed parenthesis.
2503
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
2504
      if not matched:
2505
        break
2506
2507
      token = matched.group(1)
2508
      if token == '{':
2509
        # If namespace or class hasn't seen a opening brace yet, mark
2510
        # namespace/class head as complete.  Push a new block onto the
2511
        # stack otherwise.
2512
        if not self.SeenOpenBrace():
2513
          self.stack[-1].seen_open_brace = True
2514
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
2515
          self.stack.append(_ExternCInfo())
2516
        else:
2517
          self.stack.append(_BlockInfo(True))
2518
          if _MATCH_ASM.match(line):
2519
            self.stack[-1].inline_asm = _BLOCK_ASM
2520
2521
      elif token == ';' or token == ')':
2522
        # If we haven't seen an opening brace yet, but we already saw
2523
        # a semicolon, this is probably a forward declaration.  Pop
2524
        # the stack for these.
2525
        #
2526
        # Similarly, if we haven't seen an opening brace yet, but we
2527
        # already saw a closing parenthesis, then these are probably
2528
        # function arguments with extra "class" or "struct" keywords.
2529
        # Also pop these stack for these.
2530
        if not self.SeenOpenBrace():
2531
          self.stack.pop()
2532
      else:  # token == '}'
2533
        # Perform end of block checks and pop the stack.
2534
        if self.stack:
2535
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
2536
          self.stack.pop()
2537
      line = matched.group(2)
2538
2539
  def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk the nesting stack from the innermost scope outwards and
    # report the first class scope encountered.
    for frame in reversed(self.stack):
      if isinstance(frame, _ClassInfo):
        return frame
    return None
2550
2551
  def CheckCompletedBlocks(self, filename, error):
    """Checks that all classes and namespaces have been completely parsed.

    Call this when all lines in a file have been processed.
    Args:
      filename: The name of the current file.
      error: The function to call with any errors found.
    """
    # Note: This test can result in false positives if #ifdef constructs
    # get in the way of brace matching. See the testBuildClass test in
    # cpplint_unittest.py for an example of this.
    #
    # Anything still on the stack at end of file never saw its closing
    # brace, so its declaration is incomplete.
    for block in self.stack:
      if isinstance(block, _ClassInfo):
        error(filename, block.starting_linenum, 'build/class', 5,
              'Failed to find complete declaration of class %s' %
              block.name)
      elif isinstance(block, _NamespaceInfo):
        error(filename, block.starting_linenum, 'build/namespaces', 5,
              'Failed to find complete declaration of namespace %s' %
              block.name)
2571
2572
2573
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  nesting_state, error):
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++.  Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
  """

  # Remove comments from the line, but leave in strings for now.
  # The printf-format checks below must see the string literals.
  line = clean_lines.lines[linenum]

  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated.  Use %ll instead.')

  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional.  Try rewriting to avoid them.')

  # Remove escaped backslashes before looking for undefined escapes,
  # so that "\\%" (a literal backslash followed by %) is not flagged.
  line = line.replace('\\\\', '')

  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes.  Unescape them.')

  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]

  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')

  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard.  Use a comment.')

  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid.  Remove this line.')

  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')

  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')

  # Everything else in this function operates on class declarations.
  # Return early if the top of the nesting stack is not a class, or if
  # the class head is not completed yet.
  classinfo = nesting_state.InnermostClass()
  if not classinfo or not classinfo.seen_open_brace:
    return

  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]

  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style. Also look for
  # non-single-argument constructors which are also technically valid, but
  # strongly suggest something is wrong.
  # Group 1: "explicit " if present.  Group 2: raw argument list text.
  explicit_constructor_match = Match(
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
      r'\(((?:[^()]|\([^()]*\))*)\)'
      % re.escape(base_classname),
      line)

  if explicit_constructor_match:
    is_marked_explicit = explicit_constructor_match.group(1)

    if not explicit_constructor_match.group(2):
      constructor_args = []
    else:
      constructor_args = explicit_constructor_match.group(2).split(',')

    # collapse arguments so that commas in template parameter lists and function
    # argument parameter lists don't split arguments in two
    # (an argument with unbalanced '<'/'>' or '('/')' is re-joined with
    # the following comma-separated piece until it balances).
    i = 0
    while i < len(constructor_args):
      constructor_arg = constructor_args[i]
      while (constructor_arg.count('<') > constructor_arg.count('>') or
             constructor_arg.count('(') > constructor_arg.count(')')):
        constructor_arg += ',' + constructor_args[i + 1]
        del constructor_args[i + 1]
      constructor_args[i] = constructor_arg
      i += 1

    defaulted_args = [arg for arg in constructor_args if '=' in arg]
    noarg_constructor = (not constructor_args or  # empty arg list
                         # 'void' arg specifier
                         (len(constructor_args) == 1 and
                          constructor_args[0].strip() == 'void'))
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
                           not noarg_constructor) or
                          # all but at most one arg defaulted
                          (len(constructor_args) >= 1 and
                           not noarg_constructor and
                           len(defaulted_args) >= len(constructor_args) - 1))
    # Constructors taking std::initializer_list or a reference to the same
    # class (copy/move-style) are exempt from the explicit requirement.
    initializer_list_constructor = bool(
        onearg_constructor and
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
    copy_constructor = bool(
        onearg_constructor and
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
              % re.escape(base_classname), constructor_args[0].strip()))

    if (not is_marked_explicit and
        onearg_constructor and
        not initializer_list_constructor and
        not copy_constructor):
      if defaulted_args:
        error(filename, linenum, 'runtime/explicit', 5,
              'Constructors callable with one argument '
              'should be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 5,
              'Single-parameter constructors should be marked explicit.')
    elif is_marked_explicit and not onearg_constructor:
      if noarg_constructor:
        error(filename, linenum, 'runtime/explicit', 5,
              'Zero-parameter constructors should not be marked explicit.')
      else:
        error(filename, linenum, 'runtime/explicit', 0,
              'Constructors that require multiple arguments '
              'should not be marked explicit.')
2735
2736
2737
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]

  # if/for/while/switch headers follow their own, more liberal spacing
  # conventions.  When the line contains such a construct, narrow the
  # stricter function-call checks to the parenthesized condition only.
  fncall = line    # if there's no control flow construct, look at whole line
  control_patterns = (r'\bif\s*\((.*)\)\s*{',
                      r'\bfor\s*\((.*)\)\s*{',
                      r'\bwhile\s*\((.*)\)\s*[{;]',
                      r'\bswitch\s*\((.*)\)\s*{')
  for control_pattern in control_patterns:
    control_match = Search(control_pattern, line)
    if control_match:
      fncall = control_match.group(1)    # look inside the parens
      break

  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
  # a space before a ( when it's a function argument.  I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  #
  # Guard clauses: bail out on control structures and on pointers or
  # references to functions/arrays.
  if Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
            fncall):
    return
  if Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall):
    return
  if Search(r' \([^)]+\)\[[^\]]+\]', fncall):
    return

  if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
    error(filename, linenum, 'whitespace/parens', 4,
          'Extra space after ( in function call')
  elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
    error(filename, linenum, 'whitespace/parens', 2,
          'Extra space after (')

  space_before_open_paren = (
      Search(r'\w\s+\(', fncall) and
      not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
      not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
      not Search(r'\bcase\s+\(', fncall))
  if space_before_open_paren:
    # TODO(unknown): Space after an operator function seem to be a common
    # error, silence those for now by restricting them to highest verbosity.
    verbosity = 0 if Search(r'\boperator_*\b', line) else 4
    error(filename, linenum, 'whitespace/parens', verbosity,
          'Extra space before ( in function call')

  # If the ) is followed only by a newline or a { + newline, assume it's
  # part of a control statement (if/while/etc), and don't complain
  if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
    # If the closing parenthesis is preceded by only whitespaces,
    # try to give a more descriptive error message.
    if Search(r'^\s+\)', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Closing ) should be moved to the previous line')
    else:
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space before )')
2811
2812
2813
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  # An empty string is blank by definition; otherwise defer to isspace().
  if not line:
    return True
  return line.isspace()
2826
2827
2828
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
                                 error):
  """Checks indentation of items directly inside a namespace body.

  Args:
    filename: The name of the current file.
    nesting_state: A NestingState instance which maintains information about
                   the current stack of nested blocks being parsed.
    clean_lines: A CleansedLines instance containing the file.
    line: The number of the line to check.
    error: The function to call with any errors found.
  """
  # True when the innermost block was just entered and sits directly
  # inside a namespace (previous stack top is that namespace).
  is_namespace_indent_item = (
      len(nesting_state.stack) > 1 and
      nesting_state.stack[-1].check_namespace_indentation and
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
      nesting_state.previous_stack_top == nesting_state.stack[-2])

  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
                                     clean_lines.elided, line):
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
                                    line, error)
2840
2841
2842
def CheckForFunctionLengths(filename, clean_lines, linenum,
2843
                            function_state, error):
2844
  """Reports for long function bodies.
2845

2846
  For an overview why this is done, see:
2847
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
2848

2849
  Uses a simplistic algorithm assuming other style guidelines
2850
  (especially spacing) are followed.
2851
  Only checks unindented functions, so class members are unchecked.
2852
  Trivial bodies are unchecked, so constructors with huge initializer lists
2853
  may be missed.
2854
  Blank/comment lines are not counted so as to avoid encouraging the removal
2855
  of vertical space and comments just to get through a lint check.
2856
  NOLINT *on the last line of a function* disables this check.
2857