
1
#!/usr/bin/env python
2
#
3
# Copyright (c) 2009 Google Inc. All rights reserved.
4
#
5
# Redistribution and use in source and binary forms, with or without
6
# modification, are permitted provided that the following conditions are
7
# met:
8
#
9
#    * Redistributions of source code must retain the above copyright
10
# notice, this list of conditions and the following disclaimer.
11
#    * Redistributions in binary form must reproduce the above
12
# copyright notice, this list of conditions and the following disclaimer
13
# in the documentation and/or other materials provided with the
14
# distribution.
15
#    * Neither the name of Google Inc. nor the names of its
16
# contributors may be used to endorse or promote products derived from
17
# this software without specific prior written permission.
18
#
19
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
30

    
31
"""Does google-lint on c++ files.
32

33
The goal of this script is to identify places in the code that *may*
34
be in non-compliance with google style.  It does not attempt to fix
35
up these problems -- the point is to educate.  It also does not
36
attempt to find all problems, or to ensure that everything it does
37
find is legitimately a problem.
38

39
In particular, we can get very confused by /* and // inside strings!
40
We do a small hack, which is to ignore //'s with "'s after them on the
41
same line, but it is far from perfect (in either direction).
42
"""
43

    
44
import codecs
45
import copy
46
import getopt
47
import math  # for log
48
import os
49
import re
50
import sre_compile
51
import string
52
import sys
53
import unicodedata
54

    
55

    
56
_USAGE = """
57
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
58
                   [--counting=total|toplevel|detailed] [--root=subdir]
59
                   [--linelength=digits]
60
        <file> [file] ...
61

62
  The style guidelines this tries to follow are those in
63
    http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
64

65
  Every problem is given a confidence score from 1-5, with 5 meaning we are
66
  certain of the problem, and 1 meaning it could be a legitimate construct.
67
  This will miss some errors, and is not a substitute for a code review.
68

69
  To suppress false-positive errors of a certain category, add a
70
  'NOLINT(category)' comment to the line.  NOLINT or NOLINT(*)
71
  suppresses errors of all categories on that line.
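  For example (an illustrative snippet, not taken from any particular
  project), a line that deliberately uses a plain long can be annotated as:

    long size = GetSize();  // NOLINT(runtime/int)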
72

73
  The files passed in will be linted; at least one file must be provided.
74
  Default linted extensions are .cc, .cpp, .cu, .cuh and .h.  Change the
75
  extensions with the --extensions flag.
76

77
  Flags:
78

79
    output=vs7
80
      By default, the output is formatted to ease emacs parsing.  Visual Studio
81
      compatible output (vs7) may also be used.  Other formats are unsupported.
82

83
    verbose=#
84
      Specify a number 0-5 to restrict errors to certain verbosity levels.
85

86
    filter=-x,+y,...
87
      Specify a comma-separated list of category-filters to apply: only
88
      error messages whose category names pass the filters will be printed.
89
      (Category names are printed with the message and look like
90
      "[whitespace/indent]".)  Filters are evaluated left to right.
91
      "-FOO" and "FOO" mean "do not print categories that start with FOO".
92
      "+FOO" means "do print categories that start with FOO".
93

94
      Examples: --filter=-whitespace,+whitespace/braces
95
                --filter=whitespace,runtime/printf,+runtime/printf_format
96
                --filter=-,+build/include_what_you_use
97

98
      To see a list of all the categories used in cpplint, pass no arg:
99
         --filter=
100

101
    counting=total|toplevel|detailed
102
      The total number of errors found is always printed. If
103
      'toplevel' is provided, then the count of errors in each of
104
      the top-level categories like 'build' and 'whitespace' will
105
      also be printed. If 'detailed' is provided, then a count
106
      is provided for each category like 'build/class'.
107

108
    root=subdir
109
      The root directory used for deriving header guard CPP variable.
110
      By default, the header guard CPP variable is calculated as the relative
111
      path to the directory that contains .git, .hg, or .svn.  When this flag
112
      is specified, the relative path is calculated from the specified
113
      directory. If the specified directory does not exist, this flag is
114
      ignored.
115

116
      Examples:
117
        Assuming that src/.git exists, the header guard CPP variables for
118
        src/chrome/browser/ui/browser.h are:
119

120
        No flag => CHROME_BROWSER_UI_BROWSER_H_
121
        --root=chrome => BROWSER_UI_BROWSER_H_
122
        --root=chrome/browser => UI_BROWSER_H_
123

124
    linelength=digits
125
      This is the allowed line length for the project. The default value is
126
      80 characters.
127

128
      Examples:
129
        --linelength=120
130

131
    extensions=extension,extension,...
132
      The allowed file extensions that cpplint will check
133

134
      Examples:
135
        --extensions=hpp,cpp
136

137
    cpplint.py supports per-directory configurations specified in CPPLINT.cfg
138
    files. A CPPLINT.cfg file can contain a number of key=value pairs.
139
    Currently the following options are supported:
140

141
      set noparent
142
      filter=+filter1,-filter2,...
143
      exclude_files=regex
144
      linelength=80
145

146
    The "set noparent" option prevents cpplint from traversing the directory tree
147
    upwards looking for more .cfg files in parent directories. This option
148
    is usually placed in the top-level project directory.
149

150
    The "filter" option is similar in function to the --filter flag. It specifies
151
    message filters in addition to the |_DEFAULT_FILTERS| and those specified
152
    through the --filter command-line flag.
153

154
    "exclude_files" allows you to specify a regular expression to be matched against
155
    a file name. If the expression matches, the file is skipped and not run
156
    through the linter.
157

158
    "linelength" allows you to specify the allowed line length for the project.
159

160
    CPPLINT.cfg has an effect on files in the same directory and all
161
    sub-directories, unless overridden by a nested configuration file.
162

163
      Example file:
164
        filter=-build/include_order,+build/include_alpha
165
        exclude_files=.*\.cc
166

167
    The above example disables the build/include_order warning and enables
168
    build/include_alpha, as well as excluding all .cc files from being
169
    processed by the linter, in the current directory (where the .cfg
170
    file is located) and all sub-directories.
171
"""
172

    
173
# We categorize each error message we print.  Here are the categories.
174
# We want an explicit list so we can list them all in cpplint --filter=.
175
# If you add a new error message with a new category, add it to the list
176
# here!  cpplint_unittest.py should tell you if you forget to do this.
177
_ERROR_CATEGORIES = [
178
    'build/class',
179
    'build/c++11',
180
    'build/deprecated',
181
    'build/endif_comment',
182
    'build/explicit_make_pair',
183
    'build/forward_decl',
184
    'build/header_guard',
185
    'build/include',
186
    'build/include_alpha',
187
    'build/include_order',
188
    'build/include_what_you_use',
189
    'build/namespaces',
190
    'build/printf_format',
191
    'build/storage_class',
192
    'legal/copyright',
193
    'readability/alt_tokens',
194
    'readability/braces',
195
    'readability/casting',
196
    'readability/check',
197
    'readability/constructors',
198
    'readability/fn_size',
199
    'readability/function',
200
    'readability/inheritance',
201
    'readability/multiline_comment',
202
    'readability/multiline_string',
203
    'readability/namespace',
204
    'readability/nolint',
205
    'readability/nul',
206
    'readability/strings',
207
    'readability/todo',
208
    'readability/utf8',
209
    'runtime/arrays',
210
    'runtime/casting',
211
    'runtime/explicit',
212
    'runtime/int',
213
    'runtime/init',
214
    'runtime/invalid_increment',
215
    'runtime/member_string_references',
216
    'runtime/memset',
217
    'runtime/indentation_namespace',
218
    'runtime/operator',
219
    'runtime/printf',
220
    'runtime/printf_format',
221
    'runtime/references',
222
    'runtime/string',
223
    'runtime/threadsafe_fn',
224
    'runtime/vlog',
225
    'whitespace/blank_line',
226
    'whitespace/braces',
227
    'whitespace/comma',
228
    'whitespace/comments',
229
    'whitespace/empty_conditional_body',
230
    'whitespace/empty_loop_body',
231
    'whitespace/end_of_line',
232
    'whitespace/ending_newline',
233
    'whitespace/forcolon',
234
    'whitespace/indent',
235
    'whitespace/line_length',
236
    'whitespace/newline',
237
    'whitespace/operators',
238
    'whitespace/parens',
239
    'whitespace/semicolon',
240
    'whitespace/tab',
241
    'whitespace/todo',
242
    ]
243

    
244
# These error categories are no longer enforced by cpplint, but for backwards-
245
# compatibility they may still appear in NOLINT comments.
246
_LEGACY_ERROR_CATEGORIES = [
247
    'readability/streams',
248
    ]
249

    
250
# The default state of the category filter. This is overridden by the --filter=
251
# flag. By default all errors are on, so only add here categories that should be
252
# off by default (i.e., categories that must be enabled by the --filter= flags).
253
# All entries here should start with a '-' or '+', as in the --filter= flag.
254
_DEFAULT_FILTERS = ['-build/include_alpha']
255

    
256
# We used to check for high-bit characters, but after much discussion we
257
# decided those were OK, as long as they were in UTF-8 and didn't represent
258
# hard-coded international strings, which belong in a separate i18n file.
259

    
260
# C++ headers
261
_CPP_HEADERS = frozenset([
262
    # Legacy
263
    'algobase.h',
264
    'algo.h',
265
    'alloc.h',
266
    'builtinbuf.h',
267
    'bvector.h',
268
    'complex.h',
269
    'defalloc.h',
270
    'deque.h',
271
    'editbuf.h',
272
    'fstream.h',
273
    'function.h',
274
    'hash_map',
275
    'hash_map.h',
276
    'hash_set',
277
    'hash_set.h',
278
    'hashtable.h',
279
    'heap.h',
280
    'indstream.h',
281
    'iomanip.h',
282
    'iostream.h',
283
    'istream.h',
284
    'iterator.h',
285
    'list.h',
286
    'map.h',
287
    'multimap.h',
288
    'multiset.h',
289
    'ostream.h',
290
    'pair.h',
291
    'parsestream.h',
292
    'pfstream.h',
293
    'procbuf.h',
294
    'pthread_alloc',
295
    'pthread_alloc.h',
296
    'rope',
297
    'rope.h',
298
    'ropeimpl.h',
299
    'set.h',
300
    'slist',
301
    'slist.h',
302
    'stack.h',
303
    'stdiostream.h',
304
    'stl_alloc.h',
305
    'stl_relops.h',
306
    'streambuf.h',
307
    'stream.h',
308
    'strfile.h',
309
    'strstream.h',
310
    'tempbuf.h',
311
    'tree.h',
312
    'type_traits.h',
313
    'vector.h',
314
    # 17.6.1.2 C++ library headers
315
    'algorithm',
316
    'array',
317
    'atomic',
318
    'bitset',
319
    'chrono',
320
    'codecvt',
321
    'complex',
322
    'condition_variable',
323
    'deque',
324
    'exception',
325
    'forward_list',
326
    'fstream',
327
    'functional',
328
    'future',
329
    'initializer_list',
330
    'iomanip',
331
    'ios',
332
    'iosfwd',
333
    'iostream',
334
    'istream',
335
    'iterator',
336
    'limits',
337
    'list',
338
    'locale',
339
    'map',
340
    'memory',
341
    'mutex',
342
    'new',
343
    'numeric',
344
    'ostream',
345
    'queue',
346
    'random',
347
    'ratio',
348
    'regex',
349
    'set',
350
    'sstream',
351
    'stack',
352
    'stdexcept',
353
    'streambuf',
354
    'string',
355
    'strstream',
356
    'system_error',
357
    'thread',
358
    'tuple',
359
    'typeindex',
360
    'typeinfo',
361
    'type_traits',
362
    'unordered_map',
363
    'unordered_set',
364
    'utility',
365
    'valarray',
366
    'vector',
367
    # 17.6.1.2 C++ headers for C library facilities
368
    'cassert',
369
    'ccomplex',
370
    'cctype',
371
    'cerrno',
372
    'cfenv',
373
    'cfloat',
374
    'cinttypes',
375
    'ciso646',
376
    'climits',
377
    'clocale',
378
    'cmath',
379
    'csetjmp',
380
    'csignal',
381
    'cstdalign',
382
    'cstdarg',
383
    'cstdbool',
384
    'cstddef',
385
    'cstdint',
386
    'cstdio',
387
    'cstdlib',
388
    'cstring',
389
    'ctgmath',
390
    'ctime',
391
    'cuchar',
392
    'cwchar',
393
    'cwctype',
394
    ])
395

    
396

    
397
# These headers are excluded from [build/include] and [build/include_order]
398
# checks:
399
# - Anything not following google file name conventions (containing an
400
#   uppercase character, such as Python.h or nsStringAPI.h, for example).
401
# - Lua headers.
402
_THIRD_PARTY_HEADERS_PATTERN = re.compile(
403
    r'^(?:[^/]*[A-Z][^/]*\.h|lua\.h|lauxlib\.h|lualib\.h)$')
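# For example (illustrative comment), the pattern above matches "Python.h",
# "nsStringAPI.h" and "lua.h", but not "vector" or "my_project/foo.h".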
404

    
405

    
406
# Assertion macros.  These are defined in base/logging.h and
407
# testing/base/gunit.h.  Note that the _M versions need to come first
408
# for substring matching to work.
409
_CHECK_MACROS = [
410
    'DCHECK', 'CHECK',
411
    'EXPECT_TRUE_M', 'EXPECT_TRUE',
412
    'ASSERT_TRUE_M', 'ASSERT_TRUE',
413
    'EXPECT_FALSE_M', 'EXPECT_FALSE',
414
    'ASSERT_FALSE_M', 'ASSERT_FALSE',
415
    ]
416

    
417
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
418
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
419

    
420
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
421
                        ('>=', 'GE'), ('>', 'GT'),
422
                        ('<=', 'LE'), ('<', 'LT')]:
423
  _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
424
  _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
425
  _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
426
  _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
427
  _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
428
  _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
429

    
430
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
431
                            ('>=', 'LT'), ('>', 'LE'),
432
                            ('<=', 'GT'), ('<', 'GE')]:
433
  _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
434
  _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
435
  _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
436
  _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
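# After the two loops above, the tables contain entries such as the following
# (a few sample mappings, listed as an illustrative comment):
#   _CHECK_REPLACEMENT['CHECK']['==']        -> 'CHECK_EQ'
#   _CHECK_REPLACEMENT['EXPECT_TRUE']['<']   -> 'EXPECT_LT'
#   _CHECK_REPLACEMENT['EXPECT_FALSE']['=='] -> 'EXPECT_NE'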
437

    
438
# Alternative tokens and their replacements.  For full list, see section 2.5
439
# Alternative tokens [lex.digraph] in the C++ standard.
440
#
441
# Digraphs (such as '%:') are not included here since it's a mess to
442
# match those on a word boundary.
443
_ALT_TOKEN_REPLACEMENT = {
444
    'and': '&&',
445
    'bitor': '|',
446
    'or': '||',
447
    'xor': '^',
448
    'compl': '~',
449
    'bitand': '&',
450
    'and_eq': '&=',
451
    'or_eq': '|=',
452
    'xor_eq': '^=',
453
    'not': '!',
454
    'not_eq': '!='
455
    }
456

    
457
# Compile regular expression that matches all the above keywords.  The "[ =()]"
458
# bit is meant to avoid matching these keywords outside of boolean expressions.
459
#
460
# False positives include C-style multi-line comments and multi-line strings
461
# but those have always been troublesome for cpplint.
462
_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile(
463
    r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)')
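# For instance (illustrative comment), a line such as "if (a and not b)"
# matches this pattern twice; the table above maps 'and' to '&&' and
# 'not' to '!'.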
464

    
465

    
466
# These constants define types of headers for use with
467
# _IncludeState.CheckNextIncludeOrder().
468
_C_SYS_HEADER = 1
469
_CPP_SYS_HEADER = 2
470
_LIKELY_MY_HEADER = 3
471
_POSSIBLE_MY_HEADER = 4
472
_OTHER_HEADER = 5
473

    
474
# These constants define the current inline assembly state
475
_NO_ASM = 0       # Outside of inline assembly block
476
_INSIDE_ASM = 1   # Inside inline assembly block
477
_END_ASM = 2      # Last line of inline assembly block
478
_BLOCK_ASM = 3    # The whole block is an inline assembly block
479

    
480
# Match start of assembly blocks
481
_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)'
482
                        r'(?:\s+(volatile|__volatile__))?'
483
                        r'\s*[{(]')
484

    
485

    
486
_regexp_compile_cache = {}
487

    
488
# {str, set(int)}: a map from error categories to sets of linenumbers
489
# on which those errors are expected and should be suppressed.
490
_error_suppressions = {}
491

    
492
# The root directory used for deriving header guard CPP variable.
493
# This is set by --root flag.
494
_root = None
495

    
496
# The allowed line length of files.
497
# This is set by --linelength flag.
498
_line_length = 80
499

    
500
# The allowed extensions for file names
501
# This is set by --extensions flag.
502
_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh'])
503

    
504
def ParseNolintSuppressions(filename, raw_line, linenum, error):
505
  """Updates the global list of error-suppressions.
506

507
  Parses any NOLINT comments on the current line, updating the global
508
  error_suppressions store.  Reports an error if the NOLINT comment
509
  was malformed.
510

511
  Args:
512
    filename: str, the name of the input file.
513
    raw_line: str, the line of input text, with comments.
514
    linenum: int, the number of the current line.
515
    error: function, an error handler.
516
  """
517
  matched = Search(r'\bNOLINT(NEXTLINE)?\b(\([^)]+\))?', raw_line)
518
  if matched:
519
    if matched.group(1):
520
      suppressed_line = linenum + 1
521
    else:
522
      suppressed_line = linenum
523
    category = matched.group(2)
524
    if category in (None, '(*)'):  # => "suppress all"
525
      _error_suppressions.setdefault(None, set()).add(suppressed_line)
526
    else:
527
      if category.startswith('(') and category.endswith(')'):
528
        category = category[1:-1]
529
        if category in _ERROR_CATEGORIES:
530
          _error_suppressions.setdefault(category, set()).add(suppressed_line)
531
        elif category not in _LEGACY_ERROR_CATEGORIES:
532
          error(filename, linenum, 'readability/nolint', 5,
533
                'Unknown NOLINT error category: %s' % category)
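# Illustrative examples of the suppressions parsed above (descriptive comment
# only; the C++ lines are hypothetical):
#   "f(l);  // NOLINT(runtime/int)"         suppresses runtime/int on that line
#   "// NOLINTNEXTLINE(whitespace/braces)"  suppresses whitespace/braces on the
#                                           following line
#   "g();  // NOLINT"                       suppresses every category there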
534

    
535

    
536
def ResetNolintSuppressions():
537
  """Resets the set of NOLINT suppressions to empty."""
538
  _error_suppressions.clear()
539

    
540

    
541
def IsErrorSuppressedByNolint(category, linenum):
542
  """Returns true if the specified error category is suppressed on this line.
543

544
  Consults the global error_suppressions map populated by
545
  ParseNolintSuppressions/ResetNolintSuppressions.
546

547
  Args:
548
    category: str, the category of the error.
549
    linenum: int, the current line number.
550
  Returns:
551
    bool, True iff the error should be suppressed due to a NOLINT comment.
552
  """
553
  return (linenum in _error_suppressions.get(category, set()) or
554
          linenum in _error_suppressions.get(None, set()))
555

    
556

    
557
def Match(pattern, s):
558
  """Matches the string with the pattern, caching the compiled regexp."""
559
  # The regexp compilation caching is inlined in both Match and Search for
560
  # performance reasons; factoring it out into a separate function turns out
561
  # to be noticeably expensive.
562
  if pattern not in _regexp_compile_cache:
563
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
564
  return _regexp_compile_cache[pattern].match(s)
565

    
566

    
567
def ReplaceAll(pattern, rep, s):
568
  """Replaces instances of pattern in a string with a replacement.
569

570
  The compiled regex is kept in a cache shared by Match and Search.
571

572
  Args:
573
    pattern: regex pattern
574
    rep: replacement text
575
    s: search string
576

577
  Returns:
578
    string with replacements made (or original string if no replacements)
579
  """
580
  if pattern not in _regexp_compile_cache:
581
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
582
  return _regexp_compile_cache[pattern].sub(rep, s)
583

    
584

    
585
def Search(pattern, s):
586
  """Searches the string for the pattern, caching the compiled regexp."""
587
  if pattern not in _regexp_compile_cache:
588
    _regexp_compile_cache[pattern] = sre_compile.compile(pattern)
589
  return _regexp_compile_cache[pattern].search(s)
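# Example usage of the cached helpers above (illustrative comment only):
#   Match(r'\d+', '123abc')          matches the leading digits
#   Search(r'\d+', 'abc123')         finds the digits anywhere in the string
#   ReplaceAll(r'\s+', ' ', 'a   b') returns 'a b'
# Each distinct pattern is compiled only once and then reused via
# _regexp_compile_cache.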
590

    
591

    
592
class _IncludeState(object):
593
  """Tracks line numbers for includes, and the order in which includes appear.
594

595
  include_list contains list of lists of (header, line number) pairs.
596
  It's a list of lists rather than just one flat list to make it
597
  easier to update across preprocessor boundaries.
598

599
  Call CheckNextIncludeOrder() once for each header in the file, passing
600
  in the type constants defined above. Calls in an illegal order will
601
  raise an _IncludeError with an appropriate error message.
602

603
  """
604
  # self._section will move monotonically through this set. If it ever
605
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
606
  _INITIAL_SECTION = 0
607
  _MY_H_SECTION = 1
608
  _C_SECTION = 2
609
  _CPP_SECTION = 3
610
  _OTHER_H_SECTION = 4
611

    
612
  _TYPE_NAMES = {
613
      _C_SYS_HEADER: 'C system header',
614
      _CPP_SYS_HEADER: 'C++ system header',
615
      _LIKELY_MY_HEADER: 'header this file implements',
616
      _POSSIBLE_MY_HEADER: 'header this file may implement',
617
      _OTHER_HEADER: 'other header',
618
      }
619
  _SECTION_NAMES = {
620
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
621
      _MY_H_SECTION: 'a header this file implements',
622
      _C_SECTION: 'C system header',
623
      _CPP_SECTION: 'C++ system header',
624
      _OTHER_H_SECTION: 'other header',
625
      }
626

    
627
  def __init__(self):
628
    self.include_list = [[]]
629
    self.ResetSection('')
630

    
631
  def FindHeader(self, header):
632
    """Check if a header has already been included.
633

634
    Args:
635
      header: header to check.
636
    Returns:
637
      Line number of previous occurrence, or -1 if the header has not
638
      been seen before.
639
    """
640
    for section_list in self.include_list:
641
      for f in section_list:
642
        if f[0] == header:
643
          return f[1]
644
    return -1
645

    
646
  def ResetSection(self, directive):
647
    """Reset section checking for preprocessor directive.
648

649
    Args:
650
      directive: preprocessor directive (e.g. "if", "else").
651
    """
652
    # The name of the current section.
653
    self._section = self._INITIAL_SECTION
654
    # The path of last found header.
655
    self._last_header = ''
656

    
657
    # Update list of includes.  Note that we never pop from the
658
    # include list.
659
    if directive in ('if', 'ifdef', 'ifndef'):
660
      self.include_list.append([])
661
    elif directive in ('else', 'elif'):
662
      self.include_list[-1] = []
663

    
664
  def SetLastHeader(self, header_path):
665
    self._last_header = header_path
666

    
667
  def CanonicalizeAlphabeticalOrder(self, header_path):
668
    """Returns a path canonicalized for alphabetical comparison.
669

670
    - replaces "-" with "_" so they both compare the same.
671
    - removes '-inl' since we don't require them to be after the main header.
672
    - lowercase everything, just in case.
673

674
    Args:
675
      header_path: Path to be canonicalized.
676

677
    Returns:
678
      Canonicalized path.
679
    """
680
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()
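    # For example (illustrative): both 'Foo-Bar-inl.h' and 'foo_bar.h'
    # canonicalize to 'foo_bar.h', so the two compare as equal.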
681

    
682
  def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path):
683
    """Check if a header is in alphabetical order with the previous header.
684

685
    Args:
686
      clean_lines: A CleansedLines instance containing the file.
687
      linenum: The number of the line to check.
688
      header_path: Canonicalized header to be checked.
689

690
    Returns:
691
      Returns true if the header is in alphabetical order.
692
    """
693
    # If previous section is different from current section, _last_header will
694
    # be reset to empty string, so it's always less than current header.
695
    #
696
    # If previous line was a blank line, assume that the headers are
697
    # intentionally sorted the way they are.
698
    if (self._last_header > header_path and
699
        Match(r'^\s*#\s*include\b', clean_lines.elided[linenum - 1])):
700
      return False
701
    return True
702

    
703
  def CheckNextIncludeOrder(self, header_type):
704
    """Returns a non-empty error message if the next header is out of order.
705

706
    This function also updates the internal state to be ready to check
707
    the next include.
708

709
    Args:
710
      header_type: One of the _XXX_HEADER constants defined above.
711

712
    Returns:
713
      The empty string if the header is in the right order, or an
714
      error message describing what's wrong.
715

716
    """
717
    error_message = ('Found %s after %s' %
718
                     (self._TYPE_NAMES[header_type],
719
                      self._SECTION_NAMES[self._section]))
720

    
721
    last_section = self._section
722

    
723
    if header_type == _C_SYS_HEADER:
724
      if self._section <= self._C_SECTION:
725
        self._section = self._C_SECTION
726
      else:
727
        self._last_header = ''
728
        return error_message
729
    elif header_type == _CPP_SYS_HEADER:
730
      if self._section <= self._CPP_SECTION:
731
        self._section = self._CPP_SECTION
732
      else:
733
        self._last_header = ''
734
        return error_message
735
    elif header_type == _LIKELY_MY_HEADER:
736
      if self._section <= self._MY_H_SECTION:
737
        self._section = self._MY_H_SECTION
738
      else:
739
        self._section = self._OTHER_H_SECTION
740
    elif header_type == _POSSIBLE_MY_HEADER:
741
      if self._section <= self._MY_H_SECTION:
742
        self._section = self._MY_H_SECTION
743
      else:
744
        # This will always be the fallback because we're not sure
745
        # enough that the header is associated with this file.
746
        self._section = self._OTHER_H_SECTION
747
    else:
748
      assert header_type == _OTHER_HEADER
749
      self._section = self._OTHER_H_SECTION
750

    
751
    if last_section != self._section:
752
      self._last_header = ''
753

    
754
    return ''
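    # Illustrative sequence (added comment): for foo.cc the expected order is
    # foo.h (_LIKELY_MY_HEADER), then C system headers, then C++ system
    # headers, then all other headers.  Passing a C or C++ system header after
    # a later section has started returns the "Found ... after ..." message
    # built above.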
755

    
756

    
757
class _CppLintState(object):
758
  """Maintains module-wide state."""
759

    
760
  def __init__(self):
761
    self.verbose_level = 1  # global setting.
762
    self.error_count = 0    # global count of reported errors
763
    # filters to apply when emitting error messages
764
    self.filters = _DEFAULT_FILTERS[:]
765
    # backup of filter list. Used to restore the state after each file.
766
    self._filters_backup = self.filters[:]
767
    self.counting = 'total'  # In what way are we counting errors?
768
    self.errors_by_category = {}  # string to int dict storing error counts
769

    
770
    # output format:
771
    # "emacs" - format that emacs can parse (default)
772
    # "vs7" - format that Microsoft Visual Studio 7 can parse
773
    self.output_format = 'emacs'
774

    
775
  def SetOutputFormat(self, output_format):
776
    """Sets the output format for errors."""
777
    self.output_format = output_format
778

    
779
  def SetVerboseLevel(self, level):
780
    """Sets the module's verbosity, and returns the previous setting."""
781
    last_verbose_level = self.verbose_level
782
    self.verbose_level = level
783
    return last_verbose_level
784

    
785
  def SetCountingStyle(self, counting_style):
786
    """Sets the module's counting options."""
787
    self.counting = counting_style
788

    
789
  def SetFilters(self, filters):
790
    """Sets the error-message filters.
791

792
    These filters are applied when deciding whether to emit a given
793
    error message.
794

795
    Args:
796
      filters: A string of comma-separated filters (eg "+whitespace/indent").
797
               Each filter should start with + or -; else we die.
798

799
    Raises:
800
      ValueError: The comma-separated filters did not all start with '+' or '-'.
801
                  E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
802
    """
803
    # Default filters always have lower priority than the flag ones.
804
    self.filters = _DEFAULT_FILTERS[:]
805
    self.AddFilters(filters)
806

    
807
  def AddFilters(self, filters):
808
    """ Adds more filters to the existing list of error-message filters. """
809
    for filt in filters.split(','):
810
      clean_filt = filt.strip()
811
      if clean_filt:
812
        self.filters.append(clean_filt)
813
    for filt in self.filters:
814
      if not (filt.startswith('+') or filt.startswith('-')):
815
        raise ValueError('Every filter in --filter must start with + or -'
816
                         ' (%s does not)' % filt)
817

    
818
  def BackupFilters(self):
819
    """ Saves the current filter list to backup storage."""
820
    self._filters_backup = self.filters[:]
821

    
822
  def RestoreFilters(self):
823
    """ Restores filters previously backed up."""
824
    self.filters = self._filters_backup[:]
825

    
826
  def ResetErrorCounts(self):
827
    """Sets the module's error statistic back to zero."""
828
    self.error_count = 0
829
    self.errors_by_category = {}
830

    
831
  def IncrementErrorCount(self, category):
832
    """Bumps the module's error statistic."""
833
    self.error_count += 1
834
    if self.counting in ('toplevel', 'detailed'):
835
      if self.counting != 'detailed':
836
        category = category.split('/')[0]
837
      if category not in self.errors_by_category:
838
        self.errors_by_category[category] = 0
839
      self.errors_by_category[category] += 1
840

    
841
  def PrintErrorCounts(self):
842
    """Print a summary of errors by category, and the total."""
843
    for category, count in self.errors_by_category.iteritems():
844
      sys.stderr.write('Category \'%s\' errors found: %d\n' %
845
                       (category, count))
846
    sys.stderr.write('Total errors found: %d\n' % self.error_count)
847

    
848
_cpplint_state = _CppLintState()
849

    
850

    
851
def _OutputFormat():
852
  """Gets the module's output format."""
853
  return _cpplint_state.output_format
854

    
855

    
856
def _SetOutputFormat(output_format):
857
  """Sets the module's output format."""
858
  _cpplint_state.SetOutputFormat(output_format)
859

    
860

    
861
def _VerboseLevel():
862
  """Returns the module's verbosity setting."""
863
  return _cpplint_state.verbose_level
864

    
865

    
866
def _SetVerboseLevel(level):
867
  """Sets the module's verbosity, and returns the previous setting."""
868
  return _cpplint_state.SetVerboseLevel(level)
869

    
870

    
871
def _SetCountingStyle(level):
872
  """Sets the module's counting options."""
873
  _cpplint_state.SetCountingStyle(level)
874

    
875

    
876
def _Filters():
877
  """Returns the module's list of output filters, as a list."""
878
  return _cpplint_state.filters
879

    
880

    
881
def _SetFilters(filters):
882
  """Sets the module's error-message filters.
883

884
  These filters are applied when deciding whether to emit a given
885
  error message.
886

887
  Args:
888
    filters: A string of comma-separated filters (eg "whitespace/indent").
889
             Each filter should start with + or -; else we die.
890
  """
891
  _cpplint_state.SetFilters(filters)
892

    
893
def _AddFilters(filters):
894
  """Adds more filter overrides.
895

896
  Unlike _SetFilters, this function does not reset the current list of filters
897
  available.
898

899
  Args:
900
    filters: A string of comma-separated filters (eg "whitespace/indent").
901
             Each filter should start with + or -; else we die.
902
  """
903
  _cpplint_state.AddFilters(filters)
904

    
905
def _BackupFilters():
906
  """ Saves the current filter list to backup storage."""
907
  _cpplint_state.BackupFilters()
908

    
909
def _RestoreFilters():
910
  """ Restores filters previously backed up."""
911
  _cpplint_state.RestoreFilters()
912

    
913
class _FunctionState(object):
914
  """Tracks current function name and the number of lines in its body."""
915

    
916
  _NORMAL_TRIGGER = 250  # for --v=0, 500 for --v=1, etc.
917
  _TEST_TRIGGER = 400    # about 60% more than _NORMAL_TRIGGER.
918

    
919
  def __init__(self):
920
    self.in_a_function = False
921
    self.lines_in_function = 0
922
    self.current_function = ''
923

    
924
  def Begin(self, function_name):
925
    """Start analyzing function body.
926

927
    Args:
928
      function_name: The name of the function being tracked.
929
    """
930
    self.in_a_function = True
931
    self.lines_in_function = 0
932
    self.current_function = function_name
933

    
934
  def Count(self):
935
    """Count line in current function body."""
936
    if self.in_a_function:
937
      self.lines_in_function += 1
938

    
939
  def Check(self, error, filename, linenum):
940
    """Report if too many lines in function body.
941

942
    Args:
943
      error: The function to call with any errors found.
944
      filename: The name of the current file.
945
      linenum: The number of the line to check.
946
    """
947
    if Match(r'T(EST|est)', self.current_function):
948
      base_trigger = self._TEST_TRIGGER
949
    else:
950
      base_trigger = self._NORMAL_TRIGGER
951
    trigger = base_trigger * 2**_VerboseLevel()
952

    
953
    if self.lines_in_function > trigger:
954
      error_level = int(math.log(self.lines_in_function / base_trigger, 2))
955
      # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
956
      if error_level > 5:
957
        error_level = 5
958
      error(filename, linenum, 'readability/fn_size', error_level,
959
            'Small and focused functions are preferred:'
960
            ' %s has %d non-comment lines'
961
            ' (error triggered by exceeding %d lines).'  % (
962
                self.current_function, self.lines_in_function, trigger))
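    # Worked example (added comment): for a non-test function checked with
    # --v=0 the trigger is 250 lines, and a 600-line body yields
    # error_level = int(log2(600 / 250)) = 1, i.e. a low-confidence
    # readability/fn_size warning.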
963

    
964
  def End(self):
965
    """Stop analyzing function body."""
966
    self.in_a_function = False
967

    
968

    
969
class _IncludeError(Exception):
970
  """Indicates a problem with the include order in a file."""
971
  pass
972

    
973

    
974
class FileInfo(object):
975
  """Provides utility functions for filenames.
976

977
  FileInfo provides easy access to the components of a file's path
978
  relative to the project root.
979
  """
980

    
981
  def __init__(self, filename):
982
    self._filename = filename
983

    
984
  def FullName(self):
985
    """Make Windows paths like Unix."""
986
    return os.path.abspath(self._filename).replace('\\', '/')
987

    
988
  def RepositoryName(self):
989
    """FullName after removing the local path to the repository.
990

991
    If we have a real absolute path name here we can try to do something smart:
992
    detecting the root of the checkout and truncating /path/to/checkout from
993
    the name so that we get header guards that don't include things like
994
    "C:\Documents and Settings\..." or "/home/username/..." in them and thus
995
    people on different computers who have checked the source out to different
996
    locations won't see bogus errors.
997
    """
998
    fullname = self.FullName()
999

    
1000
    if os.path.exists(fullname):
1001
      project_dir = os.path.dirname(fullname)
1002

    
1003
      if os.path.exists(os.path.join(project_dir, ".svn")):
1004
        # If there's a .svn file in the current directory, we recursively look
1005
        # up the directory tree for the top of the SVN checkout
1006
        root_dir = project_dir
1007
        one_up_dir = os.path.dirname(root_dir)
1008
        while os.path.exists(os.path.join(one_up_dir, ".svn")):
1009
          root_dir = os.path.dirname(root_dir)
1010
          one_up_dir = os.path.dirname(one_up_dir)
1011

    
1012
        prefix = os.path.commonprefix([root_dir, project_dir])
1013
        return fullname[len(prefix) + 1:]
1014

    
1015
      # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by
1016
      # searching up from the current path.
1017
      root_dir = os.path.dirname(fullname)
1018
      while (root_dir != os.path.dirname(root_dir) and
1019
             not os.path.exists(os.path.join(root_dir, ".git")) and
1020
             not os.path.exists(os.path.join(root_dir, ".hg")) and
1021
             not os.path.exists(os.path.join(root_dir, ".svn"))):
1022
        root_dir = os.path.dirname(root_dir)
1023

    
1024
      if (os.path.exists(os.path.join(root_dir, ".git")) or
1025
          os.path.exists(os.path.join(root_dir, ".hg")) or
1026
          os.path.exists(os.path.join(root_dir, ".svn"))):
1027
        prefix = os.path.commonprefix([root_dir, project_dir])
1028
        return fullname[len(prefix) + 1:]
1029

    
1030
    # Don't know what to do; header guard warnings may be wrong...
1031
    return fullname
1032

    
1033
  def Split(self):
1034
    """Splits the file into the directory, basename, and extension.
1035

1036
    For 'chrome/browser/browser.cc', Split() would
1037
    return ('chrome/browser', 'browser', '.cc')
1038

1039
    Returns:
1040
      A tuple of (directory, basename, extension).
1041
    """
1042

    
1043
    googlename = self.RepositoryName()
1044
    project, rest = os.path.split(googlename)
1045
    return (project,) + os.path.splitext(rest)
1046

    
1047
  def BaseName(self):
1048
    """File base name - text after the final slash, before the final period."""
1049
    return self.Split()[1]
1050

    
1051
  def Extension(self):
1052
    """File extension - text following the final period."""
1053
    return self.Split()[2]
1054

    
1055
  def NoExtension(self):
1056
    """File path with the extension removed."""
1057
    return '/'.join(self.Split()[0:2])
1058

    
1059
  def IsSource(self):
1060
    """File has a source file extension."""
1061
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
1062

    
1063

    
1064
def _ShouldPrintError(category, confidence, linenum):
1065
  """If confidence >= verbose, category passes filter and is not suppressed."""
1066

    
1067
  # There are three ways we might decide not to print an error message:
1068
  # a "NOLINT(category)" comment appears in the source,
1069
  # the verbosity level isn't high enough, or the filters filter it out.
1070
  if IsErrorSuppressedByNolint(category, linenum):
1071
    return False
1072

    
1073
  if confidence < _cpplint_state.verbose_level:
1074
    return False
1075

    
1076
  is_filtered = False
1077
  for one_filter in _Filters():
1078
    if one_filter.startswith('-'):
1079
      if category.startswith(one_filter[1:]):
1080
        is_filtered = True
1081
    elif one_filter.startswith('+'):
1082
      if category.startswith(one_filter[1:]):
1083
        is_filtered = False
1084
    else:
1085
      assert False  # should have been checked for in SetFilter.
1086
  if is_filtered:
1087
    return False
1088

    
1089
  return True
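# Filter evaluation example (illustrative): with
# --filter=-whitespace,+whitespace/braces an error in 'whitespace/braces' is
# first filtered out by '-whitespace' and then re-enabled by
# '+whitespace/braces', so it is printed, while 'whitespace/indent' stays
# filtered out.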
1090

    
1091

    
1092
def Error(filename, linenum, category, confidence, message):
1093
  """Logs the fact we've found a lint error.
1094

1095
  We log where the error was found, and also our confidence in the error,
1096
  that is, how certain we are this is a legitimate style regression, and
1097
  not a misidentification or a use that's sometimes justified.
1098

1099
  False positives can be suppressed by the use of
1100
  "NOLINT(category)"  comments on the offending line.  These are
1101
  parsed into _error_suppressions.
1102

1103
  Args:
1104
    filename: The name of the file containing the error.
1105
    linenum: The number of the line containing the error.
1106
    category: A string used to describe the "category" this bug
1107
      falls under: "whitespace", say, or "runtime".  Categories
1108
      may have a hierarchy separated by slashes: "whitespace/indent".
1109
    confidence: A number from 1-5 representing a confidence score for
1110
      the error, with 5 meaning that we are certain of the problem,
1111
      and 1 meaning that it could be a legitimate construct.
1112
    message: The error message.
1113
  """
1114
  if _ShouldPrintError(category, confidence, linenum):
1115
    _cpplint_state.IncrementErrorCount(category)
1116
    if _cpplint_state.output_format == 'vs7':
1117
      sys.stderr.write('%s(%s):  %s  [%s] [%d]\n' % (
1118
          filename, linenum, message, category, confidence))
1119
    elif _cpplint_state.output_format == 'eclipse':
1120
      sys.stderr.write('%s:%s: warning: %s  [%s] [%d]\n' % (
1121
          filename, linenum, message, category, confidence))
1122
    else:
1123
      sys.stderr.write('%s:%s:  %s  [%s] [%d]\n' % (
1124
          filename, linenum, message, category, confidence))
1125

    
1126

    
1127
# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard.
1128
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
1129
    r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
1130
# Match a single C style comment on the same line.
1131
_RE_PATTERN_C_COMMENTS = r'/\*(?:[^*]|\*(?!/))*\*/'
1132
# Matches multi-line C style comments.
1133
# This RE is a little bit more complicated than one might expect, because we
1134
# have to take care of removing surrounding spaces so we can handle comments inside
1135
# statements better.
1136
# The current rule is: We only clear spaces from both sides when we're at the
1137
# end of the line. Otherwise, we try to remove spaces from the right side,
1138
# if this doesn't work, we try the left side, but only if there's a non-word
1139
# character on the right.
1140
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
1141
    r'(\s*' + _RE_PATTERN_C_COMMENTS + r'\s*$|' +
1142
    _RE_PATTERN_C_COMMENTS + r'\s+|' +
1143
    r'\s+' + _RE_PATTERN_C_COMMENTS + r'(?=\W)|' +
1144
    _RE_PATTERN_C_COMMENTS + r')')
1145

    
1146

    
1147
def IsCppString(line):
1148
  """Does line terminate such that the next symbol is in a string constant.
1149

1150
  This function does not consider single-line nor multi-line comments.
1151

1152
  Args:
1153
    line: a partial line of code, consisting of characters 0..n of the full line.
1154

1155
  Returns:
1156
    True, if next character appended to 'line' is inside a
1157
    string constant.
1158
  """
1159

    
1160
  line = line.replace(r'\\', 'XX')  # after this, \\" does not match to \"
1161
  return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1
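# A few illustrative cases for IsCppString (descriptive comment only):
#   IsCppString('x = "abc')       -> True   (one unmatched double quote)
#   IsCppString('x = "abc" + y')  -> False  (quotes are balanced)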
1162

    
1163

    
1164
def CleanseRawStrings(raw_lines):
1165
  """Removes C++11 raw strings from lines.
1166

1167
    Before:
1168
      static const char kData[] = R"(
1169
          multi-line string
1170
          )";
1171

1172
    After:
1173
      static const char kData[] = ""
1174
          (replaced by blank line)
1175
          "";
1176

1177
  Args:
1178
    raw_lines: list of raw lines.
1179

1180
  Returns:
1181
    list of lines with C++11 raw strings replaced by empty strings.
1182
  """
1183

    
1184
  delimiter = None
1185
  lines_without_raw_strings = []
1186
  for line in raw_lines:
1187
    if delimiter:
1188
      # Inside a raw string, look for the end
1189
      end = line.find(delimiter)
1190
      if end >= 0:
1191
        # Found the end of the string, match leading space for this
1192
        # line and resume copying the original lines, and also insert
1193
        # a "" on the last line.
1194
        leading_space = Match(r'^(\s*)\S', line)
1195
        line = leading_space.group(1) + '""' + line[end + len(delimiter):]
1196
        delimiter = None
1197
      else:
1198
        # Haven't found the end yet, append a blank line.
1199
        line = '""'
1200

    
1201
    # Look for beginning of a raw string, and replace them with
1202
    # empty strings.  This is done in a loop to handle multiple raw
1203
    # strings on the same line.
1204
    while delimiter is None:
1205
      # Look for beginning of a raw string.
1206
      # See 2.14.15 [lex.string] for syntax.
1207
      matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line)
1208
      if matched:
1209
        delimiter = ')' + matched.group(2) + '"'
1210

    
1211
        end = matched.group(3).find(delimiter)
1212
        if end >= 0:
1213
          # Raw string ended on same line
1214
          line = (matched.group(1) + '""' +
1215
                  matched.group(3)[end + len(delimiter):])
1216
          delimiter = None
1217
        else:
1218
          # Start of a multi-line raw string
1219
          line = matched.group(1) + '""'
1220
      else:
1221
        break
1222

    
1223
    lines_without_raw_strings.append(line)
1224

    
1225
  # TODO(unknown): if delimiter is not None here, we might want to
1226
  # emit a warning for unterminated string.
1227
  return lines_without_raw_strings
1228

    
1229

    
1230
def FindNextMultiLineCommentStart(lines, lineix):
1231
  """Find the beginning marker for a multiline comment."""
1232
  while lineix < len(lines):
1233
    if lines[lineix].strip().startswith('/*'):
1234
      # Only return this marker if the comment goes beyond this line
1235
      if lines[lineix].strip().find('*/', 2) < 0:
1236
        return lineix
1237
    lineix += 1
1238
  return len(lines)
1239

    
1240

    
1241
def FindNextMultiLineCommentEnd(lines, lineix):
1242
  """We are inside a comment, find the end marker."""
1243
  while lineix < len(lines):
1244
    if lines[lineix].strip().endswith('*/'):
1245
      return lineix
1246
    lineix += 1
1247
  return len(lines)
1248

    
1249

    
1250
def RemoveMultiLineCommentsFromRange(lines, begin, end):
1251
  """Clears a range of lines for multi-line comments."""
1252
  # Having /**/ dummy comments makes the lines non-empty, so we will not get
1253
  # unnecessary blank line warnings later in the code.
1254
  for i in range(begin, end):
1255
    lines[i] = '/**/'
1256

    
1257

    
1258
def RemoveMultiLineComments(filename, lines, error):
1259
  """Removes multiline (c-style) comments from lines."""
1260
  lineix = 0
1261
  while lineix < len(lines):
1262
    lineix_begin = FindNextMultiLineCommentStart(lines, lineix)
1263
    if lineix_begin >= len(lines):
1264
      return
1265
    lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin)
1266
    if lineix_end >= len(lines):
1267
      error(filename, lineix_begin + 1, 'readability/multiline_comment', 5,
1268
            'Could not find end of multi-line comment')
1269
      return
1270
    RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1)
1271
    lineix = lineix_end + 1
1272

    
1273

    
1274
def CleanseComments(line):
1275
  """Removes //-comments and single-line C-style /* */ comments.
1276

1277
  Args:
1278
    line: A line of C++ source.
1279

1280
  Returns:
1281
    The line with single-line comments removed.
1282
  """
1283
  commentpos = line.find('//')
1284
  if commentpos != -1 and not IsCppString(line[:commentpos]):
1285
    line = line[:commentpos].rstrip()
1286
  # get rid of /* ... */
1287
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
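# Illustrative examples for CleanseComments (descriptive comment only):
#   'int a;  // trailing comment'  ->  'int a;'
#   'f(/* inline */ x);'           ->  'f(x);'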
1288

    
1289

    
1290
class CleansedLines(object):
1291
  """Holds 4 copies of all lines with different preprocessing applied to them.
1292

1293
  1) elided member contains lines without strings and comments.
1294
  2) lines member contains lines without comments.
1295
  3) raw_lines member contains all the lines without processing.
1296
  4) lines_without_raw_strings member is same as raw_lines, but with C++11 raw
1297
     strings removed.
1298
  All these members are of <type 'list'>, and of the same length.
1299
  """
1300

    
1301
  def __init__(self, lines):
1302
    self.elided = []
1303
    self.lines = []
1304
    self.raw_lines = lines
1305
    self.num_lines = len(lines)
1306
    self.lines_without_raw_strings = CleanseRawStrings(lines)
1307
    for linenum in range(len(self.lines_without_raw_strings)):
1308
      self.lines.append(CleanseComments(
1309
          self.lines_without_raw_strings[linenum]))
1310
      elided = self._CollapseStrings(self.lines_without_raw_strings[linenum])
1311
      self.elided.append(CleanseComments(elided))
1312

    
1313
  def NumLines(self):
1314
    """Returns the number of lines represented."""
1315
    return self.num_lines
1316

    
1317
  @staticmethod
1318
  def _CollapseStrings(elided):
1319
    """Collapses strings and chars on a line to simple "" or '' blocks.
1320

1321
    We nix strings first so we're not fooled by text like '"http://"'
1322

1323
    Args:
1324
      elided: The line being processed.
1325

1326
    Returns:
1327
      The line with collapsed strings.
1328
    """
1329
    if _RE_PATTERN_INCLUDE.match(elided):
1330
      return elided
1331

    
1332
    # Remove escaped characters first to make quote/single quote collapsing
1333
    # basic.  Things that look like escaped characters shouldn't occur
1334
    # outside of strings and chars.
1335
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
1336

    
1337
    # Replace quoted strings and digit separators.  Both single quotes
1338
    # and double quotes are processed in the same loop, otherwise
1339
    # nested quotes wouldn't work.
1340
    collapsed = ''
1341
    while True:
1342
      # Find the first quote character
1343
      match = Match(r'^([^\'"]*)([\'"])(.*)$', elided)
1344
      if not match:
1345
        collapsed += elided
1346
        break
1347
      head, quote, tail = match.groups()
1348

    
1349
      if quote == '"':
1350
        # Collapse double quoted strings
1351
        second_quote = tail.find('"')
1352
        if second_quote >= 0:
1353
          collapsed += head + '""'
1354
          elided = tail[second_quote + 1:]
1355
        else:
1356
          # Unmatched double quote, don't bother processing the rest
1357
          # of the line since this is probably a multiline string.
1358
          collapsed += elided
1359
          break
1360
      else:
1361
        # Found single quote, check nearby text to eliminate digit separators.
1362
        #
1363
        # There is no special handling for floating point here, because
1364
        # the integer/fractional/exponent parts would all be parsed
1365
        # correctly as long as there are digits on both sides of the
1366
        # separator.  So we are fine as long as we don't see something
1367
        # like "0.'3" (gcc 4.9.0 will not allow this literal).
1368
        if Search(r'\b(?:0[bBxX]?|[1-9])[0-9a-fA-F]*$', head):
1369
          match_literal = Match(r'^((?:\'?[0-9a-zA-Z_])*)(.*)$', "'" + tail)
1370
          collapsed += head + match_literal.group(1).replace("'", '')
1371
          elided = match_literal.group(2)
1372
        else:
1373
          second_quote = tail.find('\'')
1374
          if second_quote >= 0:
1375
            collapsed += head + "''"
1376
            elided = tail[second_quote + 1:]
1377
          else:
1378
            # Unmatched single quote
1379
            collapsed += elided
1380
            break
1381

    
1382
    return collapsed
1383

    
1384

    
1385
def FindEndOfExpressionInLine(line, startpos, stack):
1386
  """Find the position just after the end of current parenthesized expression.
1387

1388
  Args:
1389
    line: a CleansedLines line.
1390
    startpos: start searching at this position.
1391
    stack: nesting stack at startpos.
1392

1393
  Returns:
1394
    On finding matching end: (index just after matching end, None)
1395
    On finding an unclosed expression: (-1, None)
1396
    Otherwise: (-1, new stack at end of this line)
1397
  """
1398
  for i in xrange(startpos, len(line)):
1399
    char = line[i]
1400
    if char in '([{':
1401
      # Found start of parenthesized expression, push to expression stack
1402
      stack.append(char)
1403
    elif char == '<':
1404
      # Found potential start of template argument list
1405
      if i > 0 and line[i - 1] == '<':
1406
        # Left shift operator
1407
        if stack and stack[-1] == '<':
1408
          stack.pop()
1409
          if not stack:
1410
            return (-1, None)
1411
      elif i > 0 and Search(r'\boperator\s*$', line[0:i]):
1412
        # operator<, don't add to stack
1413
        continue
1414
      else:
1415
        # Tentative start of template argument list
1416
        stack.append('<')
1417
    elif char in ')]}':
1418
      # Found end of parenthesized expression.
1419
      #
1420
      # If we are currently expecting a matching '>', the pending '<'
1421
      # must have been an operator.  Remove them from expression stack.
1422
      while stack and stack[-1] == '<':
1423
        stack.pop()
1424
      if not stack:
1425
        return (-1, None)
1426
      if ((stack[-1] == '(' and char == ')') or
1427
          (stack[-1] == '[' and char == ']') or
1428
          (stack[-1] == '{' and char == '}')):
1429
        stack.pop()
1430
        if not stack:
1431
          return (i + 1, None)
1432
      else:
1433
        # Mismatched parentheses
1434
        return (-1, None)
1435
    elif char == '>':
1436
      # Found potential end of template argument list.
1437

    
1438
      # Ignore "->" and operator functions
1439
      if (i > 0 and
1440
          (line[i - 1] == '-' or Search(r'\boperator\s*$', line[0:i - 1]))):
1441
        continue
1442

    
1443
      # Pop the stack if there is a matching '<'.  Otherwise, ignore
1444
      # this '>' since it must be an operator.
1445
      if stack:
1446
        if stack[-1] == '<':
1447
          stack.pop()
1448
          if not stack:
1449
            return (i + 1, None)
1450
    elif char == ';':
1451
      # Found something that looks like the end of a statement.  If we are currently
1452
      # expecting a '>', the matching '<' must have been an operator, since
1453
      # template argument list should not contain statements.
1454
      while stack and stack[-1] == '<':
1455
        stack.pop()
1456
      if not stack:
1457
        return (-1, None)
1458

    
1459
  # Did not find end of expression or unbalanced parentheses on this line
1460
  return (-1, stack)
1461

    
1462

    
1463
def CloseExpression(clean_lines, linenum, pos):
1464
  """If input points to ( or { or [ or <, finds the position that closes it.
1465

1466
  If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
1467
  linenum/pos that correspond to the closing of the expression.
1468

1469
  TODO(unknown): cpplint spends a fair bit of time matching parentheses.
1470
  Ideally we would want to index all opening and closing parentheses once
1471
  and have CloseExpression be just a simple lookup, but due to preprocessor
1472
  tricks, this is not so easy.
1473

1474
  Args:
1475
    clean_lines: A CleansedLines instance containing the file.
1476
    linenum: The number of the line to check.
1477
    pos: A position on the line.
1478

1479
  Returns:
1480
    A tuple (line, linenum, pos) pointing *past* the closing brace, or
1481
    (line, len(lines), -1) if we never find a close.  Note we ignore
1482
    strings and comments when matching; and the line we return is the
1483
    'cleansed' line at linenum.
1484
  """
1485

    
1486
  line = clean_lines.elided[linenum]
1487
  if (line[pos] not in '({[<') or Match(r'<[<=]', line[pos:]):
1488
    return (line, clean_lines.NumLines(), -1)
1489

    
1490
  # Check first line
1491
  (end_pos, stack) = FindEndOfExpressionInLine(line, pos, [])
1492
  if end_pos > -1:
1493
    return (line, linenum, end_pos)
1494

    
1495
  # Continue scanning forward
1496
  while stack and linenum < clean_lines.NumLines() - 1:
1497
    linenum += 1
1498
    line = clean_lines.elided[linenum]
1499
    (end_pos, stack) = FindEndOfExpressionInLine(line, 0, stack)
1500
    if end_pos > -1:
1501
      return (line, linenum, end_pos)
1502

    
1503
  # Did not find end of expression before end of file, give up
1504
  return (line, clean_lines.NumLines(), -1)
1505

    
1506

    
1507
def FindStartOfExpressionInLine(line, endpos, stack):
1508
  """Find position at the matching start of current expression.
1509

1510
  This is almost the reverse of FindEndOfExpressionInLine, but note
1511
  that the input position and the returned position differ by 1.
1512

1513
  Args:
1514
    line: a CleansedLines line.
1515
    endpos: start searching at this position.
1516
    stack: nesting stack at endpos.
1517

1518
  Returns:
1519
    On finding matching start: (index at matching start, None)
1520
    On finding an unclosed expression: (-1, None)
1521
    Otherwise: (-1, new stack at beginning of this line)
1522
  """
1523
  i = endpos
1524
  while i >= 0:
1525
    char = line[i]
1526
    if char in ')]}':
1527
      # Found end of expression, push to expression stack
1528
      stack.append(char)
1529
    elif char == '>':
1530
      # Found potential end of template argument list.
1531
      #
1532
      # Ignore it if it's a "->" or ">=" or "operator>"
1533
      if (i > 0 and
1534
          (line[i - 1] == '-' or
1535
           Match(r'\s>=\s', line[i - 1:]) or
1536
           Search(r'\boperator\s*$', line[0:i]))):
1537
        i -= 1
1538
      else:
1539
        stack.append('>')
1540
    elif char == '<':
1541
      # Found potential start of template argument list
1542
      if i > 0 and line[i - 1] == '<':
1543
        # Left shift operator
1544
        i -= 1
1545
      else:
1546
        # If there is a matching '>', we can pop the expression stack.
1547
        # Otherwise, ignore this '<' since it must be an operator.
1548
        if stack and stack[-1] == '>':
1549
          stack.pop()
1550
          if not stack:
1551
            return (i, None)
1552
    elif char in '([{':
1553
      # Found start of expression.
1554
      #
1555
      # If there are any unmatched '>' on the stack, they must be
1556
      # operators.  Remove those.
1557
      while stack and stack[-1] == '>':
1558
        stack.pop()
1559
      if not stack:
1560
        return (-1, None)
1561
      if ((char == '(' and stack[-1] == ')') or
1562
          (char == '[' and stack[-1] == ']') or
1563
          (char == '{' and stack[-1] == '}')):
1564
        stack.pop()
1565
        if not stack:
1566
          return (i, None)
1567
      else:
1568
        # Mismatched parentheses
1569
        return (-1, None)
1570
    elif char == ';':
1571
      # Found something that look like end of statements.  If we are currently
1572
      # expecting a '<', the matching '>' must have been an operator, since
1573
      # template argument list should not contain statements.
1574
      while stack and stack[-1] == '>':
1575
        stack.pop()
1576
      if not stack:
1577
        return (-1, None)
1578

    
1579
    i -= 1
1580

    
1581
  return (-1, stack)
1582

    
1583

    
1584
def ReverseCloseExpression(clean_lines, linenum, pos):
1585
  """If input points to ) or } or ] or >, finds the position that opens it.
1586

1587
  If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
1588
  linenum/pos that correspond to the opening of the expression.
1589

1590
  Args:
1591
    clean_lines: A CleansedLines instance containing the file.
1592
    linenum: The number of the line to check.
1593
    pos: A position on the line.
1594

1595
  Returns:
1596
    A tuple (line, linenum, pos) pointer *at* the opening brace, or
1597
    (line, 0, -1) if we never find the matching opening brace.  Note
1598
    we ignore strings and comments when matching; and the line we
1599
    return is the 'cleansed' line at linenum.
1600
  """
1601
  line = clean_lines.elided[linenum]
1602
  if line[pos] not in ')}]>':
1603
    return (line, 0, -1)
1604

    
1605
  # Check last line
1606
  (start_pos, stack) = FindStartOfExpressionInLine(line, pos, [])
1607
  if start_pos > -1:
1608
    return (line, linenum, start_pos)
1609

    
1610
  # Continue scanning backward
1611
  while stack and linenum > 0:
1612
    linenum -= 1
1613
    line = clean_lines.elided[linenum]
1614
    (start_pos, stack) = FindStartOfExpressionInLine(line, len(line) - 1, stack)
1615
    if start_pos > -1:
1616
      return (line, linenum, start_pos)
1617

    
1618
  # Did not find start of expression before beginning of file, give up
1619
  return (line, 0, -1)
1620

    
1621

    
1622
def CheckForCopyright(filename, lines, error):
1623
  """Logs an error if no Copyright message appears at the top of the file."""
1624

    
1625
  # We'll say it should occur by line 10. Don't forget there's a
1626
  # dummy line at the front.
1627
  for line in xrange(1, min(len(lines), 11)):
1628
    if re.search(r'Copyright', lines[line], re.I): break
1629
  else:                       # means no copyright line was found
1630
    error(filename, 0, 'legal/copyright', 5,
1631
          'No copyright message found.  '
1632
          'You should have a line: "Copyright [year] <Copyright Owner>"')
1633

    
1634

    
1635
def GetIndentLevel(line):
1636
  """Return the number of leading spaces in line.
1637

1638
  Args:
1639
    line: A string to check.
1640

1641
  Returns:
1642
    An integer count of leading spaces, possibly zero.
1643
  """
1644
  indent = Match(r'^( *)\S', line)
1645
  if indent:
1646
    return len(indent.group(1))
1647
  else:
1648
    return 0
1649

    
1650

    
1651
def GetHeaderGuardCPPVariable(filename):
1652
  """Returns the CPP variable that should be used as a header guard.
1653

1654
  Args:
1655
    filename: The name of a C++ header file.
1656

1657
  Returns:
1658
    The CPP variable that should be used as a header guard in the
1659
    named file.
1660

1661
  """
1662

    
1663
  # Restore the original filename in case cpplint is invoked from Emacs's
  # flymake.
1665
  filename = re.sub(r'_flymake\.h$', '.h', filename)
1666
  filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename)
1667
  # Replace 'c++' with 'cpp'.
1668
  filename = filename.replace('C++', 'cpp').replace('c++', 'cpp')
1669
  
1670
  fileinfo = FileInfo(filename)
1671
  file_path_from_root = fileinfo.RepositoryName()
1672
  if _root:
1673
    file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root)
1674
  return re.sub(r'[^a-zA-Z0-9]', '_', file_path_from_root).upper() + '_'
1675

    
1676

    
1677
def CheckForHeaderGuard(filename, clean_lines, error):
1678
  """Checks that the file contains a header guard.
1679

1680
  Logs an error if no #ifndef header guard is present.  For other
1681
  headers, checks that the full pathname is used.
1682

1683
  Args:
1684
    filename: The name of the C++ header file.
1685
    clean_lines: A CleansedLines instance containing the file.
1686
    error: The function to call with any errors found.
1687
  """
1688

    
1689
  # Don't check for header guards if there are error suppression
1690
  # comments somewhere in this file.
1691
  #
1692
  # Because this is silencing a warning for a nonexistent line, we
1693
  # only support the very specific NOLINT(build/header_guard) syntax,
1694
  # and not the general NOLINT or NOLINT(*) syntax.
1695
  raw_lines = clean_lines.lines_without_raw_strings
1696
  for i in raw_lines:
1697
    if Search(r'//\s*NOLINT\(build/header_guard\)', i):
1698
      return
1699

    
1700
  cppvar = GetHeaderGuardCPPVariable(filename)
1701

    
1702
  ifndef = ''
1703
  ifndef_linenum = 0
1704
  define = ''
1705
  endif = ''
1706
  endif_linenum = 0
1707
  for linenum, line in enumerate(raw_lines):
1708
    linesplit = line.split()
1709
    if len(linesplit) >= 2:
1710
      # find the first occurrence of #ifndef and #define, save arg
1711
      if not ifndef and linesplit[0] == '#ifndef':
1712
        # set ifndef to the header guard presented on the #ifndef line.
1713
        ifndef = linesplit[1]
1714
        ifndef_linenum = linenum
1715
      if not define and linesplit[0] == '#define':
1716
        define = linesplit[1]
1717
    # find the last occurrence of #endif, save entire line
1718
    if line.startswith('#endif'):
1719
      endif = line
1720
      endif_linenum = linenum
1721

    
1722
  if not ifndef or not define or ifndef != define:
1723
    error(filename, 0, 'build/header_guard', 5,
1724
          'No #ifndef header guard found, suggested CPP variable is: %s' %
1725
          cppvar)
1726
    return
1727

    
1728
  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
1729
  # for backward compatibility.
1730
  if ifndef != cppvar:
1731
    error_level = 0
1732
    if ifndef != cppvar + '_':
1733
      error_level = 5
1734

    
1735
    ParseNolintSuppressions(filename, raw_lines[ifndef_linenum], ifndef_linenum,
1736
                            error)
1737
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
1738
          '#ifndef header guard has wrong style, please use: %s' % cppvar)
1739

    
1740
  # Check for "//" comments on endif line.
1741
  ParseNolintSuppressions(filename, raw_lines[endif_linenum], endif_linenum,
1742
                          error)
1743
  match = Match(r'#endif\s*//\s*' + cppvar + r'(_)?\b', endif)
1744
  if match:
1745
    if match.group(1) == '_':
1746
      # Issue low severity warning for deprecated double trailing underscore
1747
      error(filename, endif_linenum, 'build/header_guard', 0,
1748
            '#endif line should be "#endif  // %s"' % cppvar)
1749
    return
1750

    
1751
  # Didn't find the corresponding "//" comment.  If this file does not
1752
  # contain any "//" comments at all, it could be that the compiler
1753
  # only wants "/**/" comments, look for those instead.
1754
  no_single_line_comments = True
1755
  for i in xrange(1, len(raw_lines) - 1):
1756
    line = raw_lines[i]
1757
    if Match(r'^(?:(?:\'(?:\.|[^\'])*\')|(?:"(?:\.|[^"])*")|[^\'"])*//', line):
1758
      no_single_line_comments = False
1759
      break
1760

    
1761
  if no_single_line_comments:
1762
    match = Match(r'#endif\s*/\*\s*' + cppvar + r'(_)?\s*\*/', endif)
1763
    if match:
1764
      if match.group(1) == '_':
1765
        # Low severity warning for double trailing underscore
1766
        error(filename, endif_linenum, 'build/header_guard', 0,
1767
              '#endif line should be "#endif  /* %s */"' % cppvar)
1768
      return
1769

    
1770
  # Didn't find anything
1771
  error(filename, endif_linenum, 'build/header_guard', 5,
1772
        '#endif line should be "#endif  // %s"' % cppvar)
1773

    
1774

    
1775
def CheckHeaderFileIncluded(filename, include_state, error):
1776
  """Logs an error if a .cc file does not include its header."""
1777

    
1778
  # Do not check test files
1779
  if filename.endswith('_test.cc') or filename.endswith('_unittest.cc'):
1780
    return
1781

    
1782
  fileinfo = FileInfo(filename)
1783
  headerfile = filename[0:len(filename) - 2] + 'h'
1784
  if not os.path.exists(headerfile):
1785
    return
1786
  headername = FileInfo(headerfile).RepositoryName()
1787
  first_include = 0
1788
  for section_list in include_state.include_list:
1789
    for f in section_list:
1790
      if headername in f[0] or f[0] in headername:
1791
        return
1792
      if not first_include:
1793
        first_include = f[1]
1794

    
1795
  error(filename, first_include, 'build/include', 5,
1796
        '%s should include its header file %s' % (fileinfo.RepositoryName(),
1797
                                                  headername))
1798

    
1799

    
1800
def CheckForBadCharacters(filename, lines, error):
1801
  """Logs an error for each line containing bad characters.
1802

1803
  Two kinds of bad characters:
1804

1805
  1. Unicode replacement characters: These indicate that either the file
1806
  contained invalid UTF-8 (likely) or Unicode replacement characters (which
1807
  it shouldn't).  Note that it's possible for this to throw off line
1808
  numbering if the invalid UTF-8 occurred adjacent to a newline.
1809

1810
  2. NUL bytes.  These are problematic for some tools.
1811

1812
  Args:
1813
    filename: The name of the current file.
1814
    lines: An array of strings, each representing a line of the file.
1815
    error: The function to call with any errors found.
1816
  """
1817
  for linenum, line in enumerate(lines):
1818
    if u'\ufffd' in line:
1819
      error(filename, linenum, 'readability/utf8', 5,
1820
            'Line contains invalid UTF-8 (or Unicode replacement character).')
1821
    if '\0' in line:
1822
      error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.')
1823

    
1824

    
1825
def CheckForNewlineAtEOF(filename, lines, error):
1826
  """Logs an error if there is no newline char at the end of the file.
1827

1828
  Args:
1829
    filename: The name of the current file.
1830
    lines: An array of strings, each representing a line of the file.
1831
    error: The function to call with any errors found.
1832
  """
1833

    
1834
  # The array lines() was created by adding two newlines to the
1835
  # original file (go figure), then splitting on \n.
1836
  # To verify that the file ends in \n, we just have to make sure the
1837
  # last-but-two element of lines() exists and is empty.
1838
  if len(lines) < 3 or lines[-2]:
1839
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
1840
          'Could not find a newline character at the end of the file.')
1841

    
1842

    
1843
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
1844
  """Logs an error if we see /* ... */ or "..." that extend past one line.
1845

1846
  /* ... */ comments are legit inside macros, for one line.
1847
  Otherwise, we prefer // comments, so it's ok to warn about the
1848
  other.  Likewise, it's ok for strings to extend across multiple
1849
  lines, as long as a line continuation character (backslash)
1850
  terminates each line. Although not currently prohibited by the C++
1851
  style guide, it's ugly and unnecessary. We don't do well with either
1852
  in this lint program, so we warn about both.
1853

1854
  Args:
1855
    filename: The name of the current file.
1856
    clean_lines: A CleansedLines instance containing the file.
1857
    linenum: The number of the line to check.
1858
    error: The function to call with any errors found.
1859
  """
1860
  line = clean_lines.elided[linenum]
1861

    
1862
  # Remove all \\ (escaped backslashes) from the line. They are OK, and the
1863
  # second (escaped) slash may trigger later \" detection erroneously.
1864
  line = line.replace('\\\\', '')
1865

    
1866
  if line.count('/*') > line.count('*/'):
1867
    error(filename, linenum, 'readability/multiline_comment', 5,
1868
          'Complex multi-line /*...*/-style comment found. '
1869
          'Lint may give bogus warnings.  '
1870
          'Consider replacing these with //-style comments, '
1871
          'with #if 0...#endif, '
1872
          'or with more clearly structured multi-line comments.')
1873

    
1874
  if (line.count('"') - line.count('\\"')) % 2:
1875
    error(filename, linenum, 'readability/multiline_string', 5,
1876
          'Multi-line string ("...") found.  This lint script doesn\'t '
1877
          'do well with such strings, and may give bogus warnings.  '
1878
          'Use C++11 raw strings or concatenation instead.')
1879

    
1880

    
1881
# (non-threadsafe name, thread-safe alternative, validation pattern)
1882
#
1883
# The validation pattern is used to eliminate false positives such as:
1884
#  _rand();               // false positive due to substring match.
1885
#  ->rand();              // some member function rand().
1886
#  ACMRandom rand(seed);  // some variable named rand.
1887
#  ISAACRandom rand();    // another variable named rand.
1888
#
1889
# Basically we require the return value of these functions to be used
1890
# in some expression context on the same line by matching on some
1891
# operator before the function name.  This eliminates constructors and
1892
# member function calls.
1893
_UNSAFE_FUNC_PREFIX = r'(?:[-+*/=%^&|(<]\s*|>\s+)'
1894
_THREADING_LIST = (
1895
    ('asctime(', 'asctime_r(', _UNSAFE_FUNC_PREFIX + r'asctime\([^)]+\)'),
1896
    ('ctime(', 'ctime_r(', _UNSAFE_FUNC_PREFIX + r'ctime\([^)]+\)'),
1897
    ('getgrgid(', 'getgrgid_r(', _UNSAFE_FUNC_PREFIX + r'getgrgid\([^)]+\)'),
1898
    ('getgrnam(', 'getgrnam_r(', _UNSAFE_FUNC_PREFIX + r'getgrnam\([^)]+\)'),
1899
    ('getlogin(', 'getlogin_r(', _UNSAFE_FUNC_PREFIX + r'getlogin\(\)'),
1900
    ('getpwnam(', 'getpwnam_r(', _UNSAFE_FUNC_PREFIX + r'getpwnam\([^)]+\)'),
1901
    ('getpwuid(', 'getpwuid_r(', _UNSAFE_FUNC_PREFIX + r'getpwuid\([^)]+\)'),
1902
    ('gmtime(', 'gmtime_r(', _UNSAFE_FUNC_PREFIX + r'gmtime\([^)]+\)'),
1903
    ('localtime(', 'localtime_r(', _UNSAFE_FUNC_PREFIX + r'localtime\([^)]+\)'),
1904
    ('rand(', 'rand_r(', _UNSAFE_FUNC_PREFIX + r'rand\(\)'),
1905
    ('strtok(', 'strtok_r(',
1906
     _UNSAFE_FUNC_PREFIX + r'strtok\([^)]+\)'),
1907
    ('ttyname(', 'ttyname_r(', _UNSAFE_FUNC_PREFIX + r'ttyname\([^)]+\)'),
1908
    )
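# Illustrative examples (not part of the original source) of how the prefix
# pattern above filters matches:
#   seed = rand();          // '= rand()' matches, flagged; suggest rand_r(...)
#   ACMRandom rand(seed);   // no operator before 'rand(', so it is not flagged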
1909

    
1910

    
1911
def CheckPosixThreading(filename, clean_lines, linenum, error):
1912
  """Checks for calls to thread-unsafe functions.
1913

1914
  Much code was originally written without consideration for multi-threading.
  Engineers also rely on their earlier experience, having learned POSIX before
  the threading extensions were added.  These checks guide them toward the
  thread-safe variants (when using POSIX directly).
1919

1920
  Args:
1921
    filename: The name of the current file.
1922
    clean_lines: A CleansedLines instance containing the file.
1923
    linenum: The number of the line to check.
1924
    error: The function to call with any errors found.
1925
  """
1926
  line = clean_lines.elided[linenum]
1927
  for single_thread_func, multithread_safe_func, pattern in _THREADING_LIST:
1928
    # Additional pattern matching check to confirm that this is the
1929
    # function we are looking for
1930
    if Search(pattern, line):
1931
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
1932
            'Consider using ' + multithread_safe_func +
1933
            '...) instead of ' + single_thread_func +
1934
            '...) for improved thread safety.')
1935

    
1936

    
1937
def CheckVlogArguments(filename, clean_lines, linenum, error):
1938
  """Checks that VLOG() is only used for defining a logging level.
1939

1940
  For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
1941
  VLOG(FATAL) are not.
1942

1943
  Args:
1944
    filename: The name of the current file.
1945
    clean_lines: A CleansedLines instance containing the file.
1946
    linenum: The number of the line to check.
1947
    error: The function to call with any errors found.
1948
  """
1949
  line = clean_lines.elided[linenum]
1950
  if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line):
1951
    error(filename, linenum, 'runtime/vlog', 5,
1952
          'VLOG() should be used with numeric verbosity level.  '
1953
          'Use LOG() if you want symbolic severity levels.')
1954

    
1955
# Matches invalid increment: *count++, which moves pointer instead of
1956
# incrementing a value.
1957
_RE_PATTERN_INVALID_INCREMENT = re.compile(
1958
    r'^\s*\*\w+(\+\+|--);')
1959

    
1960

    
1961
def CheckInvalidIncrement(filename, clean_lines, linenum, error):
1962
  """Checks for invalid increment *count++.
1963

1964
  For example following function:
1965
  void increment_counter(int* count) {
1966
    *count++;
1967
  }
1968
  is invalid, because it effectively does count++, moving the pointer, and should
1969
  be replaced with ++*count, (*count)++ or *count += 1.
1970

1971
  Args:
1972
    filename: The name of the current file.
1973
    clean_lines: A CleansedLines instance containing the file.
1974
    linenum: The number of the line to check.
1975
    error: The function to call with any errors found.
1976
  """
1977
  line = clean_lines.elided[linenum]
1978
  if _RE_PATTERN_INVALID_INCREMENT.match(line):
1979
    error(filename, linenum, 'runtime/invalid_increment', 5,
1980
          'Changing pointer instead of value (or unused value of operator*).')
1981

    
1982

    
1983
def IsMacroDefinition(clean_lines, linenum):
1984
  if Search(r'^#define', clean_lines[linenum]):
1985
    return True
1986

    
1987
  if linenum > 0 and Search(r'\\$', clean_lines[linenum - 1]):
1988
    return True
1989

    
1990
  return False
1991

    
1992

    
1993
def IsForwardClassDeclaration(clean_lines, linenum):
1994
  return Match(r'^\s*(\btemplate\b)*.*class\s+\w+;\s*$', clean_lines[linenum])
1995

    
1996

    
1997
class _BlockInfo(object):
1998
  """Stores information about a generic block of code."""
1999

    
2000
  def __init__(self, seen_open_brace):
2001
    self.seen_open_brace = seen_open_brace
2002
    self.open_parentheses = 0
2003
    self.inline_asm = _NO_ASM
2004
    self.check_namespace_indentation = False
2005

    
2006
  def CheckBegin(self, filename, clean_lines, linenum, error):
2007
    """Run checks that applies to text up to the opening brace.
2008

2009
    This is mostly for checking the text after the class identifier
2010
    and the "{", usually where the base class is specified.  For other
2011
    blocks, there isn't much to check, so we always pass.
2012

2013
    Args:
2014
      filename: The name of the current file.
2015
      clean_lines: A CleansedLines instance containing the file.
2016
      linenum: The number of the line to check.
2017
      error: The function to call with any errors found.
2018
    """
2019
    pass
2020

    
2021
  def CheckEnd(self, filename, clean_lines, linenum, error):
2022
    """Run checks that applies to text after the closing brace.
2023

2024
    This is mostly used for checking end of namespace comments.
2025

2026
    Args:
2027
      filename: The name of the current file.
2028
      clean_lines: A CleansedLines instance containing the file.
2029
      linenum: The number of the line to check.
2030
      error: The function to call with any errors found.
2031
    """
2032
    pass
2033

    
2034
  def IsBlockInfo(self):
2035
    """Returns true if this block is a _BlockInfo.
2036

2037
    This is convenient for verifying that an object is an instance of
2038
    a _BlockInfo, but not an instance of any of the derived classes.
2039

2040
    Returns:
2041
      True for this class, False for derived classes.
2042
    """
2043
    return self.__class__ == _BlockInfo
2044

    
2045

    
2046
class _ExternCInfo(_BlockInfo):
2047
  """Stores information about an 'extern "C"' block."""
2048

    
2049
  def __init__(self):
2050
    _BlockInfo.__init__(self, True)
2051

    
2052

    
2053
class _ClassInfo(_BlockInfo):
2054
  """Stores information about a class."""
2055

    
2056
  def __init__(self, name, class_or_struct, clean_lines, linenum):
2057
    _BlockInfo.__init__(self, False)
2058
    self.name = name
2059
    self.starting_linenum = linenum
2060
    self.is_derived = False
2061
    self.check_namespace_indentation = True
2062
    if class_or_struct == 'struct':
2063
      self.access = 'public'
2064
      self.is_struct = True
2065
    else:
2066
      self.access = 'private'
2067
      self.is_struct = False
2068

    
2069
    # Remember initial indentation level for this class.  Using raw_lines here
2070
    # instead of elided to account for leading comments.
2071
    self.class_indent = GetIndentLevel(clean_lines.raw_lines[linenum])
2072

    
2073
    # Try to find the end of the class.  This will be confused by things like:
2074
    #   class A {
2075
    #   } *x = { ...
2076
    #
2077
    # But it's still good enough for CheckSectionSpacing.
2078
    self.last_line = 0
2079
    depth = 0
2080
    for i in range(linenum, clean_lines.NumLines()):
2081
      line = clean_lines.elided[i]
2082
      depth += line.count('{') - line.count('}')
2083
      if not depth:
2084
        self.last_line = i
2085
        break
2086

    
2087
  def CheckBegin(self, filename, clean_lines, linenum, error):
2088
    # Look for a bare ':'
2089
    if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
2090
      self.is_derived = True
2091

    
2092
  def CheckEnd(self, filename, clean_lines, linenum, error):
2093
    # If there is a DISALLOW macro, it should appear near the end of
2094
    # the class.
2095
    seen_last_thing_in_class = False
2096
    for i in xrange(linenum - 1, self.starting_linenum, -1):
2097
      match = Search(
2098
          r'\b(DISALLOW_COPY_AND_ASSIGN|DISALLOW_IMPLICIT_CONSTRUCTORS)\(' +
2099
          self.name + r'\)',
2100
          clean_lines.elided[i])
2101
      if match:
2102
        if seen_last_thing_in_class:
2103
          error(filename, i, 'readability/constructors', 3,
2104
                match.group(1) + ' should be the last thing in the class')
2105
        break
2106

    
2107
      if not Match(r'^\s*$', clean_lines.elided[i]):
2108
        seen_last_thing_in_class = True
2109

    
2110
    # Check that closing brace is aligned with beginning of the class.
2111
    # Only do this if the closing brace is indented by only whitespaces.
2112
    # This means we will not check single-line class definitions.
2113
    indent = Match(r'^( *)\}', clean_lines.elided[linenum])
2114
    if indent and len(indent.group(1)) != self.class_indent:
2115
      if self.is_struct:
2116
        parent = 'struct ' + self.name
2117
      else:
2118
        parent = 'class ' + self.name
2119
      error(filename, linenum, 'whitespace/indent', 3,
2120
            'Closing brace should be aligned with beginning of %s' % parent)
2121

    
2122

    
2123
class _NamespaceInfo(_BlockInfo):
2124
  """Stores information about a namespace."""
2125

    
2126
  def __init__(self, name, linenum):
2127
    _BlockInfo.__init__(self, False)
2128
    self.name = name or ''
2129
    self.starting_linenum = linenum
2130
    self.check_namespace_indentation = True
2131

    
2132
  def CheckEnd(self, filename, clean_lines, linenum, error):
2133
    """Check end of namespace comments."""
2134
    line = clean_lines.raw_lines[linenum]
2135

    
2136
    # Check how many lines is enclosed in this namespace.  Don't issue
2137
    # warning for missing namespace comments if there aren't enough
2138
    # lines.  However, do apply checks if there is already an end of
2139
    # namespace comment and it's incorrect.
2140
    #
2141
    # TODO(unknown): We always want to check end of namespace comments
2142
    # if a namespace is large, but sometimes we also want to apply the
2143
    # check if a short namespace contained nontrivial things (something
2144
    # other than forward declarations).  There is currently no logic on
2145
    # deciding what these nontrivial things are, so this check is
2146
    # triggered by namespace size only, which works most of the time.
2147
    if (linenum - self.starting_linenum < 10
2148
        and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
2149
      return
2150

    
2151
    # Look for matching comment at end of namespace.
2152
    #
2153
    # Note that we accept C style "/* */" comments for terminating
2154
    # namespaces, so that code that terminate namespaces inside
2155
    # preprocessor macros can be cpplint clean.
2156
    #
2157
    # We also accept stuff like "// end of namespace <name>." with the
2158
    # period at the end.
2159
    #
2160
    # Besides these, we don't accept anything else; otherwise we might
2161
    # get false negatives when existing comment is a substring of the
2162
    # expected namespace.
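    # Illustrative examples (assumption, not in the original source) of
    # accepted terminators for 'namespace mynamespace':
    #   }  // namespace mynamespace
    #   }  /* namespace mynamespace */
    #   }  // end of namespace mynamespace.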
2163
    if self.name:
2164
      # Named namespace
2165
      if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) +
2166
                    r'[\*/\.\\\s]*$'),
2167
                   line):
2168
        error(filename, linenum, 'readability/namespace', 5,
2169
              'Namespace should be terminated with "// namespace %s"' %
2170
              self.name)
2171
    else:
2172
      # Anonymous namespace
2173
      if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line):
2174
        # If "// namespace anonymous" or "// anonymous namespace (more text)",
2175
        # mention "// anonymous namespace" as an acceptable form
2176
        if Match(r'}.*\b(namespace anonymous|anonymous namespace)\b', line):
2177
          error(filename, linenum, 'readability/namespace', 5,
2178
                'Anonymous namespace should be terminated with "// namespace"'
2179
                ' or "// anonymous namespace"')
2180
        else:
2181
          error(filename, linenum, 'readability/namespace', 5,
2182
                'Anonymous namespace should be terminated with "// namespace"')
2183

    
2184

    
2185
class _PreprocessorInfo(object):
2186
  """Stores checkpoints of nesting stacks when #if/#else is seen."""
2187

    
2188
  def __init__(self, stack_before_if):
2189
    # The entire nesting stack before #if
2190
    self.stack_before_if = stack_before_if
2191

    
2192
    # The entire nesting stack up to #else
2193
    self.stack_before_else = []
2194

    
2195
    # Whether we have already seen #else or #elif
2196
    self.seen_else = False
2197

    
2198

    
2199
class NestingState(object):
2200
  """Holds states related to parsing braces."""
2201

    
2202
  def __init__(self):
2203
    # Stack for tracking all braces.  An object is pushed whenever we
2204
    # see a "{", and popped when we see a "}".  Only 3 types of
2205
    # objects are possible:
2206
    # - _ClassInfo: a class or struct.
2207
    # - _NamespaceInfo: a namespace.
2208
    # - _BlockInfo: some other type of block.
2209
    self.stack = []
2210

    
2211
    # Top of the previous stack before each Update().
2212
    #
2213
    # Because the nesting_stack is updated at the end of each line, we
2214
    # had to do some convoluted checks to find out what is the current
2215
    # scope at the beginning of the line.  This check is simplified by
2216
    # saving the previous top of nesting stack.
2217
    #
2218
    # We could save the full stack, but we only need the top.  Copying
2219
    # the full nesting stack would slow down cpplint by ~10%.
2220
    self.previous_stack_top = []
2221

    
2222
    # Stack of _PreprocessorInfo objects.
2223
    self.pp_stack = []
2224

    
2225
  def SeenOpenBrace(self):
2226
    """Check if we have seen the opening brace for the innermost block.
2227

2228
    Returns:
2229
      True if we have seen the opening brace, False if the innermost
2230
      block is still expecting an opening brace.
2231
    """
2232
    return (not self.stack) or self.stack[-1].seen_open_brace
2233

    
2234
  def InNamespaceBody(self):
2235
    """Check if we are currently one level inside a namespace body.
2236

2237
    Returns:
2238
      True if top of the stack is a namespace block, False otherwise.
2239
    """
2240
    return self.stack and isinstance(self.stack[-1], _NamespaceInfo)
2241

    
2242
  def InExternC(self):
2243
    """Check if we are currently one level inside an 'extern "C"' block.
2244

2245
    Returns:
2246
      True if top of the stack is an extern block, False otherwise.
2247
    """
2248
    return self.stack and isinstance(self.stack[-1], _ExternCInfo)
2249

    
2250
  def InClassDeclaration(self):
2251
    """Check if we are currently one level inside a class or struct declaration.
2252

2253
    Returns:
2254
      True if top of the stack is a class/struct, False otherwise.
2255
    """
2256
    return self.stack and isinstance(self.stack[-1], _ClassInfo)
2257

    
2258
  def InAsmBlock(self):
2259
    """Check if we are currently one level inside an inline ASM block.
2260

2261
    Returns:
2262
      True if the top of the stack is a block containing inline ASM.
2263
    """
2264
    return self.stack and self.stack[-1].inline_asm != _NO_ASM
2265

    
2266
  def InTemplateArgumentList(self, clean_lines, linenum, pos):
2267
    """Check if current position is inside template argument list.
2268

2269
    Args:
2270
      clean_lines: A CleansedLines instance containing the file.
2271
      linenum: The number of the line to check.
2272
      pos: position just after the suspected template argument.
2273
    Returns:
2274
      True if (linenum, pos) is inside template arguments.
2275
    """
2276
    while linenum < clean_lines.NumLines():
2277
      # Find the earliest character that might indicate a template argument
2278
      line = clean_lines.elided[linenum]
2279
      match = Match(r'^[^{};=\[\]\.<>]*(.)', line[pos:])
2280
      if not match:
2281
        linenum += 1
2282
        pos = 0
2283
        continue
2284
      token = match.group(1)
2285
      pos += len(match.group(0))
2286

    
2287
      # These things do not look like a template argument list:
2288
      #   class Suspect {
2289
      #   class Suspect x; }
2290
      if token in ('{', '}', ';'): return False
2291

    
2292
      # These things look like a template argument list:
2293
      #   template <class Suspect>
2294
      #   template <class Suspect = default_value>
2295
      #   template <class Suspect[]>
2296
      #   template <class Suspect...>
2297
      if token in ('>', '=', '[', ']', '.'): return True
2298

    
2299
      # Check if token is an unmatched '<'.
2300
      # If not, move on to the next character.
2301
      if token != '<':
2302
        pos += 1
2303
        if pos >= len(line):
2304
          linenum += 1
2305
          pos = 0
2306
        continue
2307

    
2308
      # We can't be sure if we just find a single '<', and need to
2309
      # find the matching '>'.
2310
      (_, end_line, end_pos) = CloseExpression(clean_lines, linenum, pos - 1)
2311
      if end_pos < 0:
2312
        # Not sure if template argument list or syntax error in file
2313
        return False
2314
      linenum = end_line
2315
      pos = end_pos
2316
    return False
2317

    
2318
  def UpdatePreprocessor(self, line):
2319
    """Update preprocessor stack.
2320

2321
    We need to handle preprocessors due to classes like this:
2322
      #ifdef SWIG
2323
      struct ResultDetailsPageElementExtensionPoint {
2324
      #else
2325
      struct ResultDetailsPageElementExtensionPoint : public Extension {
2326
      #endif
2327

2328
    We make the following assumptions (good enough for most files):
2329
    - Preprocessor condition evaluates to true from #if up to first
2330
      #else/#elif/#endif.
2331

2332
    - Preprocessor condition evaluates to false from #else/#elif up
2333
      to #endif.  We still perform lint checks on these lines, but
2334
      these do not affect nesting stack.
2335

2336
    Args:
2337
      line: current line to check.
2338
    """
2339
    if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line):
2340
      # Beginning of #if block, save the nesting stack here.  The saved
2341
      # stack will allow us to restore the parsing state in the #else case.
2342
      self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack)))
2343
    elif Match(r'^\s*#\s*(else|elif)\b', line):
2344
      # Beginning of #else block
2345
      if self.pp_stack:
2346
        if not self.pp_stack[-1].seen_else:
2347
          # This is the first #else or #elif block.  Remember the
2348
          # whole nesting stack up to this point.  This is what we
2349
          # keep after the #endif.
2350
          self.pp_stack[-1].seen_else = True
2351
          self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
2352

    
2353
        # Restore the stack to how it was before the #if
2354
        self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
2355
      else:
2356
        # TODO(unknown): unexpected #else, issue warning?
2357
        pass
2358
    elif Match(r'^\s*#\s*endif\b', line):
2359
      # End of #if or #else blocks.
2360
      if self.pp_stack:
2361
        # If we saw an #else, we will need to restore the nesting
2362
        # stack to its former state before the #else, otherwise we
2363
        # will just continue from where we left off.
2364
        if self.pp_stack[-1].seen_else:
2365
          # Here we can just use a shallow copy since we are the last
2366
          # reference to it.
2367
          self.stack = self.pp_stack[-1].stack_before_else
2368
        # Drop the corresponding #if
2369
        self.pp_stack.pop()
2370
      else:
2371
        # TODO(unknown): unexpected #endif, issue warning?
2372
        pass
2373

    
2374
  # TODO(unknown): Update() is too long, but we will refactor later.
2375
  def Update(self, filename, clean_lines, linenum, error):
2376
    """Update nesting state with current line.
2377

2378
    Args:
2379
      filename: The name of the current file.
2380
      clean_lines: A CleansedLines instance containing the file.
2381
      linenum: The number of the line to check.
2382
      error: The function to call with any errors found.
2383
    """
2384
    line = clean_lines.elided[linenum]
2385

    
2386
    # Remember top of the previous nesting stack.
2387
    #
2388
    # The stack is always pushed/popped and not modified in place, so
2389
    # we can just do a shallow copy instead of copy.deepcopy.  Using
2390
    # deepcopy would slow down cpplint by ~28%.
2391
    if self.stack:
2392
      self.previous_stack_top = self.stack[-1]
2393
    else:
2394
      self.previous_stack_top = None
2395

    
2396
    # Update pp_stack
2397
    self.UpdatePreprocessor(line)
2398

    
2399
    # Count parentheses.  This is to avoid adding struct arguments to
2400
    # the nesting stack.
2401
    if self.stack:
2402
      inner_block = self.stack[-1]
2403
      depth_change = line.count('(') - line.count(')')
2404
      inner_block.open_parentheses += depth_change
2405

    
2406
      # Also check if we are starting or ending an inline assembly block.
2407
      if inner_block.inline_asm in (_NO_ASM, _END_ASM):
2408
        if (depth_change != 0 and
2409
            inner_block.open_parentheses == 1 and
2410
            _MATCH_ASM.match(line)):
2411
          # Enter assembly block
2412
          inner_block.inline_asm = _INSIDE_ASM
2413
        else:
2414
          # Not entering assembly block.  If previous line was _END_ASM,
2415
          # we will now shift to _NO_ASM state.
2416
          inner_block.inline_asm = _NO_ASM
2417
      elif (inner_block.inline_asm == _INSIDE_ASM and
2418
            inner_block.open_parentheses == 0):
2419
        # Exit assembly block
2420
        inner_block.inline_asm = _END_ASM
2421

    
2422
    # Consume namespace declaration at the beginning of the line.  Do
2423
    # this in a loop so that we catch same line declarations like this:
2424
    #   namespace proto2 { namespace bridge { class MessageSet; } }
2425
    while True:
2426
      # Match start of namespace.  The "\b\s*" below catches namespace
      # declarations even if they aren't followed by whitespace, so that we
      # don't confuse our namespace checker.  The missing spaces will be
      # flagged by CheckSpacing.
2430
      namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
2431
      if not namespace_decl_match:
2432
        break
2433

    
2434
      new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
2435
      self.stack.append(new_namespace)
2436

    
2437
      line = namespace_decl_match.group(2)
2438
      if line.find('{') != -1:
2439
        new_namespace.seen_open_brace = True
2440
        line = line[line.find('{') + 1:]
2441

    
2442
    # Look for a class declaration in whatever is left of the line
2443
    # after parsing namespaces.  The regexp accounts for decorated classes
2444
    # such as in:
2445
    #   class LOCKABLE API Object {
2446
    #   };
2447
    class_decl_match = Match(
2448
        r'^(\s*(?:template\s*<[\w\s<>,:]*>\s*)?'
2449
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
2450
        r'(.*)$', line)
2451
    if (class_decl_match and
2452
        (not self.stack or self.stack[-1].open_parentheses == 0)):
2453
      # We do not want to accept classes that are actually template arguments:
2454
      #   template <class Ignore1,
2455
      #             class Ignore2 = Default<Args>,
2456
      #             template <Args> class Ignore3>
2457
      #   void Function() {};
2458
      #
2459
      # To avoid template argument cases, we scan forward and look for
2460
      # an unmatched '>'.  If we see one, assume we are inside a
2461
      # template argument list.
2462
      end_declaration = len(class_decl_match.group(1))
2463
      if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
2464
        self.stack.append(_ClassInfo(
2465
            class_decl_match.group(3), class_decl_match.group(2),
2466
            clean_lines, linenum))
2467
        line = class_decl_match.group(4)
2468

    
2469
    # If we have not yet seen the opening brace for the innermost block,
2470
    # run checks here.
2471
    if not self.SeenOpenBrace():
2472
      self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
2473

    
2474
    # Update access control if we are inside a class/struct
2475
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
2476
      classinfo = self.stack[-1]
2477
      access_match = Match(
2478
          r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
2479
          r':(?:[^:]|$)',
2480
          line)
2481
      if access_match:
2482
        classinfo.access = access_match.group(2)
2483

    
2484
        # Check that access keywords are indented +1 space.  Skip this
2485
        # check if the keywords are not preceded by whitespaces.
2486
        indent = access_match.group(1)
2487
        if (len(indent) != classinfo.class_indent + 1 and
2488
            Match(r'^\s*$', indent)):
2489
          if classinfo.is_struct:
2490
            parent = 'struct ' + classinfo.name
2491
          else:
2492
            parent = 'class ' + classinfo.name
2493
          slots = ''
2494
          if access_match.group(3):
2495
            slots = access_match.group(3)
2496
          error(filename, linenum, 'whitespace/indent', 3,
2497
                '%s%s: should be indented +1 space inside %s' % (
2498
                    access_match.group(2), slots, parent))
2499

    
2500
    # Consume braces or semicolons from what's left of the line
2501
    while True:
2502
      # Match first brace, semicolon, or closed parenthesis.
2503
      matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
2504
      if not matched:
2505
        break
2506

    
2507
      token = matched.group(1)
2508
      if token == '{':
2509
        # If namespace or class hasn't seen an opening brace yet, mark
2510
        # namespace/class head as complete.  Push a new block onto the
2511
        # stack otherwise.
2512
        if not self.SeenOpenBrace():
2513
          self.stack[-1].seen_open_brace = True
2514
        elif Match(r'^extern\s*"[^"]*"\s*\{', line):
2515
          self.stack.append(_ExternCInfo())
2516
        else:
2517
          self.stack.append(_BlockInfo(True))
2518
          if _MATCH_ASM.match(line):
2519
            self.stack[-1].inline_asm = _BLOCK_ASM
2520

    
2521
      elif token == ';' or token == ')':
2522
        # If we haven't seen an opening brace yet, but we already saw
2523
        # a semicolon, this is probably a forward declaration.  Pop
2524
        # the stack for these.
2525
        #
2526
        # Similarly, if we haven't seen an opening brace yet, but we
2527
        # already saw a closing parenthesis, then these are probably
2528
        # function arguments with extra "class" or "struct" keywords.
2529
        # Also pop the stack for these.
2530
        if not self.SeenOpenBrace():
2531
          self.stack.pop()
2532
      else:  # token == '}'
2533
        # Perform end of block checks and pop the stack.
2534
        if self.stack:
2535
          self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
2536
          self.stack.pop()
2537
      line = matched.group(2)
2538

    
2539
  def InnermostClass(self):
2540
    """Get class info on the top of the stack.
2541

2542
    Returns:
2543
      A _ClassInfo object if we are inside a class, or None otherwise.
2544
    """
2545
    for i in range(len(self.stack), 0, -1):
2546
      classinfo = self.stack[i - 1]
2547
      if isinstance(classinfo, _ClassInfo):
2548
        return classinfo
2549
    return None
2550

    
2551
  def CheckCompletedBlocks(self, filename, error):
2552
    """Checks that all classes and namespaces have been completely parsed.
2553

2554
    Call this when all lines in a file have been processed.
2555
    Args:
2556
      filename: The name of the current file.
2557
      error: The function to call with any errors found.
2558
    """
2559
    # Note: This test can result in false positives if #ifdef constructs
2560
    # get in the way of brace matching. See the testBuildClass test in
2561
    # cpplint_unittest.py for an example of this.
2562
    for obj in self.stack:
2563
      if isinstance(obj, _ClassInfo):
2564
        error(filename, obj.starting_linenum, 'build/class', 5,
2565
              'Failed to find complete declaration of class %s' %
2566
              obj.name)
2567
      elif isinstance(obj, _NamespaceInfo):
2568
        error(filename, obj.starting_linenum, 'build/namespaces', 5,
2569
              'Failed to find complete declaration of namespace %s' %
2570
              obj.name)
2571

    
2572

    
2573
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
2574
                                  nesting_state, error):
2575
  r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
2576

2577
  Complain about several constructs which gcc-2 accepts, but which are
2578
  not standard C++.  Warning about these in lint is one way to ease the
2579
  transition to new compilers.
2580
  - put storage class first (e.g. "static const" instead of "const static").
2581
  - "%lld" instead of %qd" in printf-type functions.
2582
  - "%1$d" is non-standard in printf-type functions.
2583
  - "\%" is an undefined character escape sequence.
2584
  - text after #endif is not allowed.
2585
  - invalid inner-style forward declaration.
2586
  - >? and <? operators, and their >?= and <?= cousins.
2587

2588
  Additionally, check for constructor/destructor style violations and reference
2589
  members, as it is very convenient to do so while checking for
2590
  gcc-2 compliance.
2591

2592
  Args:
2593
    filename: The name of the current file.
2594
    clean_lines: A CleansedLines instance containing the file.
2595
    linenum: The number of the line to check.
2596
    nesting_state: A NestingState instance which maintains information about
2597
                   the current stack of nested blocks being parsed.
2598
    error: A callable to which errors are reported, which takes 4 arguments:
2599
           filename, line number, error level, and message
2600
  """
2601

    
2602
  # Remove comments from the line, but leave in strings for now.
2603
  line = clean_lines.lines[linenum]
2604

    
2605
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
2606
    error(filename, linenum, 'runtime/printf_format', 3,
2607
          '%q in format strings is deprecated.  Use %ll instead.')
2608

    
2609
  if Search(r'printf\s*\(.*".*%\d+\$', line):
2610
    error(filename, linenum, 'runtime/printf_format', 2,
2611
          '%N$ formats are unconventional.  Try rewriting to avoid them.')
2612

    
2613
  # Remove escaped backslashes before looking for undefined escapes.
2614
  line = line.replace('\\\\', '')
2615

    
2616
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
2617
    error(filename, linenum, 'build/printf_format', 3,
2618
          '%, [, (, and { are undefined character escapes.  Unescape them.')
2619

    
2620
  # For the rest, work with both comments and strings removed.
2621
  line = clean_lines.elided[linenum]
2622

    
2623
  if Search(r'\b(const|volatile|void|char|short|int|long'
2624
            r'|float|double|signed|unsigned'
2625
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
2626
            r'\s+(register|static|extern|typedef)\b',
2627
            line):
2628
    error(filename, linenum, 'build/storage_class', 5,
2629
          'Storage class (static, extern, typedef, etc) should be first.')
2630

    
2631
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
2632
    error(filename, linenum, 'build/endif_comment', 5,
2633
          'Uncommented text after #endif is non-standard.  Use a comment.')
2634

    
2635
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
2636
    error(filename, linenum, 'build/forward_decl', 5,
2637
          'Inner-style forward declarations are invalid.  Remove this line.')
2638

    
2639
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
2640
            line):
2641
    error(filename, linenum, 'build/deprecated', 3,
2642
          '>? and <? (max and min) operators are non-standard and deprecated.')
2643

    
2644
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
2645
    # TODO(unknown): Could it be expanded safely to arbitrary references,
2646
    # without triggering too many false positives? The first
2647
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
2648
    # the restriction.
2649
    # Here's the original regexp, for the reference:
2650
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
2651
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
2652
    error(filename, linenum, 'runtime/member_string_references', 2,
2653
          'const string& members are dangerous. It is much better to use '
2654
          'alternatives, such as pointers or simple constants.')
2655

    
2656
  # Everything else in this function operates on class declarations.
2657
  # Return early if the top of the nesting stack is not a class, or if
2658
  # the class head is not completed yet.
2659
  classinfo = nesting_state.InnermostClass()
2660
  if not classinfo or not classinfo.seen_open_brace:
2661
    return
2662

    
2663
  # The class may have been declared with namespace or classname qualifiers.
2664
  # The constructor and destructor will not have those qualifiers.
2665
  base_classname = classinfo.name.split('::')[-1]
2666

    
2667
  # Look for single-argument constructors that aren't marked explicit.
2668
  # Technically a valid construct, but against style. Also look for
2669
  # non-single-argument constructors which are also technically valid, but
2670
  # strongly suggest something is wrong.
2671
  explicit_constructor_match = Match(
2672
      r'\s+(?:inline\s+)?(explicit\s+)?(?:inline\s+)?%s\s*'
2673
      r'\(((?:[^()]|\([^()]*\))*)\)'
2674
      % re.escape(base_classname),
2675
      line)
2676

    
2677
  if explicit_constructor_match:
2678
    is_marked_explicit = explicit_constructor_match.group(1)
2679

    
2680
    if not explicit_constructor_match.group(2):
2681
      constructor_args = []
2682
    else:
2683
      constructor_args = explicit_constructor_match.group(2).split(',')
2684

    
2685
    # collapse arguments so that commas in template parameter lists and function
2686
    # argument parameter lists don't split arguments in two
2687
    i = 0
2688
    while i < len(constructor_args):
2689
      constructor_arg = constructor_args[i]
2690
      while (constructor_arg.count('<') > constructor_arg.count('>') or
2691
             constructor_arg.count('(') > constructor_arg.count(')')):
2692
        constructor_arg += ',' + constructor_args[i + 1]
2693
        del constructor_args[i + 1]
2694
      constructor_args[i] = constructor_arg
2695
      i += 1
2696

    
2697
    defaulted_args = [arg for arg in constructor_args if '=' in arg]
2698
    noarg_constructor = (not constructor_args or  # empty arg list
2699
                         # 'void' arg specifier
2700
                         (len(constructor_args) == 1 and
2701
                          constructor_args[0].strip() == 'void'))
2702
    onearg_constructor = ((len(constructor_args) == 1 and  # exactly one arg
2703
                           not noarg_constructor) or
2704
                          # all but at most one arg defaulted
2705
                          (len(constructor_args) >= 1 and
2706
                           not noarg_constructor and
2707
                           len(defaulted_args) >= len(constructor_args) - 1))
2708
    initializer_list_constructor = bool(
2709
        onearg_constructor and
2710
        Search(r'\bstd\s*::\s*initializer_list\b', constructor_args[0]))
2711
    copy_constructor = bool(
2712
        onearg_constructor and
2713
        Match(r'(const\s+)?%s(\s*<[^>]*>)?(\s+const)?\s*(?:<\w+>\s*)?&'
2714
              % re.escape(base_classname), constructor_args[0].strip()))
2715

    
2716
    if (not is_marked_explicit and
2717
        onearg_constructor and
2718
        not initializer_list_constructor and
2719
        not copy_constructor):
2720
      if defaulted_args:
2721
        error(filename, linenum, 'runtime/explicit', 5,
2722
              'Constructors callable with one argument '
2723
              'should be marked explicit.')
2724
      else:
2725
        error(filename, linenum, 'runtime/explicit', 5,
2726
              'Single-parameter constructors should be marked explicit.')
2727
    elif is_marked_explicit and not onearg_constructor:
2728
      if noarg_constructor:
2729
        error(filename, linenum, 'runtime/explicit', 5,
2730
              'Zero-parameter constructors should not be marked explicit.')
2731
      else:
2732
        error(filename, linenum, 'runtime/explicit', 0,
2733
              'Constructors that require multiple arguments '
2734
              'should not be marked explicit.')
2735

    
2736

    
2737
def CheckSpacingForFunctionCall(filename, clean_lines, linenum, error):
2738
  """Checks for the correctness of various spacing around function calls.
2739

2740
  Args:
2741
    filename: The name of the current file.
2742
    clean_lines: A CleansedLines instance containing the file.
2743
    linenum: The number of the line to check.
2744
    error: The function to call with any errors found.
2745
  """
2746
  line = clean_lines.elided[linenum]
2747

    
2748
  # Since function calls often occur inside if/for/while/switch
2749
  # expressions - which have their own, more liberal conventions - we
2750
  # first see if we should be looking inside such an expression for a
2751
  # function call, to which we can apply more strict standards.
2752
  fncall = line    # if there's no control flow construct, look at whole line
2753
  for pattern in (r'\bif\s*\((.*)\)\s*{',
2754
                  r'\bfor\s*\((.*)\)\s*{',
2755
                  r'\bwhile\s*\((.*)\)\s*[{;]',
2756
                  r'\bswitch\s*\((.*)\)\s*{'):
2757
    match = Search(pattern, line)
2758
    if match:
2759
      fncall = match.group(1)    # look inside the parens for function calls
2760
      break
2761

    
2762
  # Except in if/for/while/switch, there should never be space
2763
  # immediately inside parens (eg "f( 3, 4 )").  We make an exception
2764
  # for nested parens ( (a+b) + c ).  Likewise, there should never be
2765
  # a space before a ( when it's a function argument.  I assume it's a
2766
  # function argument when the char before the whitespace is legal in
2767
  # a function name (alnum + _) and we're not starting a macro. Also ignore
2768
  # pointers and references to arrays and functions because they're too tricky:
2769
  # we use a very simple way to recognize these:
2770
  # " (something)(maybe-something)" or
2771
  # " (something)(maybe-something," or
2772
  # " (something)[something]"
2773
  # Note that we assume the contents of [] to be short enough that
2774
  # they'll never need to wrap.
2775
  if (  # Ignore control structures.
2776
      not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
2777
                 fncall) and
2778
      # Ignore pointers/references to functions.
2779
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
2780
      # Ignore pointers/references to arrays.
2781
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
2782
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
2783
      error(filename, linenum, 'whitespace/parens', 4,
2784
            'Extra space after ( in function call')
2785
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
2786
      error(filename, linenum, 'whitespace/parens', 2,
2787
            'Extra space after (')
2788
    if (Search(r'\w\s+\(', fncall) and
2789
        not Search(r'#\s*define|typedef|using\s+\w+\s*=', fncall) and
2790
        not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall) and
2791
        not Search(r'\bcase\s+\(', fncall)):
2792
      # TODO(unknown): Space after an operator function seems to be a common
2793
      # error, silence those for now by restricting them to highest verbosity.
2794
      if Search(r'\boperator_*\b', line):
2795
        error(filename, linenum, 'whitespace/parens', 0,
2796
              'Extra space before ( in function call')
2797
      else:
2798
        error(filename, linenum, 'whitespace/parens', 4,
2799
              'Extra space before ( in function call')
2800
    # If the ) is followed only by a newline or a { + newline, assume it's
2801
    # part of a control statement (if/while/etc), and don't complain
2802
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
2803
      # If the closing parenthesis is preceded by only whitespaces,
2804
      # try to give a more descriptive error message.
2805
      if Search(r'^\s+\)', fncall):
2806
        error(filename, linenum, 'whitespace/parens', 2,
2807
              'Closing ) should be moved to the previous line')
2808
      else:
2809
        error(filename, linenum, 'whitespace/parens', 2,
2810
              'Extra space before )')
2811

    
2812

    
2813
def IsBlankLine(line):
2814
  """Returns true if the given line is blank.
2815

2816
  We consider a line to be blank if the line is empty or consists of
2817
  only white spaces.
2818

2819
  Args:
2820
    line: A line of a string.
2821

2822
  Returns:
2823
    True, if the given line is blank.
2824
  """
2825
  return not line or line.isspace()
2826

    
2827

    
2828
def CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
2829
                                 error):
2830
  is_namespace_indent_item = (
2831
      len(nesting_state.stack) > 1 and
2832
      nesting_state.stack[-1].check_namespace_indentation and
2833
      isinstance(nesting_state.previous_stack_top, _NamespaceInfo) and
2834
      nesting_state.previous_stack_top == nesting_state.stack[-2])
2835

    
2836
  if ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
2837
                                     clean_lines.elided, line):
2838
    CheckItemIndentationInNamespace(filename, clean_lines.elided,
2839
                                    line, error)
2840

    
2841

    
2842
def CheckForFunctionLengths(filename, clean_lines, linenum,
2843
                            function_state, error):
2844
  """Reports for long function bodies.
2845

2846
  For an overview why this is done, see:
2847
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
2848

2849
  Uses a simplistic algorithm assuming other style guidelines
2850
  (especially spacing) are followed.
2851
  Only checks unindented functions, so class members are unchecked.
2852
  Trivial bodies are unchecked, so constructors with huge initializer lists
2853
  may be missed.
2854
  Blank/comment lines are not counted so as to avoid encouraging the removal
2855
  of vertical space and comments just to get through a lint check.
2856
  NOLINT *on the last line of a function* disables this check.
2857

2858
  Args:
2859
    filename: The name of the current file.
2860
    clean_lines: A CleansedLines instance containing the file.
2861
    linenum: The number of the line to check.
2862
    function_state: Current function name and lines in body so far.
2863
    error: The function to call with any errors found.
2864
  """
2865
  lines = clean_lines.lines
2866
  line = lines[linenum]
2867
  joined_line = ''
2868

    
2869
  starting_func = False
2870
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
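  # Illustrative (hypothetical) matches for the pattern above:
  #   "void MyClass::DoWork(int x) {"  -> group(1) == "void MyClass::DoWork"
  #   "TEST_F(MyTest, DoesThing) {"    -> group(1) == "TEST_F"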
2871
  match_result = Match(regexp, line)
2872
  if match_result:
2873
    # If the name is all caps and underscores, figure it's a macro and
2874
    # ignore it, unless it's TEST or TEST_F.
2875
    function_name = match_result.group(1).split()[-1]
2876
    if function_name == 'TEST' or function_name == 'TEST_F' or (
2877
        not Match(r'[A-Z_]+$', function_name)):
2878
      starting_func = True
2879

    
2880
  if starting_func:
2881
    body_found = False
2882
    for start_linenum in xrange(linenum, clean_lines.NumLines()):
2883
      start_line = lines[start_linenum]
2884
      joined_line += ' ' + start_line.lstrip()
2885
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
2886
        body_found = True
2887
        break                              # ... ignore
2888
      elif Search(r'{', start_line):
2889
        body_found = True
2890
        function = Search(r'((\w|:)*)\(', line).group(1)
2891
        if Match(r'TEST', function):    # Handle TEST... macros
2892
          parameter_regexp = Search(r'(\(.*\))', joined_line)
2893
          if parameter_regexp:             # Ignore bad syntax
2894
            function += parameter_regexp.group(1)
2895
        else:
2896
          function += '()'
2897
        function_state.Begin(function)
2898
        break
2899
    if not body_found:
2900
      # No body for the function (or evidence of a non-function) was found.
2901
      error(filename, linenum, 'readability/fn_size', 5,
2902
            'Lint failed to find start of function body.')
2903
  elif Match(r'^\}\s*$', line):  # function end
2904
    function_state.Check(error, filename, linenum)
2905
    function_state.End()
2906
  elif not Match(r'^\s*$', line):
2907
    function_state.Count()  # Count non-blank/non-comment lines.
2908

    
2909

    
2910
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')
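# Illustrative (hypothetical) matches for the pattern above:
#   "// TODO(alice): fix this"  -> groups: (' ', '(alice)', ' ')
#   "// TODO: fix this"         -> username group is None (reported below)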
2911

    
2912

    
2913
def CheckComment(line, filename, linenum, next_line_start, error):
2914
  """Checks for common mistakes in comments.
2915

2916
  Args:
2917
    line: The line in question.
2918
    filename: The name of the current file.
2919
    linenum: The number of the line to check.
2920
    next_line_start: The first non-whitespace column of the next line.
2921
    error: The function to call with any errors found.
2922
  """
2923
  commentpos = line.find('//')
2924
  if commentpos != -1:
2925
    # Check if the // may be in quotes.  If so, ignore it
2926
    # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
2927
    if (line.count('"', 0, commentpos) -
2928
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
2929
      # Allow one space for new scopes, two spaces otherwise:
2930
      if (not (Match(r'^.*{ *//', line) and next_line_start == commentpos) and
2931
          ((commentpos >= 1 and
2932
            line[commentpos-1] not in string.whitespace) or
2933
           (commentpos >= 2 and
2934
            line[commentpos-2] not in string.whitespace))):
2935
        error(filename, linenum, 'whitespace/comments', 2,
2936
              'At least two spaces is best between code and comments')
2937

    
2938
      # Checks for common mistakes in TODO comments.
2939
      comment = line[commentpos:]
2940
      match = _RE_PATTERN_TODO.match(comment)
2941
      if match:
2942
        # One whitespace is correct; zero whitespace is handled elsewhere.
2943
        leading_whitespace = match.group(1)
2944
        if len(leading_whitespace) > 1:
2945
          error(filename, linenum, 'whitespace/todo', 2,
2946
                'Too many spaces before TODO')
2947

    
2948
        username = match.group(2)
2949
        if not username:
2950
          error(filename, linenum, 'readability/todo', 2,
2951
                'Missing username in TODO; it should look like '
2952
                '"// TODO(my_username): Stuff."')
2953

    
2954
        middle_whitespace = match.group(3)
2955
        # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison
2956
        if middle_whitespace != ' ' and middle_whitespace != '':
2957
          error(filename, linenum, 'whitespace/todo', 2,
2958
                'TODO(my_username) should be followed by a space')
2959

    
2960
      # If the comment contains an alphanumeric character, there
2961
      # should be a space somewhere between it and the // unless
2962
      # it's a /// or //! Doxygen comment.
2963
      if (Match(r'//[^ ]*\w', comment) and
2964
          not Match(r'(///|//\!)(\s+|$)', comment)):
2965
        error(filename, linenum, 'whitespace/comments', 4,
2966
              'Should have a space between // and comment')
2967

    
2968

    
2969
def CheckAccess(filename, clean_lines, linenum, nesting_state, error):
2970
  """Checks for improper use of DISALLOW* macros.
2971

2972
  Args:
2973
    filename: The name of the current file.
2974
    clean_lines: A CleansedLines instance containing the file.
2975
    linenum: The number of the line to check.
2976
    nesting_state: A NestingState instance which maintains information about
2977
                   the current stack of nested blocks being parsed.
2978
    error: The function to call with any errors found.
2979
  """
2980
  line = clean_lines.elided[linenum]  # get rid of comments and strings
2981

    
2982
  matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|'
2983
                   r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line)
2984
  if not matched:
2985
    return
2986
  if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo):
2987
    if nesting_state.stack[-1].access != 'private':
2988
      error(filename, linenum, 'readability/constructors', 3,
2989
            '%s must be in the private: section' % matched.group(1))
2990

    
2991
  else:
2992
    # Found DISALLOW* macro outside a class declaration, or perhaps it
2993
    # was used inside a function when it should have been part of the
2994
    # class declaration.  We could issue a warning here, but it
2995
    # probably resulted in a compiler error already.
2996
    pass
2997

    
2998

    
2999
def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
3000
  """Checks for the correctness of various spacing issues in the code.
3001

3002
  Things we check for: spaces around operators, spaces after
3003
  if/for/while/switch, no spaces around parens in function calls, two
3004
  spaces between code and comment, don't start a block with a blank
3005
  line, don't end a function with a blank line, don't add a blank line
3006
  after public/protected/private, don't have too many blank lines in a row.
3007

3008
  Args:
3009
    filename: The name of the current file.
3010
    clean_lines: A CleansedLines instance containing the file.
3011
    linenum: The number of the line to check.
3012
    nesting_state: A NestingState instance which maintains information about
3013
                   the current stack of nested blocks being parsed.
3014
    error: The function to call with any errors found.
3015
  """
3016

    
3017
  # Don't use "elided" lines here, otherwise we can't check commented lines.
3018
  # Don't want to use "raw" either, because we don't want to check inside C++11
3019
  # raw strings.
3020
  raw = clean_lines.lines_without_raw_strings
3021
  line = raw[linenum]
3022

    
3023
  # Before nixing comments, check if the line is blank for no good
3024
  # reason.  This includes the first line after a block is opened, and
3025
  # blank lines at the end of a function (ie, right before a line like '}'
3026
  #
3027
  # Skip all the blank line checks if we are immediately inside a
3028
  # namespace body.  In other words, don't issue blank line warnings
3029
  # for this block:
3030
  #   namespace {
3031
  #
3032
  #   }
3033
  #
3034
  # A warning about missing end of namespace comments will be issued instead.
3035
  #
3036
  # Also skip blank line checks for 'extern "C"' blocks, which are formatted
3037
  # like namespaces.
3038
  if (IsBlankLine(line) and
3039
      not nesting_state.InNamespaceBody() and
3040
      not nesting_state.InExternC()):
3041
    elided = clean_lines.elided
3042
    prev_line = elided[linenum - 1]
3043
    prevbrace = prev_line.rfind('{')
3044
    # TODO(unknown): Don't complain if line before blank line, and line after,
3045
    #                both start with alnums and are indented the same amount.
3046
    #                This ignores whitespace at the start of a namespace block
3047
    #                because those are not usually indented.
3048
    if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
3049
      # OK, we have a blank line at the start of a code block.  Before we
3050
      # complain, we check if it is an exception to the rule: The previous
3051
      # non-empty line has the parameters of a function header that are indented
3052
      # 4 spaces (because they did not fit in a 80 column line when placed on
3053
      # the same line as the function name).  We also check for the case where
3054
      # the previous line is indented 6 spaces, which may happen when the
3055
      # initializers of a constructor do not fit into a 80 column line.
3056
      exception = False
3057
      if Match(r' {6}\w', prev_line):  # Initializer list?
3058
        # We are looking for the opening column of initializer list, which
3059
        # should be indented 4 spaces to cause 6 space indentation afterwards.
3060
        search_position = linenum-2
3061
        while (search_position >= 0
3062
               and Match(r' {6}\w', elided[search_position])):
3063
          search_position -= 1
3064
        exception = (search_position >= 0
3065
                     and elided[search_position][:5] == '    :')
3066
      else:
3067
        # Search for the function arguments or an initializer list.  We use a
3068
        # simple heuristic here: If the line is indented 4 spaces; and we have a
3069
        # closing paren, without the opening paren, followed by an opening brace
3070
        # or colon (for initializer lists) we assume that it is the last line of
3071
        # a function header.  If we have a colon indented 4 spaces, it is an
3072
        # initializer list.
3073
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
3074
                           prev_line)
3075
                     or Match(r' {4}:', prev_line))
3076

    
3077
      if not exception:
3078
        error(filename, linenum, 'whitespace/blank_line', 2,
3079
              'Redundant blank line at the start of a code block '
3080
              'should be deleted.')
3081
    # Ignore blank lines at the end of a block in a long if-else
3082
    # chain, like this:
3083
    #   if (condition1) {
3084
    #     // Something followed by a blank line
3085
    #
3086
    #   } else if (condition2) {
3087
    #     // Something else
3088
    #   }
3089
    if linenum + 1 < clean_lines.NumLines():
3090
      next_line = raw[linenum + 1]
3091
      if (next_line
3092
          and Match(r'\s*}', next_line)
3093
          and next_line.find('} else ') == -1):
3094
        error(filename, linenum, 'whitespace/blank_line', 3,
3095
              'Redundant blank line at the end of a code block '
3096
              'should be deleted.')
3097

    
3098
    matched = Match(r'\s*(public|protected|private):', prev_line)
3099
    if matched:
3100
      error(filename, linenum, 'whitespace/blank_line', 3,
3101
            'Do not leave a blank line after "%s:"' % matched.group(1))
3102

    
3103
  # Next, check comments
3104
  next_line_start = 0
3105
  if linenum + 1 < clean_lines.NumLines():
3106
    next_line = raw[linenum + 1]
3107
    next_line_start = len(next_line) - len(next_line.lstrip())
3108
  CheckComment(line, filename, linenum, next_line_start, error)
3109

    
3110
  # get rid of comments and strings
3111
  line = clean_lines.elided[linenum]
3112

    
3113
  # You shouldn't have spaces before your brackets, except maybe after
3114
  # 'delete []' or 'return []() {};'
3115
  if Search(r'\w\s+\[', line) and not Search(r'(?:delete|return)\s+\[', line):
3116
    error(filename, linenum, 'whitespace/braces', 5,
3117
          'Extra space before [')
3118

    
3119
  # In range-based for, we wanted spaces before and after the colon, but
3120
  # not around "::" tokens that might appear.
3121
  if (Search(r'for *\(.*[^:]:[^: ]', line) or
3122
      Search(r'for *\(.*[^: ]:[^:]', line)):
3123
    error(filename, linenum, 'whitespace/forcolon', 2,
3124
          'Missing space around colon in range-based for loop')
3125

    
3126

    
3127
def CheckOperatorSpacing(filename, clean_lines, linenum, error):
3128
  """Checks for horizontal spacing around operators.
3129

3130
  Args:
3131
    filename: The name of the current file.
3132
    clean_lines: A CleansedLines instance containing the file.
3133
    linenum: The number of the line to check.
3134
    error: The function to call with any errors found.
3135
  """
3136
  line = clean_lines.elided[linenum]
3137

    
3138
  # Don't try to do spacing checks for operator methods.  Do this by
3139
  # replacing the troublesome characters with something else,
3140
  # preserving column position for all other characters.
3141
  #
3142
  # The replacement is done repeatedly to avoid false positives from
3143
  # operators that call operators.
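  # Illustrative (hypothetical) example: "bool operator==(const Foo& a)" is
  # rewritten to "bool operator__(const Foo& a)" so that the "==" in the
  # operator name is not flagged by the checks below.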
3144
  while True:
3145
    match = Match(r'^(.*\boperator\b)(\S+)(\s*\(.*)$', line)
3146
    if match:
3147
      line = match.group(1) + ('_' * len(match.group(2))) + match.group(3)
3148
    else:
3149
      break
3150

    
3151
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
3152
  # Otherwise not.  Note we only check for non-spaces on *both* sides;
3153
  # sometimes people put non-spaces on one side when aligning ='s among
3154
  # many lines (not that this is behavior that I approve of...)
3155
  if ((Search(r'[\w.]=', line) or
3156
       Search(r'=[\w.]', line))
3157
      and not Search(r'\b(if|while|for) ', line)
3158
      # Operators taken from [lex.operators] in C++11 standard.
3159
      and not Search(r'(>=|<=|==|!=|&=|\^=|\|=|\+=|\*=|\/=|\%=)', line)
3160
      and not Search(r'operator=', line)):
3161
    error(filename, linenum, 'whitespace/operators', 4,
3162
          'Missing spaces around =')
3163

    
3164
  # It's ok not to have spaces around binary operators like + - * /, but if
3165
  # there's too little whitespace, we get concerned.  It's hard to tell,
3166
  # though, so we punt on this one for now.  TODO.
3167

    
3168
  # You should always have whitespace around binary operators.
3169
  #
3170
  # Check <= and >= first to avoid false positives with < and >, then
3171
  # check non-include lines for spacing around < and >.
3172
  #
3173
  # If the operator is followed by a comma, assume it's be used in a
3174
  # macro context and don't do any checks.  This avoids false
3175
  # positives.
3176
  #
3177
  # Note that && is not included here.  Those are checked separately
3178
  # in CheckRValueReference
3179
  match = Search(r'[^<>=!\s](==|!=|<=|>=|\|\|)[^<>=!\s,;\)]', line)
3180
  if match:
3181
    error(filename, linenum, 'whitespace/operators', 3,
3182
          'Missing spaces around %s' % match.group(1))
3183
  elif not Match(r'#.*include', line):
3184
    # Look for < that is not surrounded by spaces.  This is only
3185
    # triggered if both sides are missing spaces, even though
3186
    # technically should should flag if at least one side is missing a
3187
    # space.  This is done to avoid some false positives with shifts.
3188
    match = Match(r'^(.*[^\s<])<[^\s=<,]', line)
3189
    if match:
3190
      (_, _, end_pos) = CloseExpression(
3191
          clean_lines, linenum, len(match.group(1)))
3192
      if end_pos <= -1:
3193
        error(filename, linenum, 'whitespace/operators', 3,
3194
              'Missing spaces around <')
3195

    
3196
    # Look for > that is not surrounded by spaces.  Similar to the
3197
    # above, we only trigger if both sides are missing spaces to avoid
3198
    # false positives with shifts.
3199
    match = Match(r'^(.*[^-\s>])>[^\s=>,]', line)
3200
    if match:
3201
      (_, _, start_pos) = ReverseCloseExpression(
3202
          clean_lines, linenum, len(match.group(1)))
3203
      if start_pos <= -1:
3204
        error(filename, linenum, 'whitespace/operators', 3,
3205
              'Missing spaces around >')
3206

    
3207
  # We allow no-spaces around << when used like this: 10<<20, but
3208
  # not otherwise (particularly, not when used as streams)
3209
  #
3210
  # We also allow operators following an opening parenthesis, since
3211
  # those tend to be macros that deal with operators.
3212
  match = Search(r'(operator|[^\s(<])(?:L|UL|ULL|l|ul|ull)?<<([^\s,=<])', line)
3213
  if (match and not (match.group(1).isdigit() and match.group(2).isdigit()) and
3214
      not (match.group(1) == 'operator' and match.group(2) == ';')):
3215
    error(filename, linenum, 'whitespace/operators', 3,
3216
          'Missing spaces around <<')
3217

    
3218
  # We allow no-spaces around >> for almost anything.  This is because
3219
  # C++11 allows ">>" to close nested templates, which accounts for
3220
  # most cases when ">>" is not followed by a space.
3221
  #
3222
  # We still warn on ">>" followed by alpha character, because that is
3223
  # likely due to ">>" being used for right shifts, e.g.:
3224
  #   value >> alpha
3225
  #
3226
  # When ">>" is used to close templates, the alphanumeric letter that
3227
  # follows would be part of an identifier, and there should still be
3228
  # a space separating the template type and the identifier.
3229
  #   type<type<type>> alpha
3230
  match = Search(r'>>[a-zA-Z_]', line)
3231
  if match:
3232
    error(filename, linenum, 'whitespace/operators', 3,
3233
          'Missing spaces around >>')
3234

    
3235
  # There shouldn't be space around unary operators
3236
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
3237
  if match:
3238
    error(filename, linenum, 'whitespace/operators', 4,
3239
          'Extra space for operator %s' % match.group(1))
3240

    
3241

    
3242
def CheckParenthesisSpacing(filename, clean_lines, linenum, error):
3243
  """Checks for horizontal spacing around parentheses.
3244

3245
  Args:
3246
    filename: The name of the current file.
3247
    clean_lines: A CleansedLines instance containing the file.
3248
    linenum: The number of the line to check.
3249
    error: The function to call with any errors found.
3250
  """
3251
  line = clean_lines.elided[linenum]
3252

    
3253
  # No spaces after an if, while, switch, or for
3254
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
3255
  if match:
3256
    error(filename, linenum, 'whitespace/parens', 5,
3257
          'Missing space before ( in %s' % match.group(1))
3258

    
3259
  # For if/for/while/switch, the left and right parens should be
3260
  # consistent about how many spaces are inside the parens, and
3261
  # there should either be zero or one spaces inside the parens.
3262
  # We don't want: "if ( foo)" or "if ( foo   )".
3263
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
3264
  match = Search(r'\b(if|for|while|switch)\s*'
3265
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
3266
                 line)
3267
  if match:
3268
    if len(match.group(2)) != len(match.group(4)):
3269
      if not (match.group(3) == ';' and
3270
              len(match.group(2)) == 1 + len(match.group(4)) or
3271
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
3272
        error(filename, linenum, 'whitespace/parens', 5,
3273
              'Mismatching spaces inside () in %s' % match.group(1))
3274
    if len(match.group(2)) not in [0, 1]:
3275
      error(filename, linenum, 'whitespace/parens', 5,
3276
            'Should have zero or one spaces inside ( and ) in %s' %
3277
            match.group(1))
3278

    
3279

    
3280
def CheckCommaSpacing(filename, clean_lines, linenum, error):
3281
  """Checks for horizontal spacing near commas and semicolons.
3282

3283
  Args:
3284
    filename: The name of the current file.
3285
    clean_lines: A CleansedLines instance containing the file.
3286
    linenum: The number of the line to check.
3287
    error: The function to call with any errors found.
3288
  """
3289
  raw = clean_lines.lines_without_raw_strings
3290
  line = clean_lines.elided[linenum]
3291

    
3292
  # You should always have a space after a comma (either as fn arg or operator)
3293
  #
3294
  # This does not apply when the non-space character following the
3295
  # comma is another comma, since the only time when that happens is
3296
  # for empty macro arguments.
3297
  #
3298
  # We run this check in two passes: first pass on elided lines to
3299
  # verify that lines contain missing whitespaces, second pass on raw
3300
  # lines to confirm that those missing whitespaces are not due to
3301
  # elided comments.
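  # Illustrative (hypothetical) examples:
  #   "f(a,b)"              -> flagged: missing space after ,
  #   "f(a, b)"             -> not flagged
  #   "bool operator,(A&);" -> "operator,(" is rewritten to "F(" first, so the
  #                            comma in the operator name is not flagged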
3302
  if (Search(r',[^,\s]', ReplaceAll(r'\boperator\s*,\s*\(', 'F(', line)) and
3303
      Search(r',[^,\s]', raw[linenum])):
3304
    error(filename, linenum, 'whitespace/comma', 3,
3305
          'Missing space after ,')
3306

    
3307
  # You should always have a space after a semicolon
3308
  # except for few corner cases
3309
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
3310
  # space after ;
3311
  if Search(r';[^\s};\\)/]', line):
3312
    error(filename, linenum, 'whitespace/semicolon', 3,
3313
          'Missing space after ;')
3314

    
3315

    
3316
def CheckBracesSpacing(filename, clean_lines, linenum, error):
3317
  """Checks for horizontal spacing near commas.
3318

3319
  Args:
3320
    filename: The name of the current file.
3321
    clean_lines: A CleansedLines instance containing the file.
3322
    linenum: The number of the line to check.
3323
    error: The function to call with any errors found.
3324
  """
3325
  line = clean_lines.elided[linenum]
3326

    
3327
  # Except after an opening paren, or after another opening brace (in case of
3328
  # an initializer list, for instance), you should have spaces before your
3329
  # braces. And since you should never have braces at the beginning of a line,
3330
  # this is an easy test.
3331
  match = Match(r'^(.*[^ ({>]){', line)
3332
  if match:
3333
    # Try a bit harder to check for brace initialization.  This
3334
    # happens in one of the following forms:
3335
    #   Constructor() : initializer_list_{} { ... }
3336
    #   Constructor{}.MemberFunction()
3337
    #   Type variable{};
3338
    #   FunctionCall(type{}, ...);
3339
    #   LastArgument(..., type{});
3340
    #   LOG(INFO) << type{} << " ...";
3341
    #   map_of_type[{...}] = ...;
3342
    #   ternary = expr ? new type{} : nullptr;
3343
    #   OuterTemplate<InnerTemplateConstructor<Type>{}>
3344
    #
3345
    # We check for the character following the closing brace, and
3346
    # silence the warning if it's one of those listed above, i.e.
3347
    # "{.;,)<>]:".
3348
    #
3349
    # To account for nested initializer list, we allow any number of
3350
    # closing braces up to "{;,)<".  We can't simply silence the
3351
    # warning on first sight of closing brace, because that would
3352
    # cause false negatives for things that are not initializer lists.
3353
    #   Silence this:         But not this:
3354
    #     Outer{                if (...) {
3355
    #       Inner{...}            if (...){  // Missing space before {
3356
    #     };                    }
3357
    #
3358
    # There is a false negative with this approach if people inserted
3359
    # spurious semicolons, e.g. "if (cond){};", but we will catch the
3360
    # spurious semicolon with a separate check.
3361
    (endline, endlinenum, endpos) = CloseExpression(
3362
        clean_lines, linenum, len(match.group(1)))
3363
    trailing_text = ''
3364
    if endpos > -1:
3365
      trailing_text = endline[endpos:]
3366
    for offset in xrange(endlinenum + 1,
3367
                         min(endlinenum + 3, clean_lines.NumLines() - 1)):
3368
      trailing_text += clean_lines.elided[offset]
3369
    if not Match(r'^[\s}]*[{.;,)<>\]:]', trailing_text):
3370
      error(filename, linenum, 'whitespace/braces', 5,
3371
            'Missing space before {')
3372

    
3373
  # Make sure '} else {' has spaces.
3374
  if Search(r'}else', line):
3375
    error(filename, linenum, 'whitespace/braces', 5,
3376
          'Missing space before else')
3377

    
3378
  # You shouldn't have a space before a semicolon at the end of the line.
3379
  # There's a special case for "for" since the style guide allows space before
3380
  # the semicolon there.
3381
  if Search(r':\s*;\s*$', line):
3382
    error(filename, linenum, 'whitespace/semicolon', 5,
3383
          'Semicolon defining empty statement. Use {} instead.')
3384
  elif Search(r'^\s*;\s*$', line):
3385
    error(filename, linenum, 'whitespace/semicolon', 5,
3386
          'Line contains only semicolon. If this should be an empty statement, '
3387
          'use {} instead.')
3388
  elif (Search(r'\s+;\s*$', line) and
3389
        not Search(r'\bfor\b', line)):
3390
    error(filename, linenum, 'whitespace/semicolon', 5,
3391
          'Extra space before last semicolon. If this should be an empty '
3392
          'statement, use {} instead.')
3393

    
3394

    
3395
def IsDecltype(clean_lines, linenum, column):
3396
  """Check if the token ending on (linenum, column) is decltype().
3397

3398
  Args:
3399
    clean_lines: A CleansedLines instance containing the file.
3400
    linenum: the number of the line to check.
3401
    column: end column of the token to check.
3402
  Returns:
3403
    True if this token is a decltype() expression, False otherwise.
3404
  """
3405
  (text, _, start_col) = ReverseCloseExpression(clean_lines, linenum, column)
3406
  if start_col < 0:
3407
    return False
3408
  if Search(r'\bdecltype\s*$', text[0:start_col]):
3409
    return True
3410
  return False
3411

    
3412

    
3413
def IsTemplateParameterList(clean_lines, linenum, column):
3414
  """Check if the token ending on (linenum, column) is the end of template<>.
3415

3416
  Args:
3417
    clean_lines: A CleansedLines instance containing the file.
3418
    linenum: the number of the line to check.
3419
    column: end column of the token to check.
3420
  Returns:
3421
    True if this token is end of a template parameter list, False otherwise.
3422
  """
3423
  (_, startline, startpos) = ReverseCloseExpression(
3424
      clean_lines, linenum, column)
3425
  if (startpos > -1 and
3426
      Search(r'\btemplate\s*$', clean_lines.elided[startline][0:startpos])):
3427
    return True
3428
  return False
3429

    
3430

    
3431
def IsRValueType(typenames, clean_lines, nesting_state, linenum, column):
3432
  """Check if the token ending on (linenum, column) is a type.
3433

3434
  Assumes that text to the right of the column is "&&" or a function
3435
  name.
3436

3437
  Args:
3438
    typenames: set of type names from template-argument-list.
3439
    clean_lines: A CleansedLines instance containing the file.
3440
    nesting_state: A NestingState instance which maintains information about
3441
                   the current stack of nested blocks being parsed.
3442
    linenum: the number of the line to check.
3443
    column: end column of the token to check.
3444
  Returns:
3445
    True if this token is a type, False if we are not sure.
3446
  """
3447
  prefix = clean_lines.elided[linenum][0:column]
3448

    
3449
  # Get one word to the left.  If we failed to do so, this is most
3450
  # likely not a type, since it's unlikely that the type name and "&&"
3451
  # would be split across multiple lines.
3452
  match = Match(r'^(.*)(\b\w+|[>*)&])\s*$', prefix)
3453
  if not match:
3454
    return False
3455

    
3456
  # Check text following the token.  If it's "&&>" or "&&," or "&&...", it's
3457
  # most likely an rvalue reference used inside a template.
3458
  suffix = clean_lines.elided[linenum][column:]
3459
  if Match(r'&&\s*(?:[>,]|\.\.\.)', suffix):
3460
    return True
3461

    
3462
  # Check for known types and end of templates:
3463
  #   int&& variable
3464
  #   vector<int>&& variable
3465
  #
3466
  # Because this function is called recursively, we also need to
3467
  # recognize pointer and reference types:
3468
  #   int* Function()
3469
  #   int& Function()
3470
  if (match.group(2) in typenames or
3471
      match.group(2) in ['char', 'char16_t', 'char32_t', 'wchar_t', 'bool',
3472
                         'short', 'int', 'long', 'signed', 'unsigned',
3473
                         'float', 'double', 'void', 'auto', '>', '*', '&']):
3474
    return True
3475

    
3476
  # If we see a close parenthesis, look for decltype on the other side.
3477
  # decltype would unambiguously identify a type, anything else is
3478
  # probably a parenthesized expression and not a type.
3479
  if match.group(2) == ')':
3480
    return IsDecltype(
3481
        clean_lines, linenum, len(match.group(1)) + len(match.group(2)) - 1)
3482

    
3483
  # Check for casts and cv-qualifiers.
3484
  #   match.group(1)  remainder
3485
  #   --------------  ---------
3486
  #   const_cast<     type&&
3487
  #   const           type&&
3488
  #   type            const&&
3489
  if Search(r'\b(?:const_cast\s*<|static_cast\s*<|dynamic_cast\s*<|'
3490
            r'reinterpret_cast\s*<|\w+\s)\s*$',
3491
            match.group(1)):
3492
    return True
3493

    
3494
  # Look for a preceding symbol that might help differentiate the context.
3495
  # These are the cases that would be ambiguous:
3496
  #   match.group(1)  remainder
3497
  #   --------------  ---------
3498
  #   Call         (   expression &&
3499
  #   Declaration  (   type&&
3500
  #   sizeof       (   type&&
3501
  #   if           (   expression &&
3502
  #   while        (   expression &&
3503
  #   for          (   type&&
3504
  #   for(         ;   expression &&
3505
  #   statement    ;   type&&
3506
  #   block        {   type&&
3507
  #   constructor  {   expression &&
3508
  start = linenum
3509
  line = match.group(1)
3510
  match_symbol = None
3511
  while start >= 0:
3512
    # We want to skip over identifiers and commas to get to a symbol.
3513
    # Commas are skipped so that we can find the opening parenthesis
3514
    # for function parameter lists.
3515
    match_symbol = Match(r'^(.*)([^\w\s,])[\w\s,]*$', line)
3516
    if match_symbol:
3517
      break
3518
    start -= 1
3519
    line = clean_lines.elided[start]
3520

    
3521
  if not match_symbol:
3522
    # Probably the first statement in the file is an rvalue reference
3523
    return True
3524

    
3525
  if match_symbol.group(2) == '}':
3526
    # Found closing brace, probably an indication of this:
3527
    #   block{} type&&
3528
    return True
3529

    
3530
  if match_symbol.group(2) == ';':
3531
    # Found semicolon, probably one of these:
3532
    #   for(; expression &&
3533
    #   statement; type&&
3534

    
3535
    # Look for the previous 'for(' in the previous lines.
3536
    before_text = match_symbol.group(1)
3537
    for i in xrange(start - 1, max(start - 6, 0), -1):
3538
      before_text = clean_lines.elided[i] + before_text
3539
    if Search(r'for\s*\([^{};]*$', before_text):
3540
      # This is the condition inside a for-loop
3541
      return False
3542

    
3543
    # Did not find a for-init-statement before this semicolon, so this
3544
    # is probably a new statement and not a condition.
3545
    return True
3546

    
3547
  if match_symbol.group(2) == '{':
3548
    # Found opening brace, probably one of these:
3549
    #   block{ type&& = ... ; }
3550
    #   constructor{ expression && expression }
3551

    
3552
    # Look for a closing brace or a semicolon.  If we see a semicolon
3553
    # first, this is probably an rvalue reference.
3554
    line = clean_lines.elided[start][0:len(match_symbol.group(1)) + 1]
3555
    end = start
3556
    depth = 1
3557
    while True:
3558
      for ch in line:
3559
        if ch == ';':
3560
          return True
3561
        elif ch == '{':
3562
          depth += 1
3563
        elif ch == '}':
3564
          depth -= 1
3565
          if depth == 0:
3566
            return False
3567
      end += 1
3568
      if end >= clean_lines.NumLines():
3569
        break
3570
      line = clean_lines.elided[end]
3571
    # Incomplete program?
3572
    return False
3573

    
3574
  if match_symbol.group(2) == '(':
3575
    # Opening parenthesis.  Need to check what's to the left of the
3576
    # parenthesis.  Look back one extra line for additional context.
3577
    before_text = match_symbol.group(1)
3578
    if linenum > 1:
3579
      before_text = clean_lines.elided[linenum - 1] + before_text
3580
    before_text = match_symbol.group(1)
3581

    
3582
    # Patterns that are likely to be types:
3583
    #   [](type&&
3584
    #   for (type&&
3585
    #   sizeof(type&&
3586
    #   operator=(type&&
3587
    #
3588
    if Search(r'(?:\]|\bfor|\bsizeof|\boperator\s*\S+\s*)\s*$', before_text):
3589
      return True
3590

    
3591
    # Patterns that are likely to be expressions:
3592
    #   if (expression &&
3593
    #   while (expression &&
3594
    #   : initializer(expression &&
3595
    #   , initializer(expression &&
3596
    #   ( FunctionCall(expression &&
3597
    #   + FunctionCall(expression &&
3598
    #   + (expression &&
3599
    #
3600
    # The last '+' represents operators such as '+' and '-'.
3601
    if Search(r'(?:\bif|\bwhile|[-+=%^(<!?:,&*]\s*)$', before_text):
3602
      return False
3603

    
3604
    # Something else.  Check that tokens to the left look like
3605
    #   return_type function_name
3606
    match_func = Match(r'^(.*\S.*)\s+\w(?:\w|::)*(?:<[^<>]*>)?\s*$',
3607
                       match_symbol.group(1))
3608
    if match_func:
3609
      # Check for constructors, which don't have return types.
3610
      if Search(r'\b(?:explicit|inline)$', match_func.group(1)):
3611
        return True
3612
      implicit_constructor = Match(r'\s*(\w+)\((?:const\s+)?(\w+)', prefix)
3613
      if (implicit_constructor and
3614
          implicit_constructor.group(1) == implicit_constructor.group(2)):
3615
        return True
3616
      return IsRValueType(typenames, clean_lines, nesting_state, linenum,
3617
                          len(match_func.group(1)))
3618

    
3619
    # Nothing before the function name.  If this is inside a block scope,
3620
    # this is probably a function call.
3621
    return not (nesting_state.previous_stack_top and
3622
                nesting_state.previous_stack_top.IsBlockInfo())
3623

    
3624
  if match_symbol.group(2) == '>':
3625
    # Possibly a closing bracket, check that what's on the other side
3626
    # looks like the start of a template.
3627
    return IsTemplateParameterList(
3628
        clean_lines, start, len(match_symbol.group(1)))
3629

    
3630
  # Some other symbol, usually something like "a=b&&c".  This is most
3631
  # likely not a type.
3632
  return False
3633

    
3634

    
3635
def IsDeletedOrDefault(clean_lines, linenum):
3636
  """Check if current constructor or operator is deleted or default.
3637

3638
  Args:
3639
    clean_lines: A CleansedLines instance containing the file.
3640
    linenum: The number of the line to check.
3641
  Returns:
3642
    True if this is a deleted or default constructor.
3643
  """
3644
  open_paren = clean_lines.elided[linenum].find('(')
3645
  if open_paren < 0:
3646
    return False
3647
  (close_line, _, close_paren) = CloseExpression(
3648
      clean_lines, linenum, open_paren)
3649
  if close_paren < 0:
3650
    return False
3651
  return Match(r'\s*=\s*(?:delete|default)\b', close_line[close_paren:])
3652

    
3653

    
3654
def IsRValueAllowed(clean_lines, linenum, typenames):
3655
  """Check if RValue reference is allowed on a particular line.
3656

3657
  Args:
3658
    clean_lines: A CleansedLines instance containing the file.
3659
    linenum: The number of the line to check.
3660
    typenames: set of type names from template-argument-list.
3661
  Returns:
3662
    True if line is within the region where RValue references are allowed.
3663
  """
3664
  # Allow region marked by PUSH/POP macros
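  # Illustrative (hypothetical) example of such a region:
  #   GOOGLE_ALLOW_RVALUE_REFERENCES_PUSH
  #   void Accept(Foo&& foo);   // rvalue reference allowed here
  #   GOOGLE_ALLOW_RVALUE_REFERENCES_POP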
3665
  for i in xrange(linenum, 0, -1):
3666
    line = clean_lines.elided[i]
3667
    if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
3668
      if not line.endswith('PUSH'):
3669
        return False
3670
      for j in xrange(linenum, clean_lines.NumLines(), 1):
3671
        line = clean_lines.elided[j]
3672
        if Match(r'GOOGLE_ALLOW_RVALUE_REFERENCES_(?:PUSH|POP)', line):
3673
          return line.endswith('POP')
3674

    
3675
  # Allow operator=
3676
  line = clean_lines.elided[linenum]
3677
  if Search(r'\boperator\s*=\s*\(', line):
3678
    return IsDeletedOrDefault(clean_lines, linenum)
3679

    
3680
  # Allow constructors
3681
  match = Match(r'\s*(?:[\w<>]+::)*([\w<>]+)\s*::\s*([\w<>]+)\s*\(', line)
3682
  if match and match.group(1) == match.group(2):
3683
    return IsDeletedOrDefault(clean_lines, linenum)
3684
  if Search(r'\b(?:explicit|inline)\s+[\w<>]+\s*\(', line):
3685
    return IsDeletedOrDefault(clean_lines, linenum)
3686

    
3687
  if Match(r'\s*[\w<>]+\s*\(', line):
3688
    previous_line = 'ReturnType'
3689
    if linenum > 0:
3690
      previous_line = clean_lines.elided[linenum - 1]
3691
    if Match(r'^\s*$', previous_line) or Search(r'[{}:;]\s*$', previous_line):
3692
      return IsDeletedOrDefault(clean_lines, linenum)
3693

    
3694
  # Reject types not mentioned in template-argument-list
3695
  while line:
3696
    match = Match(r'^.*?(\w+)\s*&&(.*)$', line)
3697
    if not match:
3698
      break
3699
    if match.group(1) not in typenames:
3700
      return False
3701
    line = match.group(2)
3702

    
3703
  # All RValue types that were in template-argument-list should have
3704
  # been removed by now.  Those were allowed, assuming that they will
3705
  # be forwarded.
3706
  #
3707
  # If there are no remaining RValue types left (i.e. types that were
3708
  # not found in template-argument-list), flag those as not allowed.
3709
  return line.find('&&') < 0
3710

    
3711

    
3712
def GetTemplateArgs(clean_lines, linenum):
3713
  """Find list of template arguments associated with this function declaration.
3714

3715
  Args:
3716
    clean_lines: A CleansedLines instance containing the file.
3717
    linenum: Line number containing the start of the function declaration,
3718
             usually one line after the end of the template-argument-list.
3719
  Returns:
3720
    Set of type names, or empty set if this does not appear to have
3721
    any template parameters.
3722
  """
3723
  # Find start of function
3724
  func_line = linenum
3725
  while func_line > 0:
3726
    line = clean_lines.elided[func_line]
3727
    if Match(r'^\s*$', line):
3728
      return set()
3729
    if line.find('(') >= 0:
3730
      break
3731
    func_line -= 1
3732
  if func_line == 0:
3733
    return set()
3734

    
3735
  # Collapse template-argument-list into a single string
3736
  argument_list = ''
3737
  match = Match(r'^(\s*template\s*)<', clean_lines.elided[func_line])
3738
  if match:
3739
    # template-argument-list on the same line as function name
3740
    start_col = len(match.group(1))
3741
    _, end_line, end_col = CloseExpression(clean_lines, func_line, start_col)
3742
    if end_col > -1 and end_line == func_line:
3743
      start_col += 1  # Skip the opening bracket
3744
      argument_list = clean_lines.elided[func_line][start_col:end_col]
3745

    
3746
  elif func_line > 1:
3747
    # template-argument-list one line before function name
3748
    match = Match(r'^(.*)>\s*$', clean_lines.elided[func_line - 1])
3749
    if match:
3750
      end_col = len(match.group(1))
3751
      _, start_line, start_col = ReverseCloseExpression(
3752
          clean_lines, func_line - 1, end_col)
3753
      if start_col > -1:
3754
        start_col += 1  # Skip the opening bracket
3755
        while start_line < func_line - 1:
3756
          argument_list += clean_lines.elided[start_line][start_col:]
3757
          start_col = 0
3758
          start_line += 1
3759
        argument_list += clean_lines.elided[func_line - 1][start_col:end_col]
3760

    
3761
  if not argument_list:
3762
    return set()
3763

    
3764
  # Extract type names
3765
  typenames = set()
3766
  while True:
3767
    match = Match(r'^[,\s]*(?:typename|class)(?:\.\.\.)?\s+(\w+)(.*)$',
3768
                  argument_list)
3769
    if not match:
3770
      break
3771
    typenames.add(match.group(1))
3772
    argument_list = match.group(2)
3773
  return typenames
3774

    
3775

    
3776
def CheckRValueReference(filename, clean_lines, linenum, nesting_state, error):
3777
  """Check for rvalue references.
3778

3779
  Args:
3780
    filename: The name of the current file.
3781
    clean_lines: A CleansedLines instance containing the file.
3782
    linenum: The number of the line to check.
3783
    nesting_state: A NestingState instance which maintains information about
3784
                   the current stack of nested blocks being parsed.
3785
    error: The function to call with any errors found.
3786
  """
3787
  # Find lines missing spaces around &&.
3788
  # TODO(unknown): currently we don't check for rvalue references
3789
  # with spaces surrounding the && to avoid false positives with
3790
  # boolean expressions.
3791
  line = clean_lines.elided[linenum]
3792
  match = Match(r'^(.*\S)&&', line)
3793
  if not match:
3794
    match = Match(r'(.*)&&\S', line)
3795
  if (not match) or '(&&)' in line or Search(r'\boperator\s*$', match.group(1)):
3796
    return
3797

    
3798
  # Either poorly formed && or an rvalue reference, check the context
3799
  # to get a more accurate error message.  Mostly we want to determine
3800
  # if what's to the left of "&&" is a type or not.
3801
  typenames = GetTemplateArgs(clean_lines, linenum)
3802
  and_pos = len(match.group(1))
3803
  if IsRValueType(typenames, clean_lines, nesting_state, linenum, and_pos):
3804
    if not IsRValueAllowed(clean_lines, linenum, typenames):
3805
      error(filename, linenum, 'build/c++11', 3,
3806
            'RValue references are an unapproved C++ feature.')
3807
  else:
3808
    error(filename, linenum, 'whitespace/operators', 3,
3809
          'Missing spaces around &&')
3810

    
3811

    
3812
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
3813
  """Checks for additional blank line issues related to sections.
3814

3815
  Currently the only thing checked here is blank line before protected/private.
3816

3817
  Args:
3818
    filename: The name of the current file.
3819
    clean_lines: A CleansedLines instance containing the file.
3820
    class_info: A _ClassInfo objects.
3821
    linenum: The number of the line to check.
3822
    error: The function to call with any errors found.
3823
  """
3824
  # Skip checks if the class is small, where small means 25 lines or less.
3825
  # 25 lines seems like a good cutoff since that's the usual height of
3826
  # terminals, and any class that can't fit in one screen can't really
3827
  # be considered "small".
3828
  #
3829
  # Also skip checks if we are on the first line.  This accounts for
3830
  # classes that look like
3831
  #   class Foo { public: ... };
3832
  #
3833
  # If we didn't find the end of the class, last_line would be zero,
3834
  # and the check will be skipped by the first condition.
3835
  if (class_info.last_line - class_info.starting_linenum <= 24 or
3836
      linenum <= class_info.starting_linenum):
3837
    return
3838

    
3839
  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
3840
  if matched:
3841
    # Issue warning if the line before public/protected/private was
3842
    # not a blank line, but don't do this if the previous line contains
3843
    # "class" or "struct".  This can happen two ways:
3844
    #  - We are at the beginning of the class.
3845
    #  - We are forward-declaring an inner class that is semantically
3846
    #    private, but needed to be public for implementation reasons.
3847
    # Also ignores cases where the previous line ends with a backslash as can be
3848
    # common when defining classes in C macros.
3849
    prev_line = clean_lines.lines[linenum - 1]
3850
    if (not IsBlankLine(prev_line) and
3851
        not Search(r'\b(class|struct)\b', prev_line) and
3852
        not Search(r'\\$', prev_line)):
3853
      # Try a bit harder to find the beginning of the class.  This is to
3854
      # account for multi-line base-specifier lists, e.g.:
3855
      #   class Derived
3856
      #       : public Base {
3857
      end_class_head = class_info.starting_linenum
3858
      for i in range(class_info.starting_linenum, linenum):
3859
        if Search(r'\{\s*$', clean_lines.lines[i]):
3860
          end_class_head = i
3861
          break
3862
      if end_class_head < linenum - 1:
3863
        error(filename, linenum, 'whitespace/blank_line', 3,
3864
              '"%s:" should be preceded by a blank line' % matched.group(1))
3865

    
3866

    
3867
def GetPreviousNonBlankLine(clean_lines, linenum):
3868
  """Return the most recent non-blank line and its line number.
3869

3870
  Args:
3871
    clean_lines: A CleansedLines instance containing the file contents.
3872
    linenum: The number of the line to check.
3873

3874
  Returns:
3875
    A tuple with two elements.  The first element is the contents of the last
3876
    non-blank line before the current line, or the empty string if this is the
3877
    first non-blank line.  The second is the line number of that line, or -1
3878
    if this is the first non-blank line.
3879
  """
3880

    
3881
  prevlinenum = linenum - 1
3882
  while prevlinenum >= 0:
3883
    prevline = clean_lines.elided[prevlinenum]
3884
    if not IsBlankLine(prevline):     # if not a blank line...
3885
      return (prevline, prevlinenum)
3886
    prevlinenum -= 1
3887
  return ('', -1)
3888

    
3889

    
3890
def CheckBraces(filename, clean_lines, linenum, error):
3891
  """Looks for misplaced braces (e.g. at the end of line).
3892

3893
  Args:
3894
    filename: The name of the current file.
3895
    clean_lines: A CleansedLines instance containing the file.
3896
    linenum: The number of the line to check.
3897
    error: The function to call with any errors found.
3898
  """
3899

    
3900
  line = clean_lines.elided[linenum]        # get rid of comments and strings
3901

    
3902
  if Match(r'\s*{\s*$', line):
3903
    # We allow an open brace to start a line in the case where someone is using
3904
    # braces in a block to explicitly create a new scope, which is commonly used
3905
    # to control the lifetime of stack-allocated variables.  Braces are also
3906
    # used for brace initializers inside function calls.  We don't detect this
3907
    # perfectly: we just don't complain if the last non-whitespace character on
3908
    # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the
3909
    # previous line starts a preprocessor block.
3910
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3911
    if (not Search(r'[,;:}{(]\s*$', prevline) and
3912
        not Match(r'\s*#', prevline)):
3913
      error(filename, linenum, 'whitespace/braces', 4,
3914
            '{ should almost always be at the end of the previous line')
3915

    
3916
  # An else clause should be on the same line as the preceding closing brace.
3917
  if Match(r'\s*else\b\s*(?:if\b|\{|$)', line):
3918
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
3919
    if Match(r'\s*}\s*$', prevline):
3920
      error(filename, linenum, 'whitespace/newline', 4,
3921
            'An else should appear on the same line as the preceding }')
3922

    
3923
  # If braces come on one side of an else, they should be on both.
3924
  # However, we have to worry about "else if" that spans multiple lines!
3925
  if Search(r'else if\s*\(', line):       # could be multi-line if
3926
    brace_on_left = bool(Search(r'}\s*else if\s*\(', line))
3927
    # find the ( after the if
3928
    pos = line.find('else if')
3929
    pos = line.find('(', pos)
3930
    if pos > 0:
3931
      (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
3932
      brace_on_right = endline[endpos:].find('{') != -1
3933
      if brace_on_left != brace_on_right:    # must be brace after if
3934
        error(filename, linenum, 'readability/braces', 5,
3935
              'If an else has a brace on one side, it should have it on both')
3936
  elif Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
3937
    error(filename, linenum, 'readability/braces', 5,
3938
          'If an else has a brace on one side, it should have it on both')
3939

    
3940
  # Likewise, an else should never have the else clause on the same line
3941
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
3942
    error(filename, linenum, 'whitespace/newline', 4,
3943
          'Else clause should never be on same line as else (use 2 lines)')
3944

    
3945
  # In the same way, a do/while should never be on one line
3946
  if Match(r'\s*do [^\s{]', line):
3947
    error(filename, linenum, 'whitespace/newline', 4,
3948
          'do/while clauses should not be on a single line')
3949

    
3950
  # Check single-line if/else bodies. The style guide says 'curly braces are not
3951
  # required for single-line statements'. We additionally allow multi-line,
3952
  # single statements, but we reject anything with more than one semicolon in
3953
  # it. This means that the first semicolon after the if should be at the end of
3954
  # its line, and the line after that should have an indent level equal to or
3955
  # lower than the if. We also check for ambiguous if/else nesting without
3956
  # braces.
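  # Illustrative (hypothetical) examples:
  #   if (x)
  #     DoSomething();              // OK: single statement
  #   if (x)
  #     DoSomething(); Cleanup();   // flagged: multiple statements need braces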
3957
  if_else_match = Search(r'\b(if\s*\(|else\b)', line)
3958
  if if_else_match and not Match(r'\s*#', line):
3959
    if_indent = GetIndentLevel(line)
3960
    endline, endlinenum, endpos = line, linenum, if_else_match.end()
3961
    if_match = Search(r'\bif\s*\(', line)
3962
    if if_match:
3963
      # This could be a multiline if condition, so find the end first.
3964
      pos = if_match.end() - 1
3965
      (endline, endlinenum, endpos) = CloseExpression(clean_lines, linenum, pos)
3966
    # Check for an opening brace, either directly after the if or on the next
3967
    # line. If found, this isn't a single-statement conditional.
3968
    if (not Match(r'\s*{', endline[endpos:])
3969
        and not (Match(r'\s*$', endline[endpos:])
3970
                 and endlinenum < (len(clean_lines.elided) - 1)
3971
                 and Match(r'\s*{', clean_lines.elided[endlinenum + 1]))):
3972
      while (endlinenum < len(clean_lines.elided)
3973
             and ';' not in clean_lines.elided[endlinenum][endpos:]):
3974
        endlinenum += 1
3975
        endpos = 0
3976
      if endlinenum < len(clean_lines.elided):
3977
        endline = clean_lines.elided[endlinenum]
3978
        # We allow a mix of whitespace and closing braces (e.g. for one-liner
3979
        # methods) and a single \ after the semicolon (for macros)
3980
        endpos = endline.find(';')
3981
        if not Match(r';[\s}]*(\\?)$', endline[endpos:]):
3982
          # Semicolon isn't the last character, there's something trailing.
3983
          # Output a warning if the semicolon is not contained inside
3984
          # a lambda expression.
3985
          if not Match(r'^[^{};]*\[[^\[\]]*\][^{}]*\{[^{}]*\}\s*\)*[;,]\s*$',
3986
                       endline):
3987
            error(filename, linenum, 'readability/braces', 4,
3988
                  'If/else bodies with multiple statements require braces')
3989
        elif endlinenum < len(clean_lines.elided) - 1:
3990
          # Make sure the next line is dedented
3991
          next_line = clean_lines.elided[endlinenum + 1]
3992
          next_indent = GetIndentLevel(next_line)
3993
          # With ambiguous nested if statements, this will error out on the
3994
          # if that *doesn't* match the else, regardless of whether it's the
3995
          # inner one or outer one.
3996
          if (if_match and Match(r'\s*else\b', next_line)
3997
              and next_indent != if_indent):
3998
            error(filename, linenum, 'readability/braces', 4,
3999
                  'Else clause should be indented at the same level as if. '
4000
                  'Ambiguous nested if/else chains require braces.')
4001
          elif next_indent > if_indent:
4002
            error(filename, linenum, 'readability/braces', 4,
4003
                  'If/else bodies with multiple statements require braces')
4004

    
4005

    
4006
def CheckTrailingSemicolon(filename, clean_lines, linenum, error):
4007
  """Looks for redundant trailing semicolon.
4008

4009
  Args:
4010
    filename: The name of the current file.
4011
    clean_lines: A CleansedLines instance containing the file.
4012
    linenum: The number of the line to check.
4013
    error: The function to call with any errors found.
4014
  """
4015

    
4016
  line = clean_lines.elided[linenum]
4017

    
4018
  # Block bodies should not be followed by a semicolon.  Due to C++11
4019
  # brace initialization, there are more places where semicolons are
4020
  # required than not, so we use a whitelist approach to check these
4021
  # rather than a blacklist.  These are the places where "};" should
4022
  # be replaced by just "}":
4023
  # 1. Some flavor of block following closing parenthesis:
4024
  #    for (;;) {};
4025
  #    while (...) {};
4026
  #    switch (...) {};
4027
  #    Function(...) {};
4028
  #    if (...) {};
4029
  #    if (...) else if (...) {};
4030
  #
4031
  # 2. else block:
4032
  #    if (...) else {};
4033
  #
4034
  # 3. const member function:
4035
  #    Function(...) const {};
4036
  #
4037
  # 4. Block following some statement:
4038
  #    x = 42;
4039
  #    {};
4040
  #
4041
  # 5. Block at the beginning of a function:
4042
  #    Function(...) {
4043
  #      {};
4044
  #    }
4045
  #
4046
  #    Note that naively checking for the preceding "{" will also match
4047
  #    braces inside multi-dimensional arrays, but this is fine since
4048
  #    that expression will not contain semicolons.
4049
  #
4050
  # 6. Block following another block:
4051
  #    while (true) {}
4052
  #    {};
4053
  #
4054
  # 7. End of namespaces:
4055
  #    namespace {};
4056
  #
4057
  #    These semicolons seems far more common than other kinds of
4058
  #    redundant semicolons, possibly due to people converting classes
4059
  #    to namespaces.  For now we do not warn for this case.
4060
  #
4061
  # Try matching case 1 first.
4062
  match = Match(r'^(.*\)\s*)\{', line)
4063
  if match:
4064
    # Matched closing parenthesis (case 1).  Check the token before the
4065
    # matching opening parenthesis, and don't warn if it looks like a
4066
    # macro.  This avoids these false positives:
4067
    #  - macro that defines a base class
4068
    #  - multi-line macro that defines a base class
4069
    #  - macro that defines the whole class-head
4070
    #
4071
    # But we still issue warnings for macros that we know are safe to
4072
    # warn, specifically:
4073
    #  - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P
4074
    #  - TYPED_TEST
4075
    #  - INTERFACE_DEF
4076
    #  - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED:
4077
    #
4078
    # We implement a whitelist of safe macros instead of a blacklist of
4079
    # unsafe macros, even though the latter appears less frequently in
4080
    # google code and would have been easier to implement.  This is because
4081
    # the downside for getting the whitelist wrong means some extra
4082
    # semicolons, while the downside for getting the blacklist wrong
4083
    # would result in compile errors.
4084
    #
4085
    # In addition to macros, we also don't want to warn on
4086
    #  - Compound literals
4087
    #  - Lambdas
4088
    #  - alignas specifier with anonymous structs:
4089
    closing_brace_pos = match.group(1).rfind(')')
4090
    opening_parenthesis = ReverseCloseExpression(
4091
        clean_lines, linenum, closing_brace_pos)
4092
    if opening_parenthesis[2] > -1:
4093
      line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]]
4094
      macro = Search(r'\b([A-Z_]+)\s*$', line_prefix)
4095
      func = Match(r'^(.*\])\s*$', line_prefix)
4096
      if ((macro and
4097
           macro.group(1) not in (
4098
               'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST',
4099
               'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED',
4100
               'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or
4101
          (func and not Search(r'\boperator\s*\[\s*\]', func.group(1))) or
4102
          Search(r'\b(?:struct|union)\s+alignas\s*$', line_prefix) or
4103
          Search(r'\s+=\s*$', line_prefix)):
4104
        match = None
4105
    if (match and
4106
        opening_parenthesis[1] > 1 and
4107
        Search(r'\]\s*$', clean_lines.elided[opening_parenthesis[1] - 1])):
4108
      # Multi-line lambda-expression
4109
      match = None
4110

    
4111
  else:
4112
    # Try matching cases 2-3.
4113
    match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line)
4114
    if not match:
4115
      # Try matching cases 4-6.  These are always matched on separate lines.
4116
      #
4117
      # Note that we can't simply concatenate the previous line to the
4118
      # current line and do a single match, otherwise we may output
4119
      # duplicate warnings for the blank line case:
4120
      #   if (cond) {
4121
      #     // blank line
4122
      #   }
4123
      prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
4124
      if prevline and Search(r'[;{}]\s*$', prevline):
4125
        match = Match(r'^(\s*)\{', line)
4126

    
4127
  # Check matching closing brace
4128
  if match:
4129
    (endline, endlinenum, endpos) = CloseExpression(
4130
        clean_lines, linenum, len(match.group(1)))
4131
    if endpos > -1 and Match(r'^\s*;', endline[endpos:]):
4132
      # Current {} pair is eligible for semicolon check, and we have found
4133
      # the redundant semicolon, output warning here.
4134
      #
4135
      # Note: because we are scanning forward for opening braces, and
4136
      # outputting warnings for the matching closing brace, if there are
4137
      # nested blocks with trailing semicolons, we will get the error
4138
      # messages in reversed order.
4139
      error(filename, endlinenum, 'readability/braces', 4,
4140
            "You don't need a ; after a }")
4141

    
4142

    
4143
def CheckEmptyBlockBody(filename, clean_lines, linenum, error):
4144
  """Look for empty loop/conditional body with only a single semicolon.
4145

4146
  Args:
4147
    filename: The name of the current file.
4148
    clean_lines: A CleansedLines instance containing the file.
4149
    linenum: The number of the line to check.
4150
    error: The function to call with any errors found.
4151
  """
4152

    
4153
  # Search for loop keywords at the beginning of the line.  Because only
4154
  # whitespaces are allowed before the keywords, this will also ignore most
4155
  # do-while-loops, since those lines should start with closing brace.
4156
  #
4157
  # We also check "if" blocks here, since an empty conditional block
4158
  # is likely an error.
4159
  line = clean_lines.elided[linenum]
4160
  matched = Match(r'\s*(for|while|if)\s*\(', line)
4161
  if matched:
4162
    # Find the end of the conditional expression
4163
    (end_line, end_linenum, end_pos) = CloseExpression(
4164
        clean_lines, linenum, line.find('('))
4165

    
4166
    # Output warning if what follows the condition expression is a semicolon.
4167
    # No warning for all other cases, including whitespace or newline, since we
4168
    # have a separate check for semicolons preceded by whitespace.
4169
    if end_pos >= 0 and Match(r';', end_line[end_pos:]):
4170
      if matched.group(1) == 'if':
4171
        error(filename, end_linenum, 'whitespace/empty_conditional_body', 5,
4172
              'Empty conditional bodies should use {}')
4173
      else:
4174
        error(filename, end_linenum, 'whitespace/empty_loop_body', 5,
4175
              'Empty loop bodies should use {} or continue')
4176

    
4177

    
4178
def FindCheckMacro(line):
4179
  """Find a replaceable CHECK-like macro.
4180

4181
  Args:
4182
    line: line to search on.
4183
  Returns:
4184
    (macro name, start position), or (None, -1) if no replaceable
4185
    macro is found.
4186
  """
4187
  for macro in _CHECK_MACROS:
4188
    i = line.find(macro)
4189
    if i >= 0:
4190
      # Find opening parenthesis.  Do a regular expression match here
4191
      # to make sure that we are matching the expected CHECK macro, as
4192
      # opposed to some other macro that happens to contain the CHECK
4193
      # substring.
4194
      matched = Match(r'^(.*\b' + macro + r'\s*)\(', line)
4195
      if not matched:
4196
        continue
4197
      return (macro, len(matched.group(1)))
4198
  return (None, -1)
4199

    
4200

    
4201
def CheckCheck(filename, clean_lines, linenum, error):
4202
  """Checks the use of CHECK and EXPECT macros.
4203

4204
  Args:
4205
    filename: The name of the current file.
4206
    clean_lines: A CleansedLines instance containing the file.
4207
    linenum: The number of the line to check.
4208
    error: The function to call with any errors found.
4209
  """
4210

    
4211
  # Decide the set of replacement macros that should be suggested
4212
  lines = clean_lines.elided
4213
  (check_macro, start_pos) = FindCheckMacro(lines[linenum])
4214
  if not check_macro:
4215
    return
4216

    
4217
  # Find end of the boolean expression by matching parentheses
4218
  (last_line, end_line, end_pos) = CloseExpression(
4219
      clean_lines, linenum, start_pos)
4220
  if end_pos < 0:
4221
    return
4222

    
4223
  # If the check macro is followed by something other than a
4224
  # semicolon, assume users will log their own custom error messages
4225
  # and don't suggest any replacements.
4226
  if not Match(r'\s*;', last_line[end_pos:]):
4227
    return
4228

    
4229
  if linenum == end_line:
4230
    expression = lines[linenum][start_pos + 1:end_pos - 1]
4231
  else:
4232
    expression = lines[linenum][start_pos + 1:]
4233
    for i in xrange(linenum + 1, end_line):
4234
      expression += lines[i]
4235
    expression += last_line[0:end_pos - 1]
4236

    
4237
  # Parse expression so that we can take parentheses into account.
4238
  # This avoids false positives for inputs like "CHECK((a < 4) == b)",
4239
  # which is not replaceable by CHECK_LE.
4240
  lhs = ''
4241
  rhs = ''
4242
  operator = None
4243
  while expression:
4244
    matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||'
4245
                    r'==|!=|>=|>|<=|<|\()(.*)$', expression)
4246
    if matched:
4247
      token = matched.group(1)
4248
      if token == '(':
4249
        # Parenthesized operand
4250
        expression = matched.group(2)
4251
        (end, _) = FindEndOfExpressionInLine(expression, 0, ['('])
4252
        if end < 0:
4253
          return  # Unmatched parenthesis
4254
        lhs += '(' + expression[0:end]
4255
        expression = expression[end:]
4256
      elif token in ('&&', '||'):
4257
        # Logical and/or operators.  This means the expression
4258
        # contains more than one term, for example:
4259
        #   CHECK(42 < a && a < b);
4260
        #
4261
        # These are not replaceable with CHECK_LE, so bail out early.
4262
        return
4263
      elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'):
4264
        # Non-relational operator
4265
        lhs += token
4266
        expression = matched.group(2)
4267
      else:
4268
        # Relational operator
4269
        operator = token
4270
        rhs = matched.group(2)
4271
        break
4272
    else:
4273
      # Unparenthesized operand.  Instead of appending to lhs one character
4274
      # at a time, we do another regular expression match to consume several
4275
      # characters at once if possible.  Trivial benchmark shows that this
4276
      # is more efficient when the operands are longer than a single
4277
      # character, which is generally the case.
4278
      matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression)
4279
      if not matched:
4280
        matched = Match(r'^(\s*\S)(.*)$', expression)
4281
        if not matched:
4282
          break
4283
      lhs += matched.group(1)
4284
      expression = matched.group(2)
4285

    
4286
  # Only apply checks if we got all parts of the boolean expression
4287
  if not (lhs and operator and rhs):
4288
    return
4289

    
4290
  # Check that rhs do not contain logical operators.  We already know
4291
  # that lhs is fine since the loop above parses out && and ||.
4292
  if rhs.find('&&') > -1 or rhs.find('||') > -1:
4293
    return
4294

    
4295
  # At least one of the operands must be a constant literal.  This is
4296
  # to avoid suggesting replacements for unprintable things like
4297
  # CHECK(variable != iterator)
4298
  #
4299
  # The following pattern matches decimal, hex integers, strings, and
4300
  # characters (in that order).
4301
  lhs = lhs.strip()
4302
  rhs = rhs.strip()
4303
  match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$'
4304
  if Match(match_constant, lhs) or Match(match_constant, rhs):
4305
    # Note: since we know both lhs and rhs, we can provide a more
4306
    # descriptive error message like:
4307
    #   Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42)
4308
    # Instead of:
4309
    #   Consider using CHECK_EQ instead of CHECK(a == b)
4310
    #
4311
    # We are still keeping the less descriptive message because if lhs
4312
    # or rhs gets long, the error message might become unreadable.
4313
    error(filename, linenum, 'readability/check', 2,
4314
          'Consider using %s instead of %s(a %s b)' % (
4315
              _CHECK_REPLACEMENT[check_macro][operator],
4316
              check_macro, operator))
4317

    
4318

    
4319
def CheckAltTokens(filename, clean_lines, linenum, error):
4320
  """Check alternative keywords being used in boolean expressions.
4321

4322
  Args:
4323
    filename: The name of the current file.
4324
    clean_lines: A CleansedLines instance containing the file.
4325
    linenum: The number of the line to check.
4326
    error: The function to call with any errors found.
4327
  """
4328
  line = clean_lines.elided[linenum]
4329

    
4330
  # Avoid preprocessor lines
4331
  if Match(r'^\s*#', line):
4332
    return
4333

    
4334
  # Last ditch effort to avoid multi-line comments.  This will not help
4335
  # if the comment started before the current line or ended after the
4336
  # current line, but it catches most of the false positives.  At least,
4337
  # it provides a way to workaround this warning for people who use
4338
  # multi-line comments in preprocessor macros.
4339
  #
4340
  # TODO(unknown): remove this once cpplint has better support for
4341
  # multi-line comments.
4342
  if line.find('/*') >= 0 or line.find('*/') >= 0:
4343
    return
4344

    
4345
  for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line):
4346
    error(filename, linenum, 'readability/alt_tokens', 2,
4347
          'Use operator %s instead of %s' % (
4348
              _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1)))
4349

    
4350

    
4351
def GetLineWidth(line):
4352
  """Determines the width of the line in column positions.
4353

4354
  Args:
4355
    line: A string, which may be a Unicode string.
4356

4357
  Returns:
4358
    The width of the line in column positions, accounting for Unicode
4359
    combining characters and wide characters.
4360
  """
4361
  if isinstance(line, unicode):
4362
    width = 0
4363
    for uc in unicodedata.normalize('NFC', line):
4364
      if unicodedata.east_asian_width(uc) in ('W', 'F'):
4365
        width += 2
4366
      elif not unicodedata.combining(uc):
4367
        width += 1
4368
    return width
4369
  else:
4370
    return len(line)
4371

    
4372

    
4373
def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state,
4374
               error):
4375
  """Checks rules from the 'C++ style rules' section of cppguide.html.
4376

4377
  Most of these rules are hard to test (naming, comment style), but we
4378
  do what we can.  In particular we check for 2-space indents, line lengths,
4379
  tab usage, spaces inside code, etc.
4380

4381
  Args:
4382
    filename: The name of the current file.
4383
    clean_lines: A CleansedLines instance containing the file.
4384
    linenum: The number of the line to check.
4385
    file_extension: The extension (without the dot) of the filename.
4386
    nesting_state: A NestingState instance which maintains information about
4387
                   the current stack of nested blocks being parsed.
4388
    error: The function to call with any errors found.
4389
  """
4390

    
4391
  # Don't use "elided" lines here, otherwise we can't check commented lines.
4392
  # Don't want to use "raw" either, because we don't want to check inside C++11
4393
  # raw strings,
4394
  raw_lines = clean_lines.lines_without_raw_strings
4395
  line = raw_lines[linenum]
4396

    
4397
  if line.find('\t') != -1:
4398
    error(filename, linenum, 'whitespace/tab', 1,
4399
          'Tab found; better to use spaces')
4400

    
4401
  # One or three blank spaces at the beginning of the line is weird; it's
4402
  # hard to reconcile that with 2-space indents.
4403
  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
4404
  # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
4405
  # if(RLENGTH > 20) complain = 0;
4406
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
4407
  # if(match(prev, "&& *$")) complain = 0;
4408
  # if(match(prev, "\\|\\| *$")) complain = 0;
4409
  # if(match(prev, "[\",=><] *$")) complain = 0;
4410
  # if(match($0, " <<")) complain = 0;
4411
  # if(match(prev, " +for \\(")) complain = 0;
4412
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
4413
  scope_or_label_pattern = r'\s*\w+\s*:\s*\\?$'
4414
  classinfo = nesting_state.InnermostClass()
4415
  initial_spaces = 0
4416
  cleansed_line = clean_lines.elided[linenum]
4417
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
4418
    initial_spaces += 1
4419
  if line and line[-1].isspace():
4420
    error(filename, linenum, 'whitespace/end_of_line', 4,
4421
          'Line ends in whitespace.  Consider deleting these extra spaces.')
4422
  # There are certain situations we allow one space, notably for
4423
  # section labels, and also lines containing multi-line raw strings.
4424
  elif ((initial_spaces == 1 or initial_spaces == 3) and
4425
        not Match(scope_or_label_pattern, cleansed_line) and
4426
        not (clean_lines.raw_lines[linenum] != line and
4427
             Match(r'^\s*""', line))):
4428
    error(filename, linenum, 'whitespace/indent', 3,
4429
          'Weird number of spaces at line-start.  '
4430
          'Are you using a 2-space indent?')
4431

    
4432
  # Check if the line is a header guard.
4433
  is_header_guard = False
4434
  if file_extension == 'h':
4435
    cppvar = GetHeaderGuardCPPVariable(filename)
4436
    if (line.startswith('#ifndef %s' % cppvar) or
4437
        line.startswith('#define %s' % cppvar) or
4438
        line.startswith('#endif  // %s' % cppvar)):
4439
      is_header_guard = True
4440
  # #include lines and header guards can be long, since there's no clean way to
4441
  # split them.
4442
  #
4443
  # URLs can be long too.  It's possible to split these, but it makes them
4444
  # harder to cut&paste.
4445
  #
4446
  # The "$Id:...$" comment may also get very long without it being the
4447
  # developers fault.
4448
  if (not line.startswith('#include') and not is_header_guard and
4449
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
4450
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
4451
    line_width = GetLineWidth(line)
4452
    extended_length = int((_line_length * 1.25))
4453
    if line_width > extended_length:
4454
      error(filename, linenum, 'whitespace/line_length', 4,
4455
            'Lines should very rarely be longer than %i characters' %
4456
            extended_length)
4457
    elif line_width > _line_length:
4458
      error(filename, linenum, 'whitespace/line_length', 2,
4459
            'Lines should be <= %i characters long' % _line_length)
4460

    
4461
  if (cleansed_line.count(';') > 1 and
4462
      # for loops are allowed two ;'s (and may run over two lines).
4463
      cleansed_line.find('for') == -1 and
4464
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
4465
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
4466
      # It's ok to have many commands in a switch case that fits in 1 line
4467
      not ((cleansed_line.find('case ') != -1 or
4468
            cleansed_line.find('default:') != -1) and
4469
           cleansed_line.find('break;') != -1)):
4470
    error(filename, linenum, 'whitespace/newline', 0,
4471
          'More than one command on the same line')
4472

    
4473
  # Some more style checks
4474
  CheckBraces(filename, clean_lines, linenum, error)
4475
  CheckTrailingSemicolon(filename, clean_lines, linenum, error)
4476
  CheckEmptyBlockBody(filename, clean_lines, linenum, error)
4477
  CheckAccess(filename, clean_lines, linenum, nesting_state, error)
4478
  CheckSpacing(filename, clean_lines, linenum, nesting_state, error)
4479
  CheckOperatorSpacing(filename, clean_lines, linenum, error)
4480
  CheckParenthesisSpacing(filename, clean_lines, linenum, error)
4481
  CheckCommaSpacing(filename, clean_lines, linenum, error)
4482
  CheckBracesSpacing(filename, clean_lines, linenum, error)
4483
  CheckSpacingForFunctionCall(filename, clean_lines, linenum, error)
4484
  CheckRValueReference(filename, clean_lines, linenum, nesting_state, error)
4485
  CheckCheck(filename, clean_lines, linenum, error)
4486
  CheckAltTokens(filename, clean_lines, linenum, error)
4487
  classinfo = nesting_state.InnermostClass()
4488
  if classinfo:
4489
    CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error)
4490

    
4491

    
4492
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
4493
# Matches the first component of a filename delimited by -s and _s. That is:
4494
#  _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
4495
#  _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
4496
#  _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
4497
#  _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
4498
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
4499

    
4500

    
4501
def _DropCommonSuffixes(filename):
4502
  """Drops common suffixes like _test.cc or -inl.h from filename.
4503

4504
  For example:
4505
    >>> _DropCommonSuffixes('foo/foo-inl.h')
4506
    'foo/foo'
4507
    >>> _DropCommonSuffixes('foo/bar/foo.cc')
4508
    'foo/bar/foo'
4509
    >>> _DropCommonSuffixes('foo/foo_internal.h')
4510
    'foo/foo'
4511
    >>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
4512
    'foo/foo_unusualinternal'
4513

4514
  Args:
4515
    filename: The input filename.
4516

4517
  Returns:
4518
    The filename with the common suffix removed.
4519
  """
4520
  for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
4521
                 'inl.h', 'impl.h', 'internal.h'):
4522
    if (filename.endswith(suffix) and len(filename) > len(suffix) and
4523
        filename[-len(suffix) - 1] in ('-', '_')):
4524
      return filename[:-len(suffix) - 1]
4525
  return os.path.splitext(filename)[0]
4526

    
4527

    
4528
def _IsTestFilename(filename):
4529
  """Determines if the given filename has a suffix that identifies it as a test.
4530

4531
  Args:
4532
    filename: The input filename.
4533

4534
  Returns:
4535
    True if 'filename' looks like a test, False otherwise.
4536
  """
4537
  if (filename.endswith('_test.cc') or
4538
      filename.endswith('_unittest.cc') or
4539
      filename.endswith('_regtest.cc')):
4540
    return True
4541
  else:
4542
    return False
4543

    
4544

    
4545
def _ClassifyInclude(fileinfo, include, is_system):
4546
  """Figures out what kind of header 'include' is.
4547

4548
  Args:
4549
    fileinfo: The current file cpplint is running over. A FileInfo instance.
4550
    include: The path to a #included file.
4551
    is_system: True if the #include used <> rather than "".
4552

4553
  Returns:
4554
    One of the _XXX_HEADER constants.
4555

4556
  For example:
4557
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
4558
    _C_SYS_HEADER
4559
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
4560
    _CPP_SYS_HEADER
4561
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
4562
    _LIKELY_MY_HEADER
4563
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
4564
    ...                  'bar/foo_other_ext.h', False)
4565
    _POSSIBLE_MY_HEADER
4566
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
4567
    _OTHER_HEADER
4568
  """
4569
  # This is a list of all standard c++ header files, except
4570
  # those already checked for above.
4571
  is_cpp_h = include in _CPP_HEADERS
4572

    
4573
  if is_system:
4574
    if is_cpp_h:
4575
      return _CPP_SYS_HEADER
4576
    else:
4577
      return _C_SYS_HEADER
4578

    
4579
  # If the target file and the include we're checking share a
4580
  # basename when we drop common extensions, and the include
4581
  # lives in . , then it's likely to be owned by the target file.
4582
  target_dir, target_base = (
4583
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
4584
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
4585
  if target_base == include_base and (
4586
      include_dir == target_dir or
4587
      include_dir == os.path.normpath(target_dir + '/../public')):
4588
    return _LIKELY_MY_HEADER
4589

    
4590
  # If the target and include share some initial basename
4591
  # component, it's possible the target is implementing the
4592
  # include, so it's allowed to be first, but we'll never
4593
  # complain if it's not there.
4594
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
4595
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
4596
  if (target_first_component and include_first_component and
4597
      target_first_component.group(0) ==
4598
      include_first_component.group(0)):
4599
    return _POSSIBLE_MY_HEADER
4600

    
4601
  return _OTHER_HEADER
4602

    
4603

    
4604

    
4605
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
4606
  """Check rules that are applicable to #include lines.
4607

4608
  Strings on #include lines are NOT removed from elided line, to make
4609
  certain tasks easier. However, to prevent false positives, checks
4610
  applicable to #include lines in CheckLanguage must be put here.
4611

4612
  Args:
4613
    filename: The name of the current file.
4614
    clean_lines: A CleansedLines instance containing the file.
4615
    linenum: The number of the line to check.
4616
    include_state: An _IncludeState instance in which the headers are inserted.
4617
    error: The function to call with any errors found.
4618
  """
4619
  fileinfo = FileInfo(filename)
4620
  line = clean_lines.lines[linenum]
4621

    
4622
  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
4623
  # Only do this check if the included header follows google naming
4624
  # conventions.  If not, assume that it's a 3rd party API that
4625
  # requires special include conventions.
4626
  #
4627
  # We also make an exception for Lua headers, which follow google
4628
  # naming convention but not the include convention.
4629
  match = Match(r'#include\s*"([^/]+\.h)"', line)
4630
  if match and not _THIRD_PARTY_HEADERS_PATTERN.match(match.group(1)):
4631
    error(filename, linenum, 'build/include', 4,
4632
          'Include the directory when naming .h files')
4633

    
4634
  # we shouldn't include a file more than once. actually, there are a
4635
  # handful of instances where doing so is okay, but in general it's
4636
  # not.
4637
  match = _RE_PATTERN_INCLUDE.search(line)
4638
  if match:
4639
    include = match.group(2)
4640
    is_system = (match.group(1) == '<')
4641
    duplicate_line = include_state.FindHeader(include)
4642
    if duplicate_line >= 0:
4643
      error(filename, linenum, 'build/include', 4,
4644
            '"%s" already included at %s:%s' %
4645
            (include, filename, duplicate_line))
4646
    elif (include.endswith('.cc') and
4647
          os.path.dirname(fileinfo.RepositoryName()) != os.path.dirname(include)):
4648
      error(filename, linenum, 'build/include', 4,
4649
            'Do not include .cc files from other packages')
4650
    elif not _THIRD_PARTY_HEADERS_PATTERN.match(include):
4651
      include_state.include_list[-1].append((include, linenum))
4652

    
4653
      # We want to ensure that headers appear in the right order:
4654
      # 1) for foo.cc, foo.h  (preferred location)
4655
      # 2) c system files
4656
      # 3) cpp system files
4657
      # 4) for foo.cc, foo.h  (deprecated location)
4658
      # 5) other google headers
4659
      #
4660
      # We classify each include statement as one of those 5 types
4661
      # using a number of techniques. The include_state object keeps
4662
      # track of the highest type seen, and complains if we see a
4663
      # lower type after that.
4664
      error_message = include_state.CheckNextIncludeOrder(
4665
          _ClassifyInclude(fileinfo, include, is_system))
4666
      if error_message:
4667
        error(filename, linenum, 'build/include_order', 4,
4668
              '%s. Should be: %s.h, c system, c++ system, other.' %
4669
              (error_message, fileinfo.BaseName()))
4670
      canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
4671
      if not include_state.IsInAlphabeticalOrder(
4672
          clean_lines, linenum, canonical_include):
4673
        error(filename, linenum, 'build/include_alpha', 4,
4674
              'Include "%s" not in alphabetical order' % include)
4675
      include_state.SetLastHeader(canonical_include)
4676

    
4677

    
4678

    
4679
def _GetTextInside(text, start_pattern):
4680
  r"""Retrieves all the text between matching open and close parentheses.
4681

4682
  Given a string of lines and a regular expression string, retrieve all the text
4683
  following the expression and between opening punctuation symbols like
4684
  (, [, or {, and the matching close-punctuation symbol. This properly nested
4685
  occurrences of the punctuations, so for the text like
4686
    printf(a(), b(c()));
4687
  a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
4688
  start_pattern must match string having an open punctuation symbol at the end.
4689

4690
  Args:
4691
    text: The lines to extract text. Its comments and strings must be elided.
4692
           It can be single line and can span multiple lines.
4693
    start_pattern: The regexp string indicating where to start extracting
4694
                   the text.
4695
  Returns:
4696
    The extracted text.
4697
    None if either the opening string or ending punctuation could not be found.
4698
  """
4699
  # TODO(unknown): Audit cpplint.py to see what places could be profitably
4700
  # rewritten to use _GetTextInside (and use inferior regexp matching today).
4701

    
4702
  # Give opening punctuations to get the matching close-punctuations.
4703
  matching_punctuation = {'(': ')', '{': '}', '[': ']'}
4704
  closing_punctuation = set(matching_punctuation.itervalues())
4705

    
4706
  # Find the position to start extracting text.
4707
  match = re.search(start_pattern, text, re.M)
4708
  if not match:  # start_pattern not found in text.
4709
    return None
4710
  start_position = match.end(0)
4711

    
4712
  assert start_position > 0, (
4713
      'start_pattern must ends with an opening punctuation.')
4714
  assert text[start_position - 1] in matching_punctuation, (
4715
      'start_pattern must ends with an opening punctuation.')
4716
  # Stack of closing punctuations we expect to have in text after position.
4717
  punctuation_stack = [matching_punctuation[text[start_position - 1]]]
4718
  position = start_position
4719
  while punctuation_stack and position < len(text):
4720
    if text[position] == punctuation_stack[-1]:
4721
      punctuation_stack.pop()
4722
    elif text[position] in closing_punctuation:
4723
      # A closing punctuation without matching opening punctuations.
4724
      return None
4725
    elif text[position] in matching_punctuation:
4726
      punctuation_stack.append(matching_punctuation[text[position]])
4727
    position += 1
4728
  if punctuation_stack:
4729
    # Opening punctuations left without matching close-punctuations.
4730
    return None
4731
  # punctuations match.
4732
  return text[start_position:position - 1]
4733

    
4734

    
4735
# Patterns for matching call-by-reference parameters.
4736
#
4737
# Supports nested templates up to 2 levels deep using this messy pattern:
4738
#   < (?: < (?: < [^<>]*
4739
#               >
4740
#           |   [^<>] )*
4741
#         >
4742
#     |   [^<>] )*
4743
#   >
4744
_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*'  # =~ [[:alpha:]][[:alnum:]]*
4745
_RE_PATTERN_TYPE = (
4746
    r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
4747
    r'(?:\w|'
4748
    r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
4749
    r'::)+')
4750
# A call-by-reference parameter ends with '& identifier'.
4751
_RE_PATTERN_REF_PARAM = re.compile(
4752
    r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
4753
    r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
4754
# A call-by-const-reference parameter either ends with 'const& identifier'
4755
# or looks like 'const type& identifier' when 'type' is atomic.
4756
_RE_PATTERN_CONST_REF_PARAM = (
4757
    r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
4758
    r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
4759

    
4760

    
4761
def CheckLanguage(filename, clean_lines, linenum, file_extension,
4762
                  include_state, nesting_state, error):
4763
  """Checks rules from the 'C++ language rules' section of cppguide.html.
4764

4765
  Some of these rules are hard to test (function overloading, using
4766
  uint32 inappropriately), but we do the best we can.
4767

4768
  Args:
4769
    filename: The name of the current file.
4770
    clean_lines: A CleansedLines instance containing the file.
4771
    linenum: The number of the line to check.
4772
    file_extension: The extension (without the dot) of the filename.
4773
    include_state: An _IncludeState instance in which the headers are inserted.
4774
    nesting_state: A NestingState instance which maintains information about
4775
                   the current stack of nested blocks being parsed.
4776
    error: The function to call with any errors found.
4777
  """
4778
  # If the line is empty or consists of entirely a comment, no need to
4779
  # check it.
4780
  line = clean_lines.elided[linenum]
4781
  if not line:
4782
    return
4783

    
4784
  match = _RE_PATTERN_INCLUDE.search(line)
4785
  if match:
4786
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
4787
    return
4788

    
4789
  # Reset include state across preprocessor directives.  This is meant
4790
  # to silence warnings for conditional includes.
4791
  match = Match(r'^\s*#\s*(if|ifdef|ifndef|elif|else|endif)\b', line)
4792
  if match:
4793
    include_state.ResetSection(match.group(1))
4794

    
4795
  # Make Windows paths like Unix.
4796
  fullname = os.path.abspath(filename).replace('\\', '/')
4797
  
4798
  # Perform other checks now that we are sure that this is not an include line
4799
  CheckCasts(filename, clean_lines, linenum, error)
4800
  CheckGlobalStatic(filename, clean_lines, linenum, error)
4801
  CheckPrintf(filename, clean_lines, linenum, error)
4802

    
4803
  if file_extension == 'h':
4804
    # TODO(unknown): check that 1-arg constructors are explicit.
4805
    #                How to tell it's a constructor?
4806
    #                (handled in CheckForNonStandardConstructs for now)
4807
    # TODO(unknown): check that classes declare or disable copy/assign
4808
    #                (level 1 error)
4809
    pass
4810

    
4811
  # Check if people are using the verboten C basic types.  The only exception
4812
  # we regularly allow is "unsigned short port" for port.
4813
  if Search(r'\bshort port\b', line):
4814
    if not Search(r'\bunsigned short port\b', line):
4815
      error(filename, linenum, 'runtime/int', 4,
4816
            'Use "unsigned short" for ports, not "short"')
4817
  else:
4818
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
4819
    if match:
4820
      error(filename, linenum, 'runtime/int', 4,
4821
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))
4822

    
4823
  # Check if some verboten operator overloading is going on
4824
  # TODO(unknown): catch out-of-line unary operator&:
4825
  #   class X {};
4826
  #   int operator&(const X& x) { return 42; }  // unary operator&
4827
  # The trick is it's hard to tell apart from binary operator&:
4828
  #   class Y { int operator&(const Y& x) { return 23; } }; // binary operator&
4829
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
4830
    error(filename, linenum, 'runtime/operator', 4,
4831
          'Unary operator& is dangerous.  Do not use it.')
4832

    
4833
  # Check for suspicious usage of "if" like
4834
  # } if (a == b) {
4835
  if Search(r'\}\s*if\s*\(', line):
4836
    error(filename, linenum, 'readability/braces', 4,
4837
          'Did you mean "else if"? If not, start a new line for "if".')
4838

    
4839
  # Check for potential format string bugs like printf(foo).
4840
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
4841
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
4842
  # TODO(unknown): Catch the following case. Need to change the calling
4843
  # convention of the whole function to process multiple line to handle it.
4844
  #   printf(
4845
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
4846
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
4847
  if printf_args:
4848
    match = Match(r'([\w.\->()]+)$', printf_args)
4849
    if match and match.group(1) != '__VA_ARGS__':
4850
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
4851
                                line, re.I).group(1)
4852
      error(filename, linenum, 'runtime/printf', 4,
4853
            'Potential format string bug. Do %s("%%s", %s) instead.'
4854
            % (function_name, match.group(1)))
4855

    
4856
  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
4857
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
4858
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
4859
    error(filename, linenum, 'runtime/memset', 4,
4860
          'Did you mean "memset(%s, 0, %s)"?'
4861
          % (match.group(1), match.group(2)))
4862

    
4863
  if Search(r'\busing namespace\b', line):
4864
    error(filename, linenum, 'build/namespaces', 5,
4865
          'Do not use namespace using-directives.  '
4866
          'Use using-declarations instead.')
4867

    
4868
  # Detect variable-length arrays.
4869
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
4870
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
4871
      match.group(3).find(']') == -1):
4872
    # Split the size using space and arithmetic operators as delimiters.
4873
    # If any of the resulting tokens are not compile time constants then
4874
    # report the error.
4875
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
4876
    is_const = True
4877
    skip_next = False
4878
    for tok in tokens:
4879
      if skip_next:
4880
        skip_next = False
4881
        continue
4882

    
4883
      if Search(r'sizeof\(.+\)', tok): continue
4884
      if Search(r'arraysize\(\w+\)', tok): continue
4885

    
4886
      tok = tok.lstrip('(')
4887
      tok = tok.rstrip(')')
4888
      if not tok: continue
4889
      if Match(r'\d+', tok): continue
4890
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
4891
      if Match(r'k[A-Z0-9]\w*', tok): continue
4892
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
4893
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
4894
      # A catch all for tricky sizeof cases, including 'sizeof expression',
4895
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
4896
      # requires skipping the next token because we split on ' ' and '*'.
4897
      if tok.startswith('sizeof'):
4898
        skip_next = True
4899
        continue
4900
      is_const = False
4901
      break
4902
    if not is_const:
4903
      error(filename, linenum, 'runtime/arrays', 1,
4904
            'Do not use variable-length arrays.  Use an appropriately named '
4905
            "('k' followed by CamelCase) compile-time constant for the size.")
4906

    
4907
  # Check for use of unnamed namespaces in header files.  Registration
4908
  # macros are typically OK, so we allow use of "namespace {" on lines
4909
  # that end with backslashes.
4910
  if (file_extension == 'h'
4911
      and Search(r'\bnamespace\s*{', line)
4912
      and line[-1] != '\\'):
4913
    error(filename, linenum, 'build/namespaces', 4,
4914
          'Do not use unnamed namespaces in header files.  See '
4915
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
4916
          ' for more information.')
4917

    
4918

    
4919
def CheckGlobalStatic(filename, clean_lines, linenum, error):
4920
  """Check for unsafe global or static objects.
4921

4922
  Args:
4923
    filename: The name of the current file.
4924
    clean_lines: A CleansedLines instance containing the file.
4925
    linenum: The number of the line to check.
4926
    error: The function to call with any errors found.
4927
  """
4928
  line = clean_lines.elided[linenum]
4929

    
4930
  # Match two lines at a time to support multiline declarations
4931
  if linenum + 1 < clean_lines.NumLines() and not Search(r'[;({]', line):
4932
    line += clean_lines.elided[linenum + 1].strip()
4933

    
4934
  # Check for people declaring static/global STL strings at the top level.
4935
  # This is dangerous because the C++ language does not guarantee that
4936
  # globals with constructors are initialized before the first access.
4937
  match = Match(
4938
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
4939
      line)
4940

    
4941
  # Remove false positives:
4942
  # - String pointers (as opposed to values).
4943
  #    string *pointer
4944
  #    const string *pointer
4945
  #    string const *pointer
4946
  #    string *const pointer
4947
  #
4948
  # - Functions and template specializations.
4949
  #    string Function<Type>(...
4950
  #    string Class<Type>::Method(...
4951
  #
4952
  # - Operators.  These are matched separately because operator names
4953
  #   cross non-word boundaries, and trying to match both operators
4954
  #   and functions at the same time would decrease accuracy of
4955
  #   matching identifiers.
4956
  #    string Class::operator*()
4957
  if (match and
4958
      not Search(r'\bstring\b(\s+const)?\s*\*\s*(const\s+)?\w', line) and
4959
      not Search(r'\boperator\W', line) and
4960
      not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)*\s*\(([^"]|$)', match.group(3))):
4961
    error(filename, linenum, 'runtime/string', 4,
4962
          'For a static/global string constant, use a C style string instead: '
4963
          '"%schar %s[]".' %
4964
          (match.group(1), match.group(2)))
4965

    
4966
  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
4967
    error(filename, linenum, 'runtime/init', 4,
4968
          'You seem to be initializing a member variable with itself.')
4969

    
4970

    
4971
def CheckPrintf(filename, clean_lines, linenum, error):
4972
  """Check for printf related issues.
4973

4974
  Args:
4975
    filename: The name of the current file.
4976
    clean_lines: A CleansedLines instance containing the file.
4977
    linenum: The number of the line to check.
4978
    error: The function to call with any errors found.
4979
  """
4980
  line = clean_lines.elided[linenum]
4981

    
4982
  # When snprintf is used, the second argument shouldn't be a literal.
4983
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
4984
  if match and match.group(2) != '0':
4985
    # If 2nd arg is zero, snprintf is used to calculate size.
4986
    error(filename, linenum, 'runtime/printf', 3,
4987
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
4988
          'to snprintf.' % (match.group(1), match.group(2)))
4989

    
4990
  # Check if some verboten C functions are being used.
4991
  if Search(r'\bsprintf\s*\(', line):
4992
    error(filename, linenum, 'runtime/printf', 5,
4993
          'Never use sprintf. Use snprintf instead.')
4994
  match = Search(r'\b(strcpy|strcat)\s*\(', line)
4995
  if match:
4996
    error(filename, linenum, 'runtime/printf', 4,
4997
          'Almost always, snprintf is better than %s' % match.group(1))
4998

    
4999

    
5000
def IsDerivedFunction(clean_lines, linenum):
5001
  """Check if current line contains an inherited function.
5002

5003
  Args:
5004
    clean_lines: A CleansedLines instance containing the file.
5005
    linenum: The number of the line to check.
5006
  Returns:
5007
    True if current line contains a function with "override"
5008
    virt-specifier.
5009
  """
5010
  # Scan back a few lines for start of current function
5011
  for i in xrange(linenum, max(-1, linenum - 10), -1):
5012
    match = Match(r'^([^()]*\w+)\(', clean_lines.elided[i])
5013
    if match:
5014
      # Look for "override" after the matching closing parenthesis
5015
      line, _, closing_paren = CloseExpression(
5016
          clean_lines, i, len(match.group(1)))
5017
      return (closing_paren >= 0 and
5018
              Search(r'\boverride\b', line[closing_paren:]))
5019
  return False
5020

    
5021

    
5022
def IsOutOfLineMethodDefinition(clean_lines, linenum):
5023
  """Check if current line contains an out-of-line method definition.
5024

5025
  Args:
5026
    clean_lines: A CleansedLines instance containing the file.
5027
    linenum: The number of the line to check.
5028
  Returns:
5029
    True if current line contains an out-of-line method definition.
5030
  """
5031
  # Scan back a few lines for start of current function
5032
  for i in xrange(linenum, max(-1, linenum - 10), -1):
5033
    if Match(r'^([^()]*\w+)\(', clean_lines.elided[i]):
5034
      return Match(r'^[^()]*\w+::\w+\(', clean_lines.elided[i]) is not None
5035
  return False
5036

    
5037

    
5038
def IsInitializerList(clean_lines, linenum):
5039
  """Check if current line is inside constructor initializer list.
5040

5041
  Args:
5042
    clean_lines: A CleansedLines instance containing the file.
5043
    linenum: The number of the line to check.
5044
  Returns:
5045
    True if current line appears to be inside constructor initializer
5046
    list, False otherwise.
5047
  """
5048
  for i in xrange(linenum, 1, -1):
5049
    line = clean_lines.elided[i]
5050
    if i == linenum:
5051
      remove_function_body = Match(r'^(.*)\{\s*$', line)
5052
      if remove_function_body:
5053
        line = remove_function_body.group(1)
5054

    
5055
    if Search(r'\s:\s*\w+[({]', line):
5056
      # A lone colon tend to indicate the start of a constructor
5057
      # initializer list.  It could also be a ternary operator, which
5058
      # also tend to appear in constructor initializer lists as
5059
      # opposed to parameter lists.
5060
      return True
5061
    if Search(r'\}\s*,\s*$', line):
5062
      # A closing brace followed by a comma is probably the end of a
5063
      # brace-initialized member in constructor initializer list.
5064
      return True
5065
    if Search(r'[{};]\s*$', line):
5066
      # Found one of the following:
5067
      # - A closing brace or semicolon, probably the end of the previous
5068
      #   function.
5069
      # - An opening brace, probably the start of current class or namespace.
5070
      #
5071
      # Current line is probably not inside an initializer list since
5072
      # we saw one of those things without seeing the starting colon.
5073
      return False
5074

    
5075
  # Got to the beginning of the file without seeing the start of
5076
  # constructor initializer list.
5077
  return False
5078

    
5079

    
5080
def CheckForNonConstReference(filename, clean_lines, linenum,
5081
                              nesting_state, error):
5082
  """Check for non-const references.
5083

5084
  Separate from CheckLanguage since it scans backwards from current
5085
  line, instead of scanning forward.
5086

5087
  Args:
5088
    filename: The name of the current file.
5089
    clean_lines: A CleansedLines instance containing the file.
5090
    linenum: The number of the line to check.
5091
    nesting_state: A NestingState instance which maintains information about
5092
                   the current stack of nested blocks being parsed.
5093
    error: The function to call with any errors found.
5094
  """
5095
  # Do nothing if there is no '&' on current line.
5096
  line = clean_lines.elided[linenum]
5097
  if '&' not in line:
5098
    return
5099

    
5100
  # If a function is inherited, current function doesn't have much of
5101
  # a choice, so any non-const references should not be blamed on
5102
  # derived function.
5103
  if IsDerivedFunction(clean_lines, linenum):
5104
    return
5105

    
5106
  # Don't warn on out-of-line method definitions, as we would warn on the
5107
  # in-line declaration, if it isn't marked with 'override'.
5108
  if IsOutOfLineMethodDefinition(clean_lines, linenum):
5109
    return
5110

    
5111
  # Long type names may be broken across multiple lines, usually in one
5112
  # of these forms:
5113
  #   LongType
5114
  #       ::LongTypeContinued &identifier
5115
  #   LongType::
5116
  #       LongTypeContinued &identifier
5117
  #   LongType<
5118
  #       ...>::LongTypeContinued &identifier
5119
  #
5120
  # If we detected a type split across two lines, join the previous
5121
  # line to current line so that we can match const references
5122
  # accordingly.
5123
  #
5124
  # Note that this only scans back one line, since scanning back
5125
  # arbitrary number of lines would be expensive.  If you have a type
5126
  # that spans more than 2 lines, please use a typedef.
5127
  if linenum > 1:
5128
    previous = None
5129
    if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
5130
      # previous_line\n + ::current_line
5131
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
5132
                        clean_lines.elided[linenum - 1])
5133
    elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
5134
      # previous_line::\n + current_line
5135
      previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
5136
                        clean_lines.elided[linenum - 1])
5137
    if previous:
5138
      line = previous.group(1) + line.lstrip()
5139
    else:
5140
      # Check for templated parameter that is split across multiple lines
5141
      endpos = line.rfind('>')
5142
      if endpos > -1:
5143
        (_, startline, startpos) = ReverseCloseExpression(
5144
            clean_lines, linenum, endpos)
5145
        if startpos > -1 and startline < linenum:
5146
          # Found the matching < on an earlier line, collect all
5147
          # pieces up to current line.
5148
          line = ''
5149
          for i in xrange(startline, linenum + 1):
5150
            line += clean_lines.elided[i].strip()
5151

    
5152
  # Check for non-const references in function parameters.  A single '&' may
5153
  # found in the following places:
5154
  #   inside expression: binary & for bitwise AND
5155
  #   inside expression: unary & for taking the address of something
5156
  #   inside declarators: reference parameter
5157
  # We will exclude the first two cases by checking that we are not inside a
5158
  # function body, including one that was just introduced by a trailing '{'.
5159
  # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
5160
  if (nesting_state.previous_stack_top and
5161
      not (isinstance(nesting_state.previous_stack_top, _ClassInfo) or
5162
           isinstance(nesting_state.previous_stack_top, _NamespaceInfo))):
5163
    # Not at toplevel, not within a class, and not within a namespace
5164
    return
5165

    
5166
  # Avoid initializer lists.  We only need to scan back from the
5167
  # current line for something that starts with ':'.
5168
  #
5169
  # We don't need to check the current line, since the '&' would
5170
  # appear inside the second set of parentheses on the current line as
5171
  # opposed to the first set.
5172
  if linenum > 0:
5173
    for i in xrange(linenum - 1, max(0, linenum - 10), -1):
5174
      previous_line = clean_lines.elided[i]
5175
      if not Search(r'[),]\s*$', previous_line):
5176
        break
5177
      if Match(r'^\s*:\s+\S', previous_line):
5178
        return
5179

    
5180
  # Avoid preprocessors
5181
  if Search(r'\\\s*$', line):
5182
    return
5183

    
5184
  # Avoid constructor initializer lists
5185
  if IsInitializerList(clean_lines, linenum):
5186
    return
5187

    
5188
  # We allow non-const references in a few standard places, like functions
5189
  # called "swap()" or iostream operators like "<<" or ">>".  Do not check
5190
  # those function parameters.
5191
  #
5192
  # We also accept & in static_assert, which looks like a function but
5193
  # it's actually a declaration expression.
5194
  whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|'
5195
                           r'operator\s*[<>][<>]|'
5196
                           r'static_assert|COMPILE_ASSERT'
5197
                           r')\s*\(')
5198
  if Search(whitelisted_functions, line):
5199
    return
5200
  elif not Search(r'\S+\([^)]*$', line):
5201
    # Don't see a whitelisted function on this line.  Actually we
5202
    # didn't see any function name on this line, so this is likely a
5203
    # multi-line parameter list.  Try a bit harder to catch this case.
5204
    for i in xrange(2):
5205
      if (linenum > i and
5206
          Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])):
5207
        return
5208

    
5209
  decls = ReplaceAll(r'{[^}]*}', ' ', line)  # exclude function body
5210
  for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls):
5211
    if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter):
5212
      error(filename, linenum, 'runtime/references', 2,
5213
            'Is this a non-const reference? '
5214
            'If so, make const or use a pointer: ' +
5215
            ReplaceAll(' *<', '<', parameter))
5216

    
5217

    
5218
def CheckCasts(filename, clean_lines, linenum, error):
5219
  """Various cast related checks.
5220

5221
  Args:
5222
    filename: The name of the current file.
5223
    clean_lines: A CleansedLines instance containing the file.
5224
    linenum: The number of the line to check.
5225
    error: The function to call with any errors found.
5226
  """
5227
  line = clean_lines.elided[linenum]
5228

    
5229
  # Check to see if they're using an conversion function cast.
5230
  # I just try to capture the most common basic types, though there are more.
5231
  # Parameterless conversion functions, such as bool(), are allowed as they are
5232
  # probably a member operator declaration or default constructor.
5233
  match = Search(
5234
      r'(\bnew\s+|\S<\s*(?:const\s+)?)?\b'
5235
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
5236
      r'(\([^)].*)', line)
5237
  expecting_function = ExpectingFunctionArgs(clean_lines, linenum)
5238
  if match and not expecting_function:
5239
    matched_type = match.group(2)
5240

    
5241
    # matched_new_or_template is used to silence two false positives:
5242
    # - New operators
5243
    # - Template arguments with function types
5244
    #
5245
    # For template arguments, we match on types immediately following
5246
    # an opening bracket without any spaces.  This is a fast way to
5247
    # silence the common case where the function type is the first
5248
    # template argument.  False negative with less-than comparison is
5249
    # avoided because those operators are usually followed by a space.
5250
    #
5251
    #   function<double(double)>   // bracket + no space = false positive
5252
    #   value < double(42)         // bracket + space = true positive
5253
    matched_new_or_template = match.group(1)
5254

    
5255
    # Avoid arrays by looking for brackets that come after the closing
5256
    # parenthesis.
5257
    if Match(r'\([^()]+\)\s*\[', match.group(3)):
5258
      return
5259

    
5260
    # Other things to ignore:
5261
    # - Function pointers
5262
    # - Casts to pointer types
5263
    # - Placement new
5264
    # - Alias declarations
5265
    matched_funcptr = match.group(3)
5266
    if (matched_new_or_template is None and
5267
        not (matched_funcptr and
5268
             (Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(',
5269
                    matched_funcptr) or
5270
              matched_funcptr.startswith('(*)'))) and
5271
        not Match(r'\s*using\s+\S+\s*=\s*' + matched_type, line) and
5272
        not Search(r'new\(\S+\)\s*' + matched_type, line)):
5273
      error(filename, linenum, 'readability/casting', 4,
5274
            'Using deprecated casting style.  '
5275
            'Use static_cast<%s>(...) instead' %
5276
            matched_type)
5277

    
5278
  if not expecting_function:
5279
    CheckCStyleCast(filename, clean_lines, linenum, 'static_cast',
5280
                    r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
5281

    
5282
  # This doesn't catch all cases. Consider (const char * const)"hello".
5283
  #
5284
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
5285
  # compile).
5286
  if CheckCStyleCast(filename, clean_lines, linenum, 'const_cast',
5287
                     r'\((char\s?\*+\s?)\)\s*"', error):
5288
    pass
5289
  else:
5290
    # Check pointer casts for other than string constants
5291
    CheckCStyleCast(filename, clean_lines, linenum, 'reinterpret_cast',
5292
                    r'\((\w+\s?\*+\s?)\)', error)
5293

    
5294
  # In addition, we look for people taking the address of a cast.  This
5295
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
5296
  # point where you think.
5297
  #
5298
  # Some non-identifier character is required before the '&' for the
5299
  # expression to be recognized as a cast.  These are casts:
5300
  #   expression = &static_cast<int*>(temporary());
5301
  #   function(&(int*)(temporary()));
5302
  #
5303
  # This is not a cast:
5304
  #   reference_type&(int* function_param);
5305
  match = Search(
5306
      r'(?:[^\w]&\(([^)*][^)]*)\)[\w(])|'
5307
      r'(?:[^\w]&(static|dynamic|down|reinterpret)_cast\b)', line)
5308
  if match:
5309
    # Try a better error message when the & is bound to something
5310
    # dereferenced by the casted pointer, as opposed to the casted
5311
    # pointer itself.
5312
    parenthesis_error = False
5313
    match = Match(r'^(.*&(?:static|dynamic|down|reinterpret)_cast\b)<', line)
5314
    if match:
5315
      _, y1, x1 = CloseExpression(clean_lines, linenum, len(match.group(1)))
5316
      if x1 >= 0 and clean_lines.elided[y1][x1] == '(':
5317
        _, y2, x2 = CloseExpression(clean_lines, y1, x1)
5318
        if x2 >= 0:
5319
          extended_line = clean_lines.elided[y2][x2:]
5320
          if y2 < clean_lines.NumLines() - 1:
5321
            extended_line += clean_lines.elided[y2 + 1]
5322
          if Match(r'\s*(?:->|\[)', extended_line):
5323
            parenthesis_error = True
5324

    
5325
    if parenthesis_error:
5326
      error(filename, linenum, 'readability/casting', 4,
5327
            ('Are you taking an address of something dereferenced '
5328
             'from a cast?  Wrapping the dereferenced expression in '
5329
             'parentheses will make the binding more obvious'))
5330
    else:
5331
      error(filename, linenum, 'runtime/casting', 4,
5332
            ('Are you taking an address of a cast?  '
5333
             'This is dangerous: could be a temp var.  '
5334
             'Take the address before doing the cast, rather than after'))
5335

    
5336

    
5337
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error):
5338
  """Checks for a C-style cast by looking for the pattern.
5339

5340
  Args:
5341
    filename: The name of the current file.
5342
    clean_lines: A CleansedLines instance containing the file.
5343
    linenum: The number of the line to check.
5344
    cast_type: The string for the C++ cast to recommend.  This is either
5345
      reinterpret_cast, static_cast, or const_cast, depending on the pattern.
5346
    pattern: The regular expression used to find C-style casts.
5347
    error: The function to call with any errors found.
5348

5349
  Returns:
5350
    True if an error was emitted.
5351
    False otherwise.
5352
  """
5353
  line = clean_lines.elided[linenum]
5354
  match = Search(pattern, line)
5355
  if not match:
5356
    return False
5357

    
5358
  # Exclude lines with keywords that tend to look like casts
5359
  context = line[0:match.start(1) - 1]
5360
  if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context):
5361
    return False
5362

    
5363
  # Try expanding the current context to see if we are one level of
5364
  # parentheses inside a macro.
5365
  if linenum > 0:
5366
    for i in xrange(linenum - 1, max(0, linenum - 5), -1):
5367
      context = clean_lines.elided[i] + context
5368
  if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context):
5369
    return False
5370

    
5371
  # operator++(int) and operator--(int)
5372
  if context.endswith(' operator++') or context.endswith(' operator--'):
5373
    return False
5374

    
5375
  # A single unnamed argument for a function tends to look like old
5376
  # style cast.  If we see those, don't issue warnings for deprecated
5377
  # casts; instead, issue warnings for unnamed arguments where
5378
  # appropriate.
5379
  #
5380
  # These are things that we want warnings for, since the style guide
5381
  # explicitly requires all parameters to be named:
5382
  #   Function(int);
5383
  #   Function(int) {
5384
  #   ConstMember(int) const;
5385
  #   ConstMember(int) const {
5386
  #   ExceptionMember(int) throw (...);
5387
  #   ExceptionMember(int) throw (...) {
5388
  #   PureVirtual(int) = 0;
5389
  #   [](int) -> bool {
5390
  #
5391
  # These are functions of some sort, where the compiler would be fine
5392
  # if they had named parameters, but people often omit those
5393
  # identifiers to reduce clutter:
5394
  #   (FunctionPointer)(int);
5395
  #   (FunctionPointer)(int) = value;
5396
  #   Function((function_pointer_arg)(int))
5397
  #   Function((function_pointer_arg)(int), int param)
5398
  #   <TemplateArgument(int)>;
5399
  #   <(FunctionPointerTemplateArgument)(int)>;
5400
  remainder = line[match.end(0):]
5401
  if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)',
5402
           remainder):
5403
    # Looks like an unnamed parameter.
5404

    
5405
    # Don't warn on any kind of template arguments.
5406
    if Match(r'^\s*>', remainder):
5407
      return False
5408

    
5409
    # Don't warn on assignments to function pointers, but keep warnings for
5410
    # unnamed parameters to pure virtual functions.  Note that this pattern
5411
    # will also pass on assignments of "0" to function pointers, but the
5412
    # preferred values for those would be "nullptr" or "NULL".
5413
    matched_zero = Match(r'^\s*=\s*(\S+)\s*;', remainder)
5414
    if matched_zero and matched_zero.group(1) != '0':
5415
      return False
5416

    
5417
    # Don't warn on function pointer declarations.  For this we need
5418
    # to check what came before the "(type)" string.
5419
    if Match(r'.*\)\s*$', line[0:match.start(0)]):
5420
      return False
5421

    
5422
    # Don't warn if the parameter is named with block comments, e.g.:
5423
    #  Function(int /*unused_param*/);
5424
    raw_line = clean_lines.raw_lines[linenum]
5425
    if '/*' in raw_line:
5426
      return False
5427

    
5428
    # Passed all filters, issue warning here.
5429
    error(filename, linenum, 'readability/function', 3,
5430
          'All parameters should be named in a function')
5431
    return True
5432

    
5433
  # At this point, all that should be left is actual casts.
5434
  error(filename, linenum, 'readability/casting', 4,
5435
        'Using C-style cast.  Use %s<%s>(...) instead' %
5436
        (cast_type, match.group(1)))
5437

    
5438
  return True
5439

    
5440

    
5441
def ExpectingFunctionArgs(clean_lines, linenum):
5442
  """Checks whether where function type arguments are expected.
5443

5444
  Args:
5445
    clean_lines: A CleansedLines instance containing the file.
5446
    linenum: The number of the line to check.
5447

5448
  Returns:
5449
    True if the line at 'linenum' is inside something that expects arguments
5450
    of function types.
5451
  """
5452
  line = clean_lines.elided[linenum]
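  # e.g. in 'MOCK_METHOD1(Allocate, int(size_t));' the 'int(size_t)' is a
  # function type rather than a cast, so the caller should not flag it.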
5453
  return (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
5454
          (linenum >= 2 and
5455
           (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$',
5456
                  clean_lines.elided[linenum - 1]) or
5457
            Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$',
5458
                  clean_lines.elided[linenum - 2]) or
5459
            Search(r'\bstd::m?function\s*\<\s*$',
5460
                   clean_lines.elided[linenum - 1]))))
5461

    
5462

    
5463
_HEADERS_CONTAINING_TEMPLATES = (
5464
    ('<deque>', ('deque',)),
5465
    ('<functional>', ('unary_function', 'binary_function',
5466
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
5467
                      'negate',
5468
                      'equal_to', 'not_equal_to', 'greater', 'less',
5469
                      'greater_equal', 'less_equal',
5470
                      'logical_and', 'logical_or', 'logical_not',
5471
                      'unary_negate', 'not1', 'binary_negate', 'not2',
5472
                      'bind1st', 'bind2nd',
5473
                      'pointer_to_unary_function',
5474
                      'pointer_to_binary_function',
5475
                      'ptr_fun',
5476
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
5477
                      'mem_fun_ref_t',
5478
                      'const_mem_fun_t', 'const_mem_fun1_t',
5479
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
5480
                      'mem_fun_ref',
5481
                     )),
5482
    ('<limits>', ('numeric_limits',)),
5483
    ('<list>', ('list',)),
5484
    ('<map>', ('map', 'multimap',)),
5485
    ('<memory>', ('allocator',)),
5486
    ('<queue>', ('queue', 'priority_queue',)),
5487
    ('<set>', ('set', 'multiset',)),
5488
    ('<stack>', ('stack',)),
5489
    ('<string>', ('char_traits', 'basic_string',)),
5490
    ('<tuple>', ('tuple',)),
5491
    ('<utility>', ('pair',)),
5492
    ('<vector>', ('vector',)),
5493

    
5494
    # gcc extensions.
5495
    # Note: std::hash is their hash, ::hash is our hash
5496
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
5497
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
5498
    ('<slist>', ('slist',)),
5499
    )
5500

    
5501
_RE_PATTERN_STRING = re.compile(r'\bstring\b')
5502

    
5503
_re_pattern_algorithm_header = []
5504
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
5505
                  'transform'):
5506
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
5507
  # type::max().
5508
  _re_pattern_algorithm_header.append(
5509
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
5510
       _template,
5511
       '<algorithm>'))
5512

    
5513
_re_pattern_templates = []
5514
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
5515
  for _template in _templates:
5516
    _re_pattern_templates.append(
5517
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
5518
         _template + '<>',
5519
         _header))
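# e.g. the '<map>' entry above yields a pattern matching the 'map<' in
# 'std::map<int, string> m;', which can later produce the suggestion
# 'Add #include <map> for map<>' if the header is not already included.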
5520

    
5521

    
5522
def FilesBelongToSameModule(filename_cc, filename_h):
5523
  """Check if these two filenames belong to the same module.
5524

5525
  The concept of a 'module' here is as follows:
5526
  foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
5527
  same 'module' if they are in the same directory.
5528
  some/path/public/xyzzy and some/path/internal/xyzzy are also considered
5529
  to belong to the same module here.
5530

5531
  If the filename_cc contains a longer path than the filename_h, for example,
5532
  '/absolute/path/to/base/sysinfo.cc', and this file would include
5533
  'base/sysinfo.h', this function also produces the prefix needed to open the
5534
  header. This is used by the caller of this function to more robustly open the
5535
  header file. We don't have access to the real include paths in this context,
5536
  so we need this guesswork here.
5537

5538
  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
5539
  according to this implementation. Because of this, this function gives
5540
  some false positives. This should be sufficiently rare in practice.
5541

5542
  Args:
5543
    filename_cc: is the path for the .cc file
5544
    filename_h: is the path for the header path
5545

5546
  Returns:
5547
    Tuple with a bool and a string:
5548
    bool: True if filename_cc and filename_h belong to the same module.
5549
    string: the additional prefix needed to open the header file.
5550
  """
5551

    
5552
  if not filename_cc.endswith('.cc'):
5553
    return (False, '')
5554
  filename_cc = filename_cc[:-len('.cc')]
5555
  if filename_cc.endswith('_unittest'):
5556
    filename_cc = filename_cc[:-len('_unittest')]
5557
  elif filename_cc.endswith('_test'):
5558
    filename_cc = filename_cc[:-len('_test')]
5559
  filename_cc = filename_cc.replace('/public/', '/')
5560
  filename_cc = filename_cc.replace('/internal/', '/')
5561

    
5562
  if not filename_h.endswith('.h'):
5563
    return (False, '')
5564
  filename_h = filename_h[:-len('.h')]
5565
  if filename_h.endswith('-inl'):
5566
    filename_h = filename_h[:-len('-inl')]
5567
  filename_h = filename_h.replace('/public/', '/')
5568
  filename_h = filename_h.replace('/internal/', '/')
5569

    
5570
  files_belong_to_same_module = filename_cc.endswith(filename_h)
5571
  common_path = ''
5572
  if files_belong_to_same_module:
5573
    common_path = filename_cc[:-len(filename_h)]
5574
  return files_belong_to_same_module, common_path
5575

    
5576

    
5577
def UpdateIncludeState(filename, include_dict, io=codecs):
5578
  """Fill up the include_dict with new includes found from the file.
5579

5580
  Args:
5581
    filename: the name of the header to read.
5582
    include_dict: a dictionary in which the headers are inserted.
5583
    io: The io factory to use to read the file. Provided for testability.
5584

5585
  Returns:
5586
    True if a header was successfully added. False otherwise.
5587
  """
5588
  headerfile = None
5589
  try:
5590
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
5591
  except IOError:
5592
    return False
5593
  linenum = 0
5594
  for line in headerfile:
5595
    linenum += 1
5596
    clean_line = CleanseComments(line)
5597
    match = _RE_PATTERN_INCLUDE.search(clean_line)
5598
    if match:
5599
      include = match.group(2)
5600
      include_dict.setdefault(include, linenum)
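      # Record the line where each header is first included; setdefault()
      # keeps the earliest occurrence if the same header appears twice.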
5601
  return True
5602

    
5603

    
5604
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
5605
                              io=codecs):
5606
  """Reports for missing stl includes.
5607

5608
  This function will output warnings to make sure you are including the headers
5609
  necessary for the stl containers and functions that you use. We only give one
5610
  reason to include a header. For example, if you use both equal_to<> and
5611
  less<> in a .h file, only one (the latter in the file) of these will be
5612
  reported as a reason to include <functional>.
5613

5614
  Args:
5615
    filename: The name of the current file.
5616
    clean_lines: A CleansedLines instance containing the file.
5617
    include_state: An _IncludeState instance.
5618
    error: The function to call with any errors found.
5619
    io: The IO factory to use to read the header file. Provided for unittest
5620
        injection.
5621
  """
5622
  required = {}  # A map of header name to linenumber and the template entity.
5623
                 # Example of required: { '<functional>': (1219, 'less<>') }
5624

    
5625
  for linenum in xrange(clean_lines.NumLines()):
5626
    line = clean_lines.elided[linenum]
5627
    if not line or line[0] == '#':
5628
      continue
5629

    
5630
    # String is special -- it is a non-templatized type in STL.
5631
    matched = _RE_PATTERN_STRING.search(line)
5632
    if matched:
5633
      # Don't warn about strings in non-STL namespaces:
5634
      # (We check only the first match per line; good enough.)
5635
      prefix = line[:matched.start()]
5636
      if prefix.endswith('std::') or not prefix.endswith('::'):
5637
        required['<string>'] = (linenum, 'string')
5638

    
5639
    for pattern, template, header in _re_pattern_algorithm_header:
5640
      if pattern.search(line):
5641
        required[header] = (linenum, template)
5642

    
5643
    # The following check is just a speed-up; no semantics are changed.
5644
    if '<' not in line:  # Reduces the CPU time usage by skipping lines.
5645
      continue
5646

    
5647
    for pattern, template, header in _re_pattern_templates:
5648
      if pattern.search(line):
5649
        required[header] = (linenum, template)
5650

    
5651
  # The policy is that if you #include something in foo.h you don't need to
5652
  # include it again in foo.cc. Here, we will look at possible includes.
5653
  # Let's flatten the include_state include_list and copy it into a dictionary.
5654
  include_dict = dict([item for sublist in include_state.include_list
5655
                       for item in sublist])
5656

    
5657
  # Did we find the header for this file (if any) and successfully load it?
5658
  header_found = False
5659

    
5660
  # Use the absolute path so that matching works properly.
5661
  abs_filename = FileInfo(filename).FullName()
5662

    
5663
  # For Emacs's flymake.
5664
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
5665
  # by flymake and that file name might end with '_flymake.cc'. In that case,
5666
  # restore original file name here so that the corresponding header file can be
5667
  # found.
5668
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
5669
  # instead of 'foo_flymake.h'
5670
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
5671

    
5672
  # include_dict is modified during iteration, so we iterate over a copy of
5673
  # the keys.
5674
  header_keys = include_dict.keys()
5675
  for header in header_keys:
5676
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
5677
    fullpath = common_path + header
5678
    if same_module and UpdateIncludeState(fullpath, include_dict, io):
5679
      header_found = True
5680

    
5681
  # If we can't find the header file for a .cc, assume it's because we don't
5682
  # know where to look. In that case we'll give up as we're not sure they
5683
  # didn't include it in the .h file.
5684
  # TODO(unknown): Do a better job of finding .h files so we are confident that
5685
  # not having the .h file means there isn't one.
5686
  if filename.endswith('.cc') and not header_found:
5687
    return
5688

    
5689
  # All the lines have been processed, report the errors found.
5690
  for required_header_unstripped in required:
5691
    template = required[required_header_unstripped][1]
5692
    if required_header_unstripped.strip('<>"') not in include_dict:
5693
      error(filename, required[required_header_unstripped][0],
5694
            'build/include_what_you_use', 4,
5695
            'Add #include ' + required_header_unstripped + ' for ' + template)
5696

    
5697

    
5698
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
5699

    
5700

    
5701
def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
5702
  """Check that make_pair's template arguments are deduced.
5703

5704
  G++ 4.6 in C++11 mode fails badly if make_pair's template arguments are
5705
  specified explicitly, and such use isn't intended in any case.
5706

5707
  Args:
5708
    filename: The name of the current file.
5709
    clean_lines: A CleansedLines instance containing the file.
5710
    linenum: The number of the line to check.
5711
    error: The function to call with any errors found.
5712
  """
5713
  line = clean_lines.elided[linenum]
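  # e.g. 'make_pair<int, int>(1, 2)' is flagged; 'make_pair(1, 2)' is not.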
5714
  match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line)
5715
  if match:
5716
    error(filename, linenum, 'build/explicit_make_pair',
5717
          4,  # 4 = high confidence
5718
          'For C++11-compatibility, omit template arguments from make_pair'
5719
          ' OR use pair directly OR if appropriate, construct a pair directly')
5720

    
5721

    
5722
def CheckDefaultLambdaCaptures(filename, clean_lines, linenum, error):
5723
  """Check that default lambda captures are not used.
5724

5725
  Args:
5726
    filename: The name of the current file.
5727
    clean_lines: A CleansedLines instance containing the file.
5728
    linenum: The number of the line to check.
5729
    error: The function to call with any errors found.
5730
  """
5731
  line = clean_lines.elided[linenum]
5732

    
5733
  # A lambda introducer specifies a default capture if it starts with "[="
5734
  # or if it starts with "[&" _not_ followed by an identifier.
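  # e.g. '[=](int x) { return x; }' and '[&] { Run(); }' are flagged,
  # while '[&total](int x) { total += x; }' names its capture and is not.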
5735
  match = Match(r'^(.*)\[\s*(?:=|&[^\w])', line)
5736
  if match:
5737
    # Found a potential error, check what comes after the lambda-introducer.
5738
    # If it's not open parenthesis (for lambda-declarator) or open brace
5739
    # (for compound-statement), it's not a lambda.
5740
    line, _, pos = CloseExpression(clean_lines, linenum, len(match.group(1)))
5741
    if pos >= 0 and Match(r'^\s*[{(]', line[pos:]):
5742
      error(filename, linenum, 'build/c++11',
5743
            4,  # 4 = high confidence
5744
            'Default lambda captures are an unapproved C++ feature.')
5745

    
5746

    
5747
def CheckRedundantVirtual(filename, clean_lines, linenum, error):
5748
  """Check if line contains a redundant "virtual" function-specifier.
5749

5750
  Args:
5751
    filename: The name of the current file.
5752
    clean_lines: A CleansedLines instance containing the file.
5753
    linenum: The number of the line to check.
5754
    error: The function to call with any errors found.
5755
  """
5756
  # Look for "virtual" on current line.
5757
  line = clean_lines.elided[linenum]
5758
  virtual = Match(r'^(.*)(\bvirtual\b)(.*)$', line)
5759
  if not virtual: return
5760

    
5761
  # Ignore "virtual" keywords that are near access-specifiers.  These
5762
  # are only used in class base-specifier and do not apply to member
5763
  # functions.
5764
  if (Search(r'\b(public|protected|private)\s+$', virtual.group(1)) or
5765
      Match(r'^\s+(public|protected|private)\b', virtual.group(3))):
5766
    return
5767

    
5768
  # Ignore the "virtual" keyword from virtual base classes.  Usually
5769
  # there is a colon on the same line in these cases (virtual base
5770
  # classes are rare in google3 because multiple inheritance is rare).
5771
  if Match(r'^.*[^:]:[^:].*$', line): return
5772

    
5773
  # Look for the next opening parenthesis.  This is the start of the
5774
  # parameter list (possibly on the next line shortly after virtual).
5775
  # TODO(unknown): doesn't work if there are virtual functions with
5776
  # decltype() or other things that use parentheses, but csearch suggests
5777
  # that this is rare.
5778
  end_col = -1
5779
  end_line = -1
5780
  start_col = len(virtual.group(2))
5781
  for start_line in xrange(linenum, min(linenum + 3, clean_lines.NumLines())):
5782
    line = clean_lines.elided[start_line][start_col:]
5783
    parameter_list = Match(r'^([^(]*)\(', line)
5784
    if parameter_list:
5785
      # Match parentheses to find the end of the parameter list
5786
      (_, end_line, end_col) = CloseExpression(
5787
          clean_lines, start_line, start_col + len(parameter_list.group(1)))
5788
      break
5789
    start_col = 0
5790

    
5791
  if end_col < 0:
5792
    return  # Couldn't find end of parameter list, give up
5793

    
5794
  # Look for "override" or "final" after the parameter list
5795
  # (possibly on the next few lines).
5796
  for i in xrange(end_line, min(end_line + 3, clean_lines.NumLines())):
5797
    line = clean_lines.elided[i][end_col:]
5798
    match = Search(r'\b(override|final)\b', line)
5799
    if match:
5800
      error(filename, linenum, 'readability/inheritance', 4,
5801
            ('"virtual" is redundant since function is '
5802
             'already declared as "%s"' % match.group(1)))
5803

    
5804
    # Set end_col to check whole lines after we are done with the
5805
    # first line.
5806
    end_col = 0
5807
    if Search(r'[^\w]\s*$', line):
5808
      break
5809

    
5810

    
5811
def CheckRedundantOverrideOrFinal(filename, clean_lines, linenum, error):
5812
  """Check if line contains a redundant "override" or "final" virt-specifier.
5813

5814
  Args:
5815
    filename: The name of the current file.
5816
    clean_lines: A CleansedLines instance containing the file.
5817
    linenum: The number of the line to check.
5818
    error: The function to call with any errors found.
5819
  """
5820
  # Look for closing parenthesis nearby.  We need one to confirm where
5821
  # the declarator ends and where the virt-specifier starts to avoid
5822
  # false positives.
5823
  line = clean_lines.elided[linenum]
5824
  declarator_end = line.rfind(')')
5825
  if declarator_end >= 0:
5826
    fragment = line[declarator_end:]
5827
  else:
5828
    if linenum > 1 and clean_lines.elided[linenum - 1].rfind(')') >= 0:
5829
      fragment = line
5830
    else:
5831
      return
5832

    
5833
  # Check that at most one of "override" or "final" is present, not both
5834
  if Search(r'\boverride\b', fragment) and Search(r'\bfinal\b', fragment):
5835
    error(filename, linenum, 'readability/inheritance', 4,
5836
          ('"override" is redundant since function is '
5837
           'already declared as "final"'))
5838

    
5839

    
5840

    
5841

    
5842
# Returns true if we are at a new block, and it is directly
5843
# inside of a namespace.
5844
def IsBlockInNameSpace(nesting_state, is_forward_declaration):
5845
  """Checks that the new block is directly in a namespace.
5846

5847
  Args:
5848
    nesting_state: The _NestingState object that contains info about our state.
5849
    is_forward_declaration: If the class is a forward declared class.
5850
  Returns:
5851
    Whether or not the new block is directly in a namespace.
5852
  """
5853
  if is_forward_declaration:
5854
    if len(nesting_state.stack) >= 1 and (
5855
        isinstance(nesting_state.stack[-1], _NamespaceInfo)):
5856
      return True
5857
    else:
5858
      return False
5859

    
5860
  return (len(nesting_state.stack) > 1 and
5861
          nesting_state.stack[-1].check_namespace_indentation and
5862
          isinstance(nesting_state.stack[-2], _NamespaceInfo))
5863

    
5864

    
5865
def ShouldCheckNamespaceIndentation(nesting_state, is_namespace_indent_item,
5866
                                    raw_lines_no_comments, linenum):
5867
  """This method determines if we should apply our namespace indentation check.
5868

5869
  Args:
5870
    nesting_state: The current nesting state.
5871
    is_namespace_indent_item: If we just put a new class on the stack, True.
5872
      If the top of the stack is not a class, or we did not recently
5873
      add the class, False.
5874
    raw_lines_no_comments: The lines without the comments.
5875
    linenum: The current line number we are processing.
5876

5877
  Returns:
5878
    True if we should apply our namespace indentation check. Currently, it
5879
    only works for classes and namespaces inside of a namespace.
5880
  """
5881

    
5882
  is_forward_declaration = IsForwardClassDeclaration(raw_lines_no_comments,
5883
                                                     linenum)
5884

    
5885
  if not (is_namespace_indent_item or is_forward_declaration):
5886
    return False
5887

    
5888
  # If we are in a macro, we do not want to check the namespace indentation.
5889
  if IsMacroDefinition(raw_lines_no_comments, linenum):
5890
    return False
5891

    
5892
  return IsBlockInNameSpace(nesting_state, is_forward_declaration)
5893

    
5894

    
5895
# Call this method if the line is directly inside of a namespace.
5896
# If the line above is blank (excluding comments) or the start of
5897
# an inner namespace, it cannot be indented.
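# e.g. directly inside 'namespace foo {', an indented forward declaration
# such as '  class Bar;' is reported, while 'class Bar;' at column zero is not.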
5898
def CheckItemIndentationInNamespace(filename, raw_lines_no_comments, linenum,
5899
                                    error):
5900
  line = raw_lines_no_comments[linenum]
5901
  if Match(r'^\s+', line):
5902
    error(filename, linenum, 'runtime/indentation_namespace', 4,
5903
          'Do not indent within a namespace')
5904

    
5905

    
5906
def ProcessLine(filename, file_extension, clean_lines, line,
5907
                include_state, function_state, nesting_state, error,
5908
                extra_check_functions=[]):
5909
  """Processes a single line in the file.
5910

5911
  Args:
5912
    filename: Filename of the file that is being processed.
5913
    file_extension: The extension (dot not included) of the file.
5914
    clean_lines: An array of strings, each representing a line of the file,
5915
                 with comments stripped.
5916
    line: Number of line being processed.
5917
    include_state: An _IncludeState instance in which the headers are inserted.
5918
    function_state: A _FunctionState instance which counts function lines, etc.
5919
    nesting_state: A NestingState instance which maintains information about
5920
                   the current stack of nested blocks being parsed.
5921
    error: A callable to which errors are reported, which takes 4 arguments:
5922
           filename, line number, error level, and message
5923
    extra_check_functions: An array of additional check functions that will be
5924
                           run on each source line. Each function takes 4
5925
                           arguments: filename, clean_lines, line, error
5926
  """
5927
  raw_lines = clean_lines.raw_lines
5928
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
5929
  nesting_state.Update(filename, clean_lines, line, error)
5930
  CheckForNamespaceIndentation(filename, nesting_state, clean_lines, line,
5931
                               error)
5932
  if nesting_state.InAsmBlock(): return
5933
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
5934
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
5935
  CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error)
5936
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
5937
                nesting_state, error)
5938
  CheckForNonConstReference(filename, clean_lines, line, nesting_state, error)
5939
  CheckForNonStandardConstructs(filename, clean_lines, line,
5940
                                nesting_state, error)
5941
  CheckVlogArguments(filename, clean_lines, line, error)
5942
  CheckPosixThreading(filename, clean_lines, line, error)
5943
  CheckInvalidIncrement(filename, clean_lines, line, error)
5944
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
5945
  CheckDefaultLambdaCaptures(filename, clean_lines, line, error)
5946
  CheckRedundantVirtual(filename, clean_lines, line, error)
5947
  CheckRedundantOverrideOrFinal(filename, clean_lines, line, error)
5948
  for check_fn in extra_check_functions:
5949
    check_fn(filename, clean_lines, line, error)
5950

    
5951
def FlagCxx11Features(filename, clean_lines, linenum, error):
5952
  """Flag those c++11 features that we only allow in certain places.
5953

5954
  Args:
5955
    filename: The name of the current file.
5956
    clean_lines: A CleansedLines instance containing the file.
5957
    linenum: The number of the line to check.
5958
    error: The function to call with any errors found.
5959
  """
5960
  line = clean_lines.elided[linenum]
5961

    
5962
  # Flag unapproved C++11 headers.
5963
  include = Match(r'\s*#\s*include\s+[<"]([^<"]+)[">]', line)
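  # e.g. '#include <mutex>' is flagged below, while '#include <vector>' is not.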
5964
  if include and include.group(1) in ('cfenv',
5965
                                      'condition_variable',
5966
                                      'fenv.h',
5967
                                      'future',
5968
                                      'mutex',
5969
                                      'thread',
5970
                                      'chrono',
5971
                                      'ratio',
5972
                                      'regex',
5973
                                      'system_error',
5974
                                     ):
5975
    error(filename, linenum, 'build/c++11', 5,
5976
          ('<%s> is an unapproved C++11 header.') % include.group(1))
5977

    
5978
  # The only place where we need to worry about C++11 keywords and library
5979
  # features in preprocessor directives is in macro definitions.
5980
  if Match(r'\s*#', line) and not Match(r'\s*#\s*define\b', line): return
5981

    
5982
  # These are classes and free functions.  The classes are always
5983
  # mentioned as std::*, but we only catch the free functions if
5984
  # they're not found by ADL.  They're alphabetical by header.
5985
  for top_name in (
5986
      # type_traits
5987
      'alignment_of',
5988
      'aligned_union',
5989
      ):
5990
    if Search(r'\bstd::%s\b' % top_name, line):
5991
      error(filename, linenum, 'build/c++11', 5,
5992
            ('std::%s is an unapproved C++11 class or function.  Send the c-style list '
5993
             'an example of where it would make your code more readable, and '
5994
             'they may let you use it.') % top_name)
5995

    
5996

    
5997
def ProcessFileData(filename, file_extension, lines, error,
5998
                    extra_check_functions=[]):
5999
  """Performs lint checks and reports any errors to the given error function.
6000

6001
  Args:
6002
    filename: Filename of the file that is being processed.
6003
    file_extension: The extension (dot not included) of the file.
6004
    lines: An array of strings, each representing a line of the file, with the
6005
           last element being empty if the file is terminated with a newline.
6006
    error: A callable to which errors are reported, which takes 4 arguments:
6007
           filename, line number, error level, and message
6008
    extra_check_functions: An array of additional check functions that will be
6009
                           run on each source line. Each function takes 4
6010
                           arguments: filename, clean_lines, line, error
6011
  """
6012
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
6013
           ['// marker so line numbers end in a known way'])
6014

    
6015
  include_state = _IncludeState()
6016
  function_state = _FunctionState()
6017
  nesting_state = NestingState()
6018

    
6019
  ResetNolintSuppressions()
6020

    
6021
  CheckForCopyright(filename, lines, error)
6022

    
6023
  RemoveMultiLineComments(filename, lines, error)
6024
  clean_lines = CleansedLines(lines)
6025

    
6026
  if file_extension == 'h':
6027
    CheckForHeaderGuard(filename, clean_lines, error)
6028

    
6029
  for line in xrange(clean_lines.NumLines()):
6030
    ProcessLine(filename, file_extension, clean_lines, line,
6031
                include_state, function_state, nesting_state, error,
6032
                extra_check_functions)
6033
    FlagCxx11Features(filename, clean_lines, line, error)
6034
  nesting_state.CheckCompletedBlocks(filename, error)
6035

    
6036
  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)
6037
  
6038
  # Check that the .cc file has included its header if it exists.
6039
  if file_extension == 'cc':
6040
    CheckHeaderFileIncluded(filename, include_state, error)
6041

    
6042
  # We check here rather than inside ProcessLine so that we see raw
6043
  # lines rather than "cleaned" lines.
6044
  CheckForBadCharacters(filename, lines, error)
6045

    
6046
  CheckForNewlineAtEOF(filename, lines, error)
6047

    
6048
def ProcessConfigOverrides(filename):
6049
  """ Loads the configuration files and processes the config overrides.
6050

6051
  Args:
6052
    filename: The name of the file being processed by the linter.
6053

6054
  Returns:
6055
    False if the current |filename| should not be processed further.
6056
  """
6057

    
6058
  abs_filename = os.path.abspath(filename)
6059
  cfg_filters = []
6060
  keep_looking = True
6061
  while keep_looking:
6062
    abs_path, base_name = os.path.split(abs_filename)
6063
    if not base_name:
6064
      break  # Reached the root directory.
6065

    
6066
    cfg_file = os.path.join(abs_path, "CPPLINT.cfg")
6067
    abs_filename = abs_path
6068
    if not os.path.isfile(cfg_file):
6069
      continue
6070

    
6071
    try:
6072
      with open(cfg_file) as file_handle:
6073
        for line in file_handle:
6074
          line, _, _ = line.partition('#')  # Remove comments.
6075
          if not line.strip():
6076
            continue
6077

    
6078
          name, _, val = line.partition('=')
6079
          name = name.strip()
6080
          val = val.strip()
6081
          if name == 'set noparent':
6082
            keep_looking = False
6083
          elif name == 'filter':
6084
            cfg_filters.append(val)
6085
          elif name == 'exclude_files':
6086
            # When matching exclude_files pattern, use the base_name of
6087
            # the current file name or the directory name we are processing.
6088
            # For example, if we are checking for lint errors in /foo/bar/baz.cc
6089
            # and we found the .cfg file at /foo/CPPLINT.cfg, then the config
6090
            # file's "exclude_files" filter is meant to be checked against "bar"
6091
            # and not "baz" nor "bar/baz.cc".
6092
            if base_name:
6093
              pattern = re.compile(val)
6094
              if pattern.match(base_name):
6095
                sys.stderr.write('Ignoring "%s": file excluded by "%s". '
6096
                                 'File path component "%s" matches '
6097
                                 'pattern "%s"\n' %
6098
                                 (filename, cfg_file, base_name, val))
6099
                return False
6100
          elif name == 'linelength':
6101
            global _line_length
6102
            try:
6103
                _line_length = int(val)
6104
            except ValueError:
6105
                sys.stderr.write('Line length must be numeric.')
6106
          else:
6107
            sys.stderr.write(
6108
                'Invalid configuration option (%s) in file %s\n' %
6109
                (name, cfg_file))
6110

    
6111
    except IOError:
6112
      sys.stderr.write(
6113
          "Skipping config file '%s': Can't open for reading\n" % cfg_file)
6114
      keep_looking = False
6115

    
6116
  # Apply all the accumulated filters in reverse order (top-level directory
6117
  # config options having the least priority).
6118
  for cfg_filter in reversed(cfg_filters):
6119
    _AddFilters(cfg_filter)
6120

    
6121
  return True
6122

    
6123

    
6124
def ProcessFile(filename, vlevel, extra_check_functions=[]):
6125
  """Does google-lint on a single file.
6126

6127
  Args:
6128
    filename: The name of the file to parse.
6129

6130
    vlevel: The level of errors to report.  Every error of confidence
6131
    >= verbose_level will be reported.  0 is a good default.
6132

6133
    extra_check_functions: An array of additional check functions that will be
6134
                           run on each source line. Each function takes 4
6135
                           arguments: filename, clean_lines, line, error
6136
  """
6137

    
6138
  _SetVerboseLevel(vlevel)
6139
  _BackupFilters()
6140

    
6141
  if not ProcessConfigOverrides(filename):
6142
    _RestoreFilters()
6143
    return
6144

    
6145
  lf_lines = []
6146
  crlf_lines = []
6147
  try:
6148
    # Support the UNIX convention of using "-" for stdin.  Note that
6149
    # we are not opening the file with universal newline support
6150
    # (which codecs doesn't support anyway), so the resulting lines do
6151
    # contain trailing '\r' characters if we are reading a file that
6152
    # has CRLF endings.
6153
    # If after the split a trailing '\r' is present, it is removed
6154
    # below.
6155
    if filename == '-':
6156
      lines = codecs.StreamReaderWriter(sys.stdin,
6157
                                        codecs.getreader('utf8'),
6158
                                        codecs.getwriter('utf8'),
6159
                                        'replace').read().split('\n')
6160
    else:
6161
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')
6162

    
6163
    # Remove trailing '\r'.
6164
    # The -1 accounts for the extra trailing blank line we get from split()
6165
    for linenum in range(len(lines) - 1):
6166
      if lines[linenum].endswith('\r'):
6167
        lines[linenum] = lines[linenum].rstrip('\r')
6168
        crlf_lines.append(linenum + 1)
6169
      else:
6170
        lf_lines.append(linenum + 1)
6171

    
6172
  except IOError:
6173
    sys.stderr.write(
6174
        "Skipping input '%s': Can't open for reading\n" % filename)
6175
    _RestoreFilters()
6176
    return
6177

    
6178
  # Note, if no dot is found, this will give the entire filename as the ext.
6179
  file_extension = filename[filename.rfind('.') + 1:]
6180

    
6181
  # When reading from stdin, the extension is unknown, so no cpplint tests
6182
  # should rely on the extension.
6183
  if filename != '-' and file_extension not in _valid_extensions:
6184
    sys.stderr.write('Ignoring %s; not a valid file name '
6185
                     '(%s)\n' % (filename, ', '.join(_valid_extensions)))
6186
  else:
6187
    ProcessFileData(filename, file_extension, lines, Error,
6188
                    extra_check_functions)
6189

    
6190
    # If end-of-line sequences are a mix of LF and CR-LF, issue
6191
    # warnings on the lines with CR.
6192
    #
6193
    # Don't issue any warnings if all lines are uniformly LF or CR-LF,
6194
    # since critique can handle these just fine, and the style guide
6195
    # doesn't dictate a particular end of line sequence.
6196
    #
6197
    # We can't depend on os.linesep to determine what the desired
6198
    # end-of-line sequence should be, since that will return the
6199
    # server-side end-of-line sequence.
6200
    if lf_lines and crlf_lines:
6201
      # Warn on every line with CR.  An alternative approach might be to
6202
      # check whether the file is mostly CRLF or just LF, and warn on the
6203
      # minority; we bias toward LF here since most tools prefer LF.
6204
      for linenum in crlf_lines:
6205
        Error(filename, linenum, 'whitespace/newline', 1,
6206
              'Unexpected \\r (^M) found; better to use only \\n')
6207

    
6208
  sys.stderr.write('Done processing %s\n' % filename)
6209
  _RestoreFilters()
6210

    
6211

    
6212
def PrintUsage(message):
6213
  """Prints a brief usage string and exits, optionally with an error message.
6214

6215
  Args:
6216
    message: The optional error message.
6217
  """
6218
  sys.stderr.write(_USAGE)
6219
  if message:
6220
    sys.exit('\nFATAL ERROR: ' + message)
6221
  else:
6222
    sys.exit(1)
6223

    
6224

    
6225
def PrintCategories():
6226
  """Prints a list of all the error-categories used by error messages.
6227

6228
  These are the categories used to filter messages via --filter.
6229
  """
6230
  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
6231
  sys.exit(0)
6232

    
6233

    
6234
def ParseArguments(args):
6235
  """Parses the command line arguments.
6236

6237
  This may set the output format and verbosity level as side-effects.
6238

6239
  Args:
6240
    args: The command line arguments:
6241

6242
  Returns:
6243
    The list of filenames to lint.
6244
  """
6245
  try:
6246
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
6247
                                                 'counting=',
6248
                                                 'filter=',
6249
                                                 'root=',
6250
                                                 'linelength=',
6251
                                                 'extensions='])
6252
  except getopt.GetoptError:
6253
    PrintUsage('Invalid arguments.')
6254

    
6255
  verbosity = _VerboseLevel()
6256
  output_format = _OutputFormat()
6257
  filters = ''
6258
  counting_style = ''
6259

    
6260
  for (opt, val) in opts:
6261
    if opt == '--help':
6262
      PrintUsage(None)
6263
    elif opt == '--output':
6264
      if val not in ('emacs', 'vs7', 'eclipse'):
6265
        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
6266
      output_format = val
6267
    elif opt == '--verbose':
6268
      verbosity = int(val)
6269
    elif opt == '--filter':
6270
      filters = val
6271
      if not filters:
6272
        PrintCategories()
6273
    elif opt == '--counting':
6274
      if val not in ('total', 'toplevel', 'detailed'):
6275
        PrintUsage('Valid counting options are total, toplevel, and detailed')
6276
      counting_style = val
6277
    elif opt == '--root':
6278
      global _root
6279
      _root = val
6280
    elif opt == '--linelength':
6281
      global _line_length
6282
      try:
6283
          _line_length = int(val)
6284
      except ValueError:
6285
          PrintUsage('Line length must be digits.')
6286
    elif opt == '--extensions':
6287
      global _valid_extensions
6288
      try:
6289
          _valid_extensions = set(val.split(','))
6290
      except ValueError:
6291
          PrintUsage('Extensions must be a comma-separated list.')
6292

    
6293
  if not filenames:
6294
    PrintUsage('No files were specified.')
6295

    
6296
  _SetOutputFormat(output_format)
6297
  _SetVerboseLevel(verbosity)
6298
  _SetFilters(filters)
6299
  _SetCountingStyle(counting_style)
6300

    
6301
  return filenames
6302

    
6303

    
6304
def main():
6305
  filenames = ParseArguments(sys.argv[1:])
6306

    
6307
  # Change stderr to write with replacement characters so we don't die
6308
  # if we try to print something containing non-ASCII characters.
6309
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
6310
                                         codecs.getreader('utf8'),
6311
                                         codecs.getwriter('utf8'),
6312
                                         'replace')
6313

    
6314
  _cpplint_state.ResetErrorCounts()
6315
  for filename in filenames:
6316
    ProcessFile(filename, _cpplint_state.verbose_level)
6317
  _cpplint_state.PrintErrorCounts()
6318

    
6319
  sys.exit(_cpplint_state.error_count > 0)
6320

    
6321

    
6322
if __name__ == '__main__':
6323
  main()