From f4e66759ec9435be6a515530f9463950f9a5e6c2 Mon Sep 17 00:00:00 2001 From: Akelge Date: Fri, 5 Jul 2013 20:27:58 +0000 Subject: [PATCH] Added vim-flake8 (pyflakes+pep8+mccabe) Updated vcs plugin --- vim/GetLatest/GetLatestVimScripts.dat | 2 +- vim/ftplugin/python/flake8.py | 160 ++ vim/ftplugin/python/flake8.pyc | Bin 0 -> 6234 bytes vim/ftplugin/python/flake8.vim | 188 ++ vim/ftplugin/python/mccabe/__init__.py | 0 vim/ftplugin/python/mccabe/mccabe.py | 293 ++++ vim/ftplugin/python/pep8/__init__.py | 0 vim/ftplugin/python/pep8/autopep8.py | 2047 ++++++++++++++++++++++ vim/ftplugin/python/pep8/pep8.py | 1991 +++++++++++++++++++++ vim/ftplugin/python/pyflakes/__init__.py | 2 + vim/ftplugin/python/pyflakes/api.py | 130 ++ vim/ftplugin/python/pyflakes/checker.py | 723 ++++++++ vim/ftplugin/python/pyflakes/messages.py | 113 ++ vim/ftplugin/python/pyflakes/reporter.py | 79 + vim/plugin/vcsbzr.vim | 2 +- vim/plugin/vcscommand.vim | 38 +- vim/plugin/vcscvs.vim | 2 +- vim/plugin/vcsgit.vim | 2 +- vim/plugin/vcshg.vim | 2 +- vim/plugin/vcssvk.vim | 2 +- vim/plugin/vcssvn.vim | 6 +- 21 files changed, 5750 insertions(+), 32 deletions(-) create mode 100755 vim/ftplugin/python/flake8.py create mode 100644 vim/ftplugin/python/flake8.pyc create mode 100755 vim/ftplugin/python/flake8.vim create mode 100755 vim/ftplugin/python/mccabe/__init__.py create mode 100755 vim/ftplugin/python/mccabe/mccabe.py create mode 100755 vim/ftplugin/python/pep8/__init__.py create mode 100755 vim/ftplugin/python/pep8/autopep8.py create mode 100755 vim/ftplugin/python/pep8/pep8.py create mode 100755 vim/ftplugin/python/pyflakes/__init__.py create mode 100755 vim/ftplugin/python/pyflakes/api.py create mode 100755 vim/ftplugin/python/pyflakes/checker.py create mode 100755 vim/ftplugin/python/pyflakes/messages.py create mode 100755 vim/ftplugin/python/pyflakes/reporter.py diff --git a/vim/GetLatest/GetLatestVimScripts.dat b/vim/GetLatest/GetLatestVimScripts.dat index eb15428..913b995 100644 --- 
a/vim/GetLatest/GetLatestVimScripts.dat +++ b/vim/GetLatest/GetLatestVimScripts.dat @@ -8,5 +8,5 @@ ScriptID SourceID Filename 2324 9247 :AutoInstall: TextFormat 1658 17123 :AutoInstall: NERD_tree.vim 1218 14455 :AutoInstall: NERD_commenter.vim -90 17031 :AutoInstall: vcscommand.vim +90 19809 :AutoInstall: vcscommand.vim 2896 11941 :AutoInstall: open_terminal.vim diff --git a/vim/ftplugin/python/flake8.py b/vim/ftplugin/python/flake8.py new file mode 100755 index 0000000..517f623 --- /dev/null +++ b/vim/ftplugin/python/flake8.py @@ -0,0 +1,160 @@ +# coding: utf-8 + +from mccabe.mccabe import get_module_complexity +from pyflakes import checker, messages +import _ast +from pep8 import pep8 as p8 +from pep8.autopep8 import fix_file as pep8_fix, fix_lines as pep8_fix_lines +import os + + +class Pep8Options(): + verbose = 0 + diff = False + in_place = True + recursive = False + pep8_passes = 100 + max_line_length = 79 + ignore = '' + select = '' + aggressive = False + + +class MccabeOptions(): + complexity = 10 + +flake_code_mapping = { + 'W402': (messages.UnusedImport,), + 'W403': (messages.ImportShadowedByLoopVar,), + 'W404': (messages.ImportStarUsed,), + 'W405': (messages.LateFutureImport,), + 'W801': (messages.RedefinedWhileUnused, + messages.RedefinedInListComp,), + 'W802': (messages.UndefinedName,), + 'W803': (messages.UndefinedExport,), + 'W804': (messages.UndefinedLocal, + messages.UnusedVariable,), + 'W805': (messages.DuplicateArgument,), + 'W806': (messages.Redefined,), +} + +flake_class_mapping = dict( + (k, c) for (c, v) in flake_code_mapping.items() for k in v) + + +def fix_file(filename): + pep8_fix(filename, Pep8Options) + + +def fix_lines(lines): + return pep8_fix_lines(lines, Pep8Options) + + +def run_checkers(filename, checkers, ignore): + + result = [] + + for c in checkers: + + checker_fun = globals().get(c) + if not checker: + continue + + try: + for e in checker_fun(filename): + e.update( + col=e.get('col') or 0, + text="{0} [{1}]".format( + 
e.get('text', '').strip( + ).replace("'", "\"").splitlines()[0], + c), + filename=os.path.normpath(filename), + type=e.get('type') or 'W', + bufnr=0, + ) + result.append(e) + except: + pass + + result = filter(lambda e: _ignore_error(e, ignore), result) + return sorted(result, key=lambda x: x['lnum']) + + +def mccabe(filename): + return get_module_complexity(filename, min=MccabeOptions.complexity) + + +def pep8(filename): + style = PEP8 or _init_pep8() + return style.input_file(filename) + + +def pyflakes(filename): + codeString = file(filename, 'U').read() + '\n' + errors = [] + try: + tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST) + except SyntaxError as e: + errors.append(dict( + lnum=e.lineno or 0, + col=e.offset or 0, + text=getattr(e, 'msg', None) or str(e), + type='E' + )) + else: + w = checker.Checker(tree, filename) + w.messages.sort(lambda a, b: cmp(a.lineno, b.lineno)) + for w in w.messages: + errors.append(dict( + lnum=w.lineno, + col=0, + text=u'{0} {1}'.format( + flake_class_mapping.get(w.__class__, ''), + w.message % w.message_args), + type='E' + )) + return errors + + +PEP8 = None + + +def _init_pep8(): + global PEP8 + + class _PEP8Report(p8.BaseReport): + + def init_file(self, filename, lines, expected, line_offset): + super(_PEP8Report, self).init_file( + filename, lines, expected, line_offset) + self.errors = [] + + def error(self, line_number, offset, text, check): + code = super(_PEP8Report, self).error( + line_number, offset, text, check) + + self.errors.append(dict( + text=text, + type=code, + col=offset + 1, + lnum=line_number, + )) + + def get_file_results(self): + return self.errors + + PEP8 = p8.StyleGuide(reporter=_PEP8Report) + return PEP8 + + +def _ignore_error(e, ignore): + for i in ignore: + if e['text'].startswith(i): + return False + return True + +if __name__ == '__main__': + for r in run_checkers( + '/home/andrew/devel/vim/bundle/flake8-vim/ftplugin/python/flake8.py', + checkers=['mccabe', 'pyflakes', 'pep8'], 
ignore=[]): + print r diff --git a/vim/ftplugin/python/flake8.pyc b/vim/ftplugin/python/flake8.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f828b1ffdbd81a5759e9be0e8da96afb829d4039 GIT binary patch literal 6234 zcmbtYOK;rP6+YyQMjE{>S$-snlO}PjK4QsDVrXHwjV;>=;K~jaTgfB>K@IPWI3hU| z$+1U`g)Uq)X*UJBD9~*--F4YN&`lRzb=_tEL4QE|edo@M>=q4RSrV_#;eDNZ9^W~a zm4D4lep|c!NmB+tHT?f6p5kwgNQgg2Vv)p=h9eGnS7KL^iZm*cRHae1Zz~dyNn=cB zR3)xSQj^BG?HiMLLXx^PCMB7Y#`I{$j5KB>tV#H`gyZr{k*6Y`H)bWAkno~}_0g+2 zi6+HVr86%rN5VQ%GJiQnAapcO&QIybS_Z%ji-*_ukpHsOYCUWB|M|Vl!RxMctgT- zN_6){-kf`r%w1lD;|+7$`k zk~TExxEniA@+C2sW)(1=+rKTvSz}?5d zds!KE(_-Xjl;H-R;yM}wRl%-kT`_R2su*}vP3dvEU;`fEsyfj-$@4U(w0By(trZFr z;gJYSNOTWPNp@w16ovEsG)Ro^my^m2&g|jTLA=uRig~^N6APsY~dihb5thUN5?zN+IH9IW#y6I{w4))FZW_t$KfxKR2Q|Zu0#pH1KvD0h>N`6Y#{F^%5Jw ze9d@eV4Pe~j|zAfkIE;`|H`K^_>6qd$wrSG%BB%hJR>0kQbFV;#N!%Qo)M2~BeXF} zNf+m!OyDML-TxaYv!7>(6Ii*1ks@Heh+&;os2InSLjbAkxg zsY_>aup9E?V1qh0&;G)M7$U*Am^tRaWjj+Gfj;NpL)qSj%#P!r{S41`IW1qq@{n&@ zeg(T!xC@RCfB+pLA4d-q@J4HSP@o(tu*~oJ@l)!qc^_z>dMiMN1dwE zUn=OGZ+gr&cJN_lv}aopcDL6`^Crg(y?7CHaqK1EU~hL6WE`Z>%r_Z zN_xaR^^zBbO-1%=aggkW!F48%0*~mC_s!u*P6pBzdneGq=&z1H=24~- zb7M0XfxO7#sI`TN|XY8TXgf%gex?;H8bZ8 zYK1~gp~H~ZdU5H!iB3(DgAsnKmwIb>d8{v;^P)xcCEliOX@mS~PvXXB;=NF~WQR3Vo^K*(%bAs^rc$Wc)(91Rhc$)17HmSH5JKzC-5brEie%U`y>RZ5f}f zZz6zP#Yu7c!Qk!2&O@e(=S0D9>;!m+C=hO%_m*-&I*U5zG!Yla=c-HEo{=-)pA+QZ z@3M_Gw-gCB6-*x}%ucF#P2V)V_n=rsXz%-=JjQ|IL{hZ9ACVzg+-lN~hQ6decI@y> zcJLxCB8D!+p}I^n$<#Xu9BJ29{ai87b$3th^NMvHtW@BU9zlg!1xZD7$_lGOJ*D0) z=uf!`LPa%}pS*DJr5_Zf=85}< z5rt~wYg==;tT_~;w#Rm4WGFV!; zG%k<>)=?G}jmt2_l@hlZO4cO64Idh;(C`p}6vR)3B7zQr64k#Vj?>mYAJQJz|6I)G z*mF0K~JbASBm1R^-g5Mavsz9WrmLA z+bXK~EjOm`i(|Do0a9<`YH^PT@La?gMS^MSs#YL~Z zgG%B?P#7DiT3Bqcez?<%LgRfjk_p71O5*Y_?ARY;($6_j9lP@S(t@MejA6*-Tu{LLtr!P;|mk9s9@{_|)VVH_*Mh*G-K5{_}V>G>=TY%CA7X zy)=x?;N#D`-~C;P%XoL#L~N+4>7_i=_Hnu zmxEr}C86H~yo(&gDD^I}xyFWO({fqU-Gj7OnDFC-{!}u@TXJhJ2)mC>c;oPHx0^kL z9T<>C{V)fJ%o3k 
z(!qpHd<4-iccVAA`YIZ?AHBQVZ3eMQWY>m#QLu{&gx_E`dbn6MA=Za^yO)@>)Qm9N z871GSHUMCaN&|wPqG42;r0@c;-Yqt2vK>+kA&>5_UZdaOG~egP{-@N7hLpP7HTx~} zI{l*HPV>$gBzJ8qi_?ph3Y^pa)}H%6kDl|DdKJGH@Ql@8tySvF_1d)J=Cbo2Epx#8 literal 0 HcmV?d00001 diff --git a/vim/ftplugin/python/flake8.vim b/vim/ftplugin/python/flake8.vim new file mode 100755 index 0000000..c91cf48 --- /dev/null +++ b/vim/ftplugin/python/flake8.vim @@ -0,0 +1,188 @@ +" Check python support +if !has('python') + echo "Error: PyFlake.vim required vim compiled with +python." + finish +endif + +if !exists('g:PyFlakeRangeCommand') + let g:PyFlakeRangeCommand = 'Q' +endif + +if !exists('b:PyFlake_initialized') + let b:PyFlake_initialized = 1 + + au BufWritePost call flake8#on_write() + au CursorHold call flake8#get_message() + au CursorMoved call flake8#get_message() + + " Commands + command! -buffer PyFlakeToggle :let b:PyFlake_disabled = exists('b:PyFlake_disabled') ? b:PyFlake_disabled ? 0 : 1 : 1 + command! -buffer PyFlake :call flake8#run() + command! -buffer -range=% PyFlakeAuto :call flake8#auto(,) + + " Keymaps + if g:PyFlakeRangeCommand != '' + exec 'vnoremap ' . g:PyFlakeRangeCommand . 
' :PyFlakeAuto' + endif + + let b:showing_message = 0 + + " Signs definition + sign define W text=WW texthl=Todo + sign define C text=CC texthl=Comment + sign define R text=RR texthl=Visual + sign define E text=EE texthl=Error +endif + + "Check for flake8 plugin is loaded +if exists("g:PyFlakeDirectory") + finish +endif + +if !exists('g:PyFlakeOnWrite') + let g:PyFlakeOnWrite = 1 +endif + +" Init variables +let g:PyFlakeDirectory = expand(':p:h') + +if !exists('g:PyFlakeCheckers') + let g:PyFlakeCheckers = 'pep8,mccabe,pyflakes' +endif +if !exists('g:PyFlakeDefaultComplexity') + let g:PyFlakeDefaultComplexity=10 +endif +if !exists('g:PyFlakeDisabledMessages') + let g:PyFlakeDisabledMessages = 'E501' +endif +if !exists('g:PyFlakeCWindow') + let g:PyFlakeCWindow = 6 +endif +if !exists('g:PyFlakeSigns') + let g:PyFlakeSigns = 1 +endif +if !exists('g:PyFlakeMaxLineLength') + let g:PyFlakeMaxLineLength = 100 +endif + +python << EOF + +import sys +import json +import vim + +sys.path.insert(0, vim.eval("g:PyFlakeDirectory")) +from flake8 import run_checkers, fix_lines, Pep8Options, MccabeOptions + +def flake8_check(): + checkers=vim.eval('g:PyFlakeCheckers').split(',') + ignore=vim.eval('g:PyFlakeDisabledMessages').split(',') + MccabeOptions.complexity=int(vim.eval('g:PyFlakeDefaultComplexity')) + Pep8Options.max_line_length=int(vim.eval('g:PyFlakeMaxLineLength')) + filename=vim.current.buffer.name + parse_result(run_checkers(filename, checkers, ignore)) + +def parse_result(result): + vim.command('let g:qf_list = {}'.format(json.dumps(result, ensure_ascii=False))) + +EOF + +function! flake8#on_write() + if !g:PyFlakeOnWrite || exists("b:PyFlake_disabled") && b:PyFlake_disabled + return + endif + call flake8#check() +endfunction + +function! flake8#run() + if &modifiable && &modified + write + endif + call flake8#check() +endfun + +function! 
flake8#check() + py flake8_check() + let s:matchDict = {} + for err in g:qf_list + let s:matchDict[err.lnum] = err.text + endfor + call setqflist(g:qf_list, 'r') + + " Place signs + if g:PyFlakeSigns + call flake8#place_signs() + endif + + " Open cwindow + if g:PyFlakeCWindow + cclose + if len(g:qf_list) + let l:winsize = len(g:qf_list) > g:PyFlakeCWindow ? g:PyFlakeCWindow : len(g:qf_list) + exec l:winsize . 'cwindow' + endif + endif +endfunction + +function! flake8#auto(l1, l2) "{{{ + cclose + sign unplace * + let s:matchDict = {} + call setqflist([]) + +python << EOF +start, end = int(vim.eval('a:l1'))-1, int(vim.eval('a:l2')) +enc = vim.eval('&enc') +lines = fix_lines(vim.current.buffer[start:end]).splitlines() +res = [ln.encode(enc, 'replace') for ln in lines] +vim.current.buffer[start:end] = res +EOF +endfunction "}}} + +function! flake8#place_signs() + "first remove all sings + sign unplace * + + "now we place one sign for every quickfix line + let l:id = 1 + for item in getqflist() + execute(':sign place '.l:id.' name='.l:item.type.' line='.l:item.lnum.' buffer='.l:item.bufnr) + let l:id = l:id + 1 + endfor +endfunction + +" keep track of whether or not we are showing a message +" WideMsg() prints [long] message up to (&columns-1) length +" guaranteed without "Press Enter" prompt. +function! flake8#wide_msg(msg) + let x=&ruler | let y=&showcmd + set noruler noshowcmd + redraw + echo strpart(a:msg, 0, &columns-1) + let &ruler=x | let &showcmd=y +endfun + + +function! flake8#get_message() + let s:cursorPos = getpos(".") + + " Bail if RunPyflakes hasn't been called yet. 
+ if !exists('s:matchDict') + return + endif + + " if there's a message for the line the cursor is currently on, echo + " it to the console + if has_key(s:matchDict, s:cursorPos[1]) + let s:pyflakesMatch = get(s:matchDict, s:cursorPos[1]) + call flake8#wide_msg(s:pyflakesMatch) + let b:showing_message = 1 + return + endif + + " otherwise, if we're showing a message, clear it + if b:showing_message == 1 + echo + let b:showing_message = 0 + endif +endfunction diff --git a/vim/ftplugin/python/mccabe/__init__.py b/vim/ftplugin/python/mccabe/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/vim/ftplugin/python/mccabe/mccabe.py b/vim/ftplugin/python/mccabe/mccabe.py new file mode 100755 index 0000000..96fb6e7 --- /dev/null +++ b/vim/ftplugin/python/mccabe/mccabe.py @@ -0,0 +1,293 @@ +""" Meager code path measurement tool. + Ned Batchelder + http://nedbatchelder.com/blog/200803/python_code_complexity_microtool.html + MIT License. +""" +try: + from compiler import parse # NOQA + iter_child_nodes = None # NOQA +except ImportError: + from ast import parse, iter_child_nodes # NOQA + +import optparse +import sys +from collections import defaultdict + +WARNING_CODE = "W901" + + +class ASTVisitor: + + VERBOSE = 0 + + def __init__(self): + self.node = None + self._cache = {} + + def default(self, node, *args): + if hasattr(node, 'getChildNodes'): + children = node.getChildNodes() + else: + children = iter_child_nodes(node) + + for child in children: + self.dispatch(child, *args) + + def dispatch(self, node, *args): + self.node = node + klass = node.__class__ + meth = self._cache.get(klass) + if meth is None: + className = klass.__name__ + meth = getattr(self.visitor, 'visit' + className, self.default) + self._cache[klass] = meth + + return meth(node, *args) + + def preorder(self, tree, visitor, *args): + """Do preorder walk of tree using visitor""" + self.visitor = visitor + visitor.visit = self.dispatch + self.dispatch(tree, *args) # XXX *args make sense? 
+ + +class PathNode: + def __init__(self, name, look="circle"): + self.name = name + self.look = look + + def to_dot(self): + print('node [shape=%s,label="%s"] %d;' % \ + (self.look, self.name, self.dot_id())) + + def dot_id(self): + return id(self) + + +class PathGraph: + def __init__(self, name, entity, lineno): + self.name = name + self.entity = entity + self.lineno = lineno + self.nodes = defaultdict(list) + + def connect(self, n1, n2): + self.nodes[n1].append(n2) + + def to_dot(self): + print('subgraph {') + for node in self.nodes: + node.to_dot() + for node, nexts in self.nodes.items(): + for next in nexts: + print('%s -- %s;' % (node.dot_id(), next.dot_id())) + print('}') + + def complexity(self): + """ Return the McCabe complexity for the graph. + V-E+2 + """ + num_edges = sum([len(n) for n in self.nodes.values()]) + num_nodes = len(self.nodes) + return num_edges - num_nodes + 2 + + +class PathGraphingAstVisitor(ASTVisitor): + """ A visitor for a parsed Abstract Syntax Tree which finds executable + statements. 
+ """ + + def __init__(self): + ASTVisitor.__init__(self) + self.classname = "" + self.graphs = {} + self.reset() + + def reset(self): + self.graph = None + self.tail = None + + def visitFunction(self, node): + + if self.classname: + entity = '%s%s' % (self.classname, node.name) + else: + entity = node.name + + name = '%d:1: %r' % (node.lineno, entity) + + if self.graph is not None: + # closure + pathnode = self.appendPathNode(name) + self.tail = pathnode + self.default(node) + bottom = PathNode("", look='point') + self.graph.connect(self.tail, bottom) + self.graph.connect(pathnode, bottom) + self.tail = bottom + else: + self.graph = PathGraph(name, entity, node.lineno) + pathnode = PathNode(name) + self.tail = pathnode + self.default(node) + self.graphs["%s%s" % (self.classname, node.name)] = self.graph + self.reset() + + visitFunctionDef = visitFunction + + def visitClass(self, node): + old_classname = self.classname + self.classname += node.name + "." + self.default(node) + self.classname = old_classname + + def appendPathNode(self, name): + if not self.tail: + return + pathnode = PathNode(name) + self.graph.connect(self.tail, pathnode) + self.tail = pathnode + return pathnode + + def visitSimpleStatement(self, node): + if node.lineno is None: + lineno = 0 + else: + lineno = node.lineno + name = "Stmt %d" % lineno + self.appendPathNode(name) + + visitAssert = visitAssign = visitAssTuple = visitPrint = \ + visitPrintnl = visitRaise = visitSubscript = visitDecorators = \ + visitPass = visitDiscard = visitGlobal = visitReturn = \ + visitSimpleStatement + + def visitLoop(self, node): + name = "Loop %d" % node.lineno + + if self.graph is None: + # global loop + self.graph = PathGraph(name, name, node.lineno) + pathnode = PathNode(name) + self.tail = pathnode + self.default(node) + self.graphs["%s%s" % (self.classname, name)] = self.graph + self.reset() + else: + pathnode = self.appendPathNode(name) + self.tail = pathnode + self.default(node.body) + bottom = 
PathNode("", look='point') + self.graph.connect(self.tail, bottom) + self.graph.connect(pathnode, bottom) + self.tail = bottom + + # TODO: else clause in node.else_ + + visitFor = visitWhile = visitLoop + + def visitIf(self, node): + name = "If %d" % node.lineno + pathnode = self.appendPathNode(name) + if not pathnode: + return # TODO: figure out what to do with if's outside def's. + loose_ends = [] + for t, n in node.tests: + self.tail = pathnode + self.default(n) + loose_ends.append(self.tail) + if node.else_: + self.tail = pathnode + self.default(node.else_) + loose_ends.append(self.tail) + else: + loose_ends.append(pathnode) + bottom = PathNode("", look='point') + for le in loose_ends: + self.graph.connect(le, bottom) + self.tail = bottom + + # TODO: visitTryExcept + # TODO: visitTryFinally + # TODO: visitWith + + # XXX todo: determine which ones can add to the complexity + # py2 + # TODO: visitStmt + # TODO: visitAssName + # TODO: visitCallFunc + # TODO: visitConst + + # py3 + # TODO: visitStore + # TODO: visitCall + # TODO: visitLoad + # TODO: visitNum + # TODO: visitarguments + # TODO: visitExpr + + +def get_code_complexity(code, min=7, filename='stdin'): + complex = [] + try: + ast = parse(code) + except AttributeError: + e = sys.exc_info()[1] + sys.stderr.write("Unable to parse %s: %s\n" % (filename, e)) + return 0 + + visitor = PathGraphingAstVisitor() + visitor.preorder(ast, visitor) + for graph in visitor.graphs.values(): + if graph is None: + # ? 
+ continue + if graph.complexity() >= min: + complex.append(dict( + type = 'W', + lnum = graph.lineno, + text = '%s %r is too complex (%d)' % ( + WARNING_CODE, + graph.entity, + graph.complexity(), + ) + )) + + return complex + + +def get_module_complexity(module_path, min=7): + """Returns the complexity of a module""" + code = open(module_path, "rU").read() + '\n\n' + return get_code_complexity(code, min, filename=module_path) + + +def main(argv): + opar = optparse.OptionParser() + opar.add_option("-d", "--dot", dest="dot", + help="output a graphviz dot file", action="store_true") + opar.add_option("-m", "--min", dest="min", + help="minimum complexity for output", type="int", + default=2) + + options, args = opar.parse_args(argv) + + text = open(args[0], "rU").read() + '\n\n' + ast = parse(text) + visitor = PathGraphingAstVisitor() + visitor.preorder(ast, visitor) + + if options.dot: + print('graph {') + for graph in visitor.graphs.values(): + if graph.complexity() >= options.min: + graph.to_dot() + print('}') + else: + for graph in visitor.graphs.values(): + if graph.complexity() >= options.min: + print(graph.name, graph.complexity()) + + +if __name__ == '__main__': + main(sys.argv[1:]) diff --git a/vim/ftplugin/python/pep8/__init__.py b/vim/ftplugin/python/pep8/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/vim/ftplugin/python/pep8/autopep8.py b/vim/ftplugin/python/pep8/autopep8.py new file mode 100755 index 0000000..143e28e --- /dev/null +++ b/vim/ftplugin/python/pep8/autopep8.py @@ -0,0 +1,2047 @@ +#!/usr/bin/env python +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject 
to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +"""Automatically formats Python code to conform to the PEP 8 style guide.""" + +from __future__ import print_function +from __future__ import division + +import copy +import os +import re +import sys +import inspect +import codecs +try: + from StringIO import StringIO +except ImportError: + from io import StringIO +import token +import tokenize +from optparse import OptionParser +from subprocess import Popen, PIPE +import difflib +import tempfile + +from distutils.version import StrictVersion +try: + import pep8 + try: + if StrictVersion(pep8.__version__) < StrictVersion('1.3a2'): + pep8 = None + except ValueError: + # Ignore non-standard version tags. 
+ pass +except ImportError: + pep8 = None + + +__version__ = '0.8.5' + + +PEP8_BIN = 'pep8' +CR = '\r' +LF = '\n' +CRLF = '\r\n' + +try: + unicode +except NameError: + unicode = str + + +def open_with_encoding(filename, encoding=None, mode='r'): + """Return opened file with a specific encoding.""" + if not encoding: + encoding = detect_encoding(filename) + + import io + return io.open(filename, mode=mode, encoding=encoding, + newline='') # Preserve line endings + + +def detect_encoding(filename): + """Return file encoding.""" + try: + with open(filename, 'rb') as input_file: + from lib2to3.pgen2 import tokenize as lib2to3_tokenize + encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0] + + # Check for correctness of encoding + with open_with_encoding(filename, encoding) as test_file: + test_file.read() + + return encoding + except (SyntaxError, LookupError, UnicodeDecodeError): + return 'latin-1' + + +def read_from_filename(filename, readlines=False): + """Return contents of file.""" + with open_with_encoding(filename) as input_file: + return input_file.readlines() if readlines else input_file.read() + + +class FixPEP8(object): + + """Fix invalid code. + + Fixer methods are prefixed "fix_". The _fix_source() method looks for these + automatically. + + The fixer method can take either one or two arguments (in addition to + self). The first argument is "result", which is the error information from + pep8. The second argument, "logical", is required only for logical-line + fixes. + + The fixer method can return the list of modified lines or None. An empty + list would mean that no changes were made. None would mean that only the + line reported in the pep8 error was modified. Note that the modified line + numbers that are returned are indexed at 1. This typically would correspond + with the line number reported in the pep8 error information. 
+ + [fixed method list] + - e111 + - e121,e122,e123,e124,e125,e126,e127,e128 + - e201,e202,e203 + - e211 + - e221,e222,e223,e224,e225 + - e231 + - e251 + - e261,e262 + - e271,e272,e273,e274 + - e301,e302,e303 + - e401 + - e502 + - e701,e702 + - e711 + - e721 + - w291,w293 + - w391 + - w602,w603,w604 + + """ + + def __init__(self, filename, options, contents=None): + self.filename = filename + if contents is None: + self.source = read_from_filename(filename, readlines=True) + else: + sio = StringIO(contents) + self.source = sio.readlines() + self.newline = find_newline(self.source) + self.options = options + self.indent_word = _get_indentword(unicode().join(self.source)) + self.logical_start = None + self.logical_end = None + # method definition + self.fix_e111 = self.fix_e101 + self.fix_e128 = self.fix_e127 + self.fix_e202 = self.fix_e201 + self.fix_e203 = self.fix_e201 + self.fix_e211 = self.fix_e201 + self.fix_e221 = self.fix_e271 + self.fix_e222 = self.fix_e271 + self.fix_e223 = self.fix_e271 + self.fix_e226 = self.fix_e225 + self.fix_e241 = self.fix_e271 + self.fix_e242 = self.fix_e224 + self.fix_e261 = self.fix_e262 + self.fix_e272 = self.fix_e271 + self.fix_e273 = self.fix_e271 + self.fix_e274 = self.fix_e271 + self.fix_e703 = self.fix_e702 + self.fix_w191 = self.fix_e101 + + def _fix_source(self, results): + completed_lines = set() + for result in sorted(results, key=_priority_key): + if result['line'] in completed_lines: + continue + + fixed_methodname = 'fix_%s' % result['id'].lower() + if hasattr(self, fixed_methodname): + fix = getattr(self, fixed_methodname) + + is_logical_fix = len(inspect.getargspec(fix).args) > 2 + if is_logical_fix: + # Do not run logical fix if any lines have been modified. 
+ if completed_lines: + continue + + logical = self._get_logical(result) + if not logical: + continue + + modified_lines = fix(result, logical) + else: + modified_lines = fix(result) + + if modified_lines: + completed_lines.update(modified_lines) + elif modified_lines == []: # Empty list means no fix + if self.options.verbose >= 2: + print( + '---> Not fixing {f} on line {l}'.format( + f=result['id'], l=result['line']), + file=sys.stderr) + else: # We assume one-line fix when None + completed_lines.add(result['line']) + else: + if self.options.verbose >= 3: + print("---> '%s' is not defined." % fixed_methodname, + file=sys.stderr) + info = result['info'].strip() + print('---> %s:%s:%s:%s' % (self.filename, + result['line'], + result['column'], + info), + file=sys.stderr) + + def fix(self): + """Return a version of the source code with PEP 8 violations fixed.""" + if pep8: + pep8_options = { + 'ignore': + self.options.ignore and self.options.ignore.split(','), + 'select': + self.options.select and self.options.select.split(','), + 'max_line_length': + self.options.max_line_length, + } + results = _execute_pep8(pep8_options, self.source) + else: + encoding = detect_encoding(self.filename) + + (_tmp_open_file, tmp_filename) = tempfile.mkstemp() + os.close(_tmp_open_file) + fp = open_with_encoding(tmp_filename, encoding=encoding, mode='w') + fp.write(unicode().join(self.source)) + fp.close() + + if self.options.verbose: + print('Running in compatibility mode. 
Consider ' + 'upgrading to the latest pep8.', + file=sys.stderr) + results = _spawn_pep8((['--ignore=' + self.options.ignore] + if self.options.ignore else []) + + (['--select=' + self.options.select] + if self.options.select else []) + + (['--max-line-length={length}'.format( + length=self.options.max_line_length)] + if self.options.max_line_length else []) + + [tmp_filename]) + if not pep8: + os.remove(tmp_filename) + + if self.options.verbose: + progress = {} + for r in results: + if r['id'] not in progress: + progress[r['id']] = set() + progress[r['id']].add(r['line']) + print('---> {n} issue(s) to fix {progress}'.format( + n=len(results), progress=progress), file=sys.stderr) + + self._fix_source(filter_results(source=unicode().join(self.source), + results=results, + aggressive=self.options.aggressive)) + return unicode().join(self.source) + + def fix_e101(self, _): + """Reindent all lines.""" + reindenter = Reindenter(self.source, self.newline) + modified_line_numbers = reindenter.run() + if modified_line_numbers: + self.source = reindenter.fixed_lines() + return modified_line_numbers + else: + return [] + + def find_logical(self, force=False): + # make a variable which is the index of all the starts of lines + if not force and self.logical_start is not None: + return + logical_start = [] + logical_end = [] + last_newline = True + sio = StringIO(''.join(self.source)) + parens = 0 + for t in tokenize.generate_tokens(sio.readline): + if t[0] in [tokenize.COMMENT, tokenize.DEDENT, + tokenize.INDENT, tokenize.NL, + tokenize.ENDMARKER]: + continue + if not parens and t[0] in [ + tokenize.NEWLINE, tokenize.SEMI + ]: + last_newline = True + logical_end.append((t[3][0] - 1, t[2][1])) + continue + if last_newline and not parens: + logical_start.append((t[2][0] - 1, t[2][1])) + last_newline = False + if t[0] == tokenize.OP: + if t[1] in '([{': + parens += 1 + elif t[1] in '}])': + parens -= 1 + self.logical_start = logical_start + self.logical_end = logical_end + + def 
_get_logical(self, result): + """Return the logical line corresponding to the result. + + Assumes input is already E702-clean. + + """ + try: + self.find_logical() + except (IndentationError, tokenize.TokenError): + return None + + row = result['line'] - 1 + col = result['column'] - 1 + ls = None + le = None + for i in range(0, len(self.logical_start), 1): + x = self.logical_end[i] + if x[0] > row or (x[0] == row and x[1] > col): + le = x + ls = self.logical_start[i] + break + if ls is None: + return None + original = self.source[ls[0]:le[0] + 1] + return ls, le, original + + def _fix_reindent(self, result, logical, fix_distinct=False): + """Fix a badly indented line. + + This is done by adding or removing from its initial indent only. + + """ + if not logical: + return [] + ls, _, original = logical + try: + rewrapper = Wrapper(original) + except (tokenize.TokenError, IndentationError): + return [] + valid_indents = rewrapper.pep8_expected() + if not rewrapper.rel_indent: + return [] + if result['line'] > ls[0]: + # got a valid continuation line number from pep8 + row = result['line'] - ls[0] - 1 + # always pick the first option for this + valid = valid_indents[row] + got = rewrapper.rel_indent[row] + else: + # Line number from pep8 isn't a continuation line. Instead, + # compare our own function's result, look for the first mismatch, + # and just hope that we take fewer than 100 iterations to finish. + for row in range(0, len(original), 1): + valid = valid_indents[row] + got = rewrapper.rel_indent[row] + if valid != got: + break + line = ls[0] + row + # always pick the expected indent, for now. 
+ indent_to = valid[0] + if fix_distinct and indent_to == 4: + if len(valid) == 1: + return [] + else: + indent_to = valid[1] + + if got != indent_to: + orig_line = self.source[line] + new_line = ' ' * (indent_to) + orig_line.lstrip() + if new_line == orig_line: + return [] + else: + self.source[line] = new_line + return [line + 1] # Line indexed at 1 + else: + return [] + + def fix_e121(self, result, logical): + """Fix indentation to be a multiple of four.""" + # Fix by adjusting initial indent level. + return self._fix_reindent(result, logical) + + def fix_e122(self, result, logical): + """Add absent indentation for hanging indentation.""" + # Fix by adding an initial indent. + return self._fix_reindent(result, logical) + + def fix_e123(self, result, logical): + """Align closing bracket to match opening bracket.""" + # Fix by deleting whitespace to the correct level. + if not logical: + return [] + logical_lines = logical[2] + line_index = result['line'] - 1 + original_line = self.source[line_index] + + fixed_line = (_get_indentation(logical_lines[0]) + + original_line.lstrip()) + if fixed_line == original_line: + # Fall back to slower method. + return self._fix_reindent(result, logical) + else: + self.source[line_index] = fixed_line + + def fix_e124(self, result, logical): + """Align closing bracket to match visual indentation.""" + # Fix by inserting whitespace before the closing bracket. + return self._fix_reindent(result, logical) + + def fix_e125(self, result, logical): + """Indent to distinguish line from next logical line.""" + # Fix by indenting the line in error to the next stop. 
+ modified_lines = self._fix_reindent(result, logical, fix_distinct=True) + if modified_lines: + return modified_lines + else: + # Fallback + line_index = result['line'] - 1 + original_line = self.source[line_index] + self.source[line_index] = self.indent_word + original_line + + def fix_e126(self, result, logical): + """Fix over-indented hanging indentation.""" + # fix by deleting whitespace to the left + if not logical: + return [] + logical_lines = logical[2] + line_index = result['line'] - 1 + original = self.source[line_index] + + fixed = (_get_indentation(logical_lines[0]) + + self.indent_word + original.lstrip()) + if fixed == original: + # Fallback to slower method. + return self._fix_reindent(result, logical) + else: + self.source[line_index] = fixed + + def fix_e127(self, result, logical): + """Fix visual indentation.""" + # Fix by inserting/deleting whitespace to the correct level. + modified_lines = self._align_visual_indent(result, logical) + if modified_lines != []: + return modified_lines + else: + # Fallback to slower method. + return self._fix_reindent(result, logical) + + def _align_visual_indent(self, result, logical): + """Correct visual indent. + + This includes over (E127) and under (E128) indented lines. 
+ + """ + if not logical: + return [] + logical_lines = logical[2] + line_index = result['line'] - 1 + original = self.source[line_index] + fixed = original + + if logical_lines[0].rstrip().endswith('\\'): + fixed = (_get_indentation(logical_lines[0]) + + self.indent_word + original.lstrip()) + else: + for symbol in '([{': + if symbol in logical_lines[0]: + fixed = logical_lines[0].find( + symbol) * ' ' + original.lstrip() + break + + if fixed == original: + return [] + else: + self.source[line_index] = fixed + + def fix_e201(self, result): + """Remove extraneous whitespace.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + # When multiline strings are involved, pep8 reports the error as + # being at the start of the multiline string, which doesn't work + # for us. + if ('"""' in target or + "'''" in target or + target.rstrip().endswith('\\')): + return [] + + fixed = fix_whitespace(target, + offset=offset, + replacement='') + + if fixed == target: + return [] + else: + self.source[line_index] = fixed + + def fix_e224(self, result): + """Remove extraneous whitespace around operator.""" + target = self.source[result['line'] - 1] + offset = result['column'] - 1 + fixed = target[:offset] + target[offset:].replace('\t', ' ') + self.source[result['line'] - 1] = fixed + + def fix_e225(self, result): + """Fix missing whitespace around operator.""" + target = self.source[result['line'] - 1] + offset = result['column'] - 1 + fixed = target[:offset] + ' ' + target[offset:] + + # Only proceed if non-whitespace characters match. + # And make sure we don't break the indentation. 
+ if (fixed.replace(' ', '') == target.replace(' ', '') and + _get_indentation(fixed) == _get_indentation(target)): + self.source[result['line'] - 1] = fixed + else: + return [] + + def fix_e231(self, result): + """Add missing whitespace.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] + fixed = target[:offset] + ' ' + target[offset:] + self.source[line_index] = fixed + + def fix_e251(self, result): + """Remove whitespace around parameter '=' sign.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + # This is necessary since pep8 sometimes reports columns that goes + # past the end of the physical line. This happens in cases like, + # foo(bar\n=None) + c = min(result['column'] - 1, + len(target) - 1) + + if target[c].strip(): + fixed = target + else: + fixed = target[:c].rstrip() + target[c:].lstrip() + + # There could be an escaped newline + # + # def foo(a=\ + # 1) + if (fixed.endswith('=\\\n') or + fixed.endswith('=\\\r\n') or + fixed.endswith('=\\\r')): + self.source[line_index] = fixed.rstrip('\n\r \t\\') + self.source[line_index + 1] = self.source[line_index + 1].lstrip() + return [line_index + 1, line_index + 2] # Line indexed at 1 + + self.source[result['line'] - 1] = fixed + + def fix_e262(self, result): + """Fix spacing after comment hash.""" + target = self.source[result['line'] - 1] + offset = result['column'] + + code = target[:offset].rstrip(' \t#') + comment = target[offset:].lstrip(' \t#') + + fixed = code + (' # ' + comment if comment.strip() + else self.newline) + + self.source[result['line'] - 1] = fixed + + def fix_e271(self, result): + """Fix extraneous whitespace around keywords.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + # When multiline strings are involved, pep8 reports the error as + # being at the start of the multiline string, which doesn't work + # for us. 
+ if ('"""' in target or + "'''" in target or + target.rstrip().endswith('\\')): + return [] + + fixed = fix_whitespace(target, + offset=offset, + replacement=' ') + + if fixed == target: + return [] + else: + self.source[line_index] = fixed + + def fix_e301(self, result): + """Add missing blank line.""" + cr = self.newline + self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] + + def fix_e302(self, result): + """Add missing 2 blank lines.""" + add_linenum = 2 - int(result['info'].split()[-1]) + cr = self.newline * add_linenum + self.source[result['line'] - 1] = cr + self.source[result['line'] - 1] + + def fix_e303(self, result): + """Remove extra blank lines.""" + delete_linenum = int(result['info'].split('(')[1].split(')')[0]) - 2 + delete_linenum = max(1, delete_linenum) + + # We need to count because pep8 reports an offset line number if there + # are comments. + cnt = 0 + line = result['line'] - 2 + modified_lines = [] + while cnt < delete_linenum: + if line < 0: + break + if not self.source[line].strip(): + self.source[line] = '' + modified_lines.append(1 + line) # Line indexed at 1 + cnt += 1 + line -= 1 + + return modified_lines + + def fix_e304(self, result): + """Remove blank line following function decorator.""" + line = result['line'] - 2 + if not self.source[line].strip(): + self.source[line] = '' + + def fix_e401(self, result): + """Put imports on separate lines.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + if not target.lstrip().startswith('import'): + return [] + + # pep8 (1.3.1) reports false positive if there is an import statement + # followed by a semicolon and some unrelated statement with commas in + # it. 
+ if ';' in target: + return [] + + indentation = re.split(pattern=r'\bimport\b', + string=target, maxsplit=1)[0] + fixed = (target[:offset].rstrip('\t ,') + self.newline + + indentation + 'import ' + target[offset:].lstrip('\t ,')) + self.source[line_index] = fixed + + def fix_e501(self, result): + """Try to make lines fit within --max-line-length characters.""" + line_index = result['line'] - 1 + target = self.source[line_index] + + if target.lstrip().startswith('#'): + # Shorten comment if it is the last comment line. + try: + if self.source[line_index + 1].lstrip().startswith('#'): + return [] + except IndexError: + pass + + # Wrap commented lines. + fixed = shorten_comment( + line=target, + newline=self.newline, + max_line_length=self.options.max_line_length) + if fixed == self.source[line_index]: + return [] + else: + self.source[line_index] = fixed + return + + indent = _get_indentation(target) + source = target[len(indent):] + sio = StringIO(target) + + # Check for multiline string. + try: + tokens = list(tokenize.generate_tokens(sio.readline)) + except (tokenize.TokenError, IndentationError): + multi_line_candidate = break_multi_line( + target, newline=self.newline, + indent_word=self.indent_word, + max_line_length=self.options.max_line_length) + + if multi_line_candidate: + self.source[line_index] = multi_line_candidate + return + else: + return [] + + # Prefer + # my_long_function_name( + # x, y, z, ...) + # + # over + # my_long_function_name(x, y, + # z, ...) 
+ candidates = [None, None] + for candidate_index, reverse in enumerate([False, True]): + candidates[candidate_index] = shorten_line( + tokens, source, target, indent, + self.indent_word, newline=self.newline, + max_line_length=self.options.max_line_length, + reverse=reverse, + aggressive=self.options.aggressive) + + if candidates[0] and candidates[1]: + if candidates[0].split(self.newline)[0].endswith('('): + self.source[line_index] = candidates[0] + else: + self.source[line_index] = candidates[1] + elif candidates[0]: + self.source[line_index] = candidates[0] + elif candidates[1]: + self.source[line_index] = candidates[1] + else: + # Otherwise both don't work + return [] + + def fix_e502(self, result): + """Remove extraneous escape of newline.""" + line_index = result['line'] - 1 + target = self.source[line_index] + self.source[line_index] = target.rstrip('\n\r \t\\') + self.newline + + def fix_e701(self, result): + """Put colon-separated compound statement on separate lines.""" + line_index = result['line'] - 1 + target = self.source[line_index] + c = result['column'] + + fixed_source = (target[:c] + self.newline + + _get_indentation(target) + self.indent_word + + target[c:].lstrip('\n\r \t\\')) + self.source[result['line'] - 1] = fixed_source + + def fix_e702(self, result, logical): + """Put semicolon-separated compound statement on separate lines.""" + logical_lines = logical[2] + + line_index = result['line'] - 1 + target = self.source[line_index] + + if target.rstrip().endswith('\\'): + # Normalize '1; \\\n2' into '1; 2'. 
+ self.source[line_index] = target.rstrip('\n \r\t\\') + self.source[line_index + 1] = self.source[line_index + 1].lstrip() + return [line_index + 1, line_index + 2] + + if target.rstrip().endswith(';'): + self.source[line_index] = target.rstrip('\n \r\t;') + self.newline + return + + offset = result['column'] - 1 + first = target[:offset].rstrip(';').rstrip() + second = (_get_indentation(logical_lines[0]) + + target[offset:].lstrip(';').lstrip()) + + self.source[line_index] = first + self.newline + second + + def fix_e711(self, result): + """Fix comparison with None.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + right_offset = offset + 2 + if right_offset >= len(target): + return [] + + left = target[:offset].rstrip() + center = target[offset:right_offset] + right = target[right_offset:].lstrip() + + if not right.startswith('None'): + return [] + + if center.strip() == '==': + new_center = 'is' + elif center.strip() == '!=': + new_center = 'is not' + else: + return [] + + self.source[line_index] = ' '.join([left, new_center, right]) + + def fix_e712(self, result): + """Fix comparison with boolean.""" + line_index = result['line'] - 1 + target = self.source[line_index] + offset = result['column'] - 1 + + right_offset = offset + 2 + if right_offset >= len(target): + return [] + + left = target[:offset].rstrip() + center = target[offset:right_offset] + right = target[right_offset:].lstrip() + + # Handle simple cases only. 
+ new_right = None + if center.strip() == '==': + if re.match(r'\bTrue\b', right): + new_right = re.sub(r'\bTrue\b *', '', right, count=1) + elif center.strip() == '!=': + if re.match(r'\bFalse\b', right): + new_right = re.sub(r'\bFalse\b *', '', right, count=1) + + if new_right is None: + return [] + + if new_right[0].isalnum(): + new_right = ' ' + new_right + + self.source[line_index] = left + new_right + + def fix_e721(self, _): + """Switch to use isinstance().""" + return self.refactor('idioms') + + def fix_w291(self, result): + """Remove trailing whitespace.""" + fixed_line = self.source[result['line'] - 1].rstrip() + self.source[result['line'] - 1] = '%s%s' % (fixed_line, self.newline) + + def fix_w293(self, result): + """Remove trailing whitespace on blank line.""" + assert not self.source[result['line'] - 1].strip() + self.source[result['line'] - 1] = self.newline + + def fix_w391(self, _): + """Remove trailing blank lines.""" + blank_count = 0 + for line in reversed(self.source): + line = line.rstrip() + if line: + break + else: + blank_count += 1 + + original_length = len(self.source) + self.source = self.source[:original_length - blank_count] + return range(1, 1 + original_length) + + def refactor(self, fixer_name, ignore=None): + """Return refactored code using lib2to3. + + Skip if ignore string is produced in the refactored code. 
+ + """ + from lib2to3 import pgen2 + try: + new_text = refactor_with_2to3(''.join(self.source), + fixer_name=fixer_name) + except (pgen2.parse.ParseError, + UnicodeDecodeError, UnicodeEncodeError): + return [] + + original = unicode().join(self.source).strip() + if original == new_text.strip(): + return [] + else: + if ignore: + if ignore in new_text and ignore not in ''.join(self.source): + return [] + original_length = len(self.source) + self.source = [new_text] + return range(1, 1 + original_length) + + def fix_w601(self, _): + """Replace the {}.has_key() form with 'in'.""" + return self.refactor('has_key') + + def fix_w602(self, _): + """Fix deprecated form of raising exception.""" + return self.refactor('raise', + ignore='with_traceback') + + def fix_w603(self, _): + """Replace <> with !=.""" + return self.refactor('ne') + + def fix_w604(self, _): + """Replace backticks with repr().""" + return self.refactor('repr') + + +def find_newline(source): + """Return type of newline used in source.""" + cr, lf, crlf = 0, 0, 0 + for s in source: + if s.endswith(CRLF): + crlf += 1 + elif s.endswith(CR): + cr += 1 + elif s.endswith(LF): + lf += 1 + _max = max(cr, crlf, lf) + if _max == lf: + return LF + elif _max == crlf: + return CRLF + elif _max == cr: + return CR + else: + return LF + + +def _get_indentword(source): + """Return indentation type.""" + sio = StringIO(source) + indent_word = ' ' # Default in case source has no indentation + try: + for t in tokenize.generate_tokens(sio.readline): + if t[0] == token.INDENT: + indent_word = t[1] + break + except (tokenize.TokenError, IndentationError): + pass + return indent_word + + +def _get_indentation(line): + """Return leading whitespace.""" + if line.strip(): + non_whitespace_index = len(line) - len(line.lstrip()) + return line[:non_whitespace_index] + else: + return '' + + +def _analyze_pep8result(result): + tmp = result.split(':') + info = ' '.join(result.split()[1:]) + return {'id': info.lstrip().split()[0], + 
'filename': tmp[0], + 'line': int(tmp[1]), + 'column': int(tmp[2]), + 'info': info} + + +def _get_difftext(old, new, filename): + diff = difflib.unified_diff( + old, new, + 'original/' + filename, + 'fixed/' + filename) + return ''.join(diff) + + +def _priority_key(pep8_result): + """Key for sorting PEP8 results. + + Global fixes should be done first. This is important for things + like indentation. + + """ + priority = ['e101', 'e111', 'w191', # Global fixes + 'e701', # Fix multiline colon-based before semicolon based + 'e702', # Break multiline statements early + 'e225', 'e231', # things that make lines longer + 'e201', # Remove extraneous whitespace before breaking lines + 'e501', # before we break lines + ] + key = pep8_result['id'].lower() + if key in priority: + return priority.index(key) + else: + # Lowest priority + return len(priority) + + +def shorten_line(tokens, source, target, indentation, indent_word, newline, + max_line_length, reverse=False, aggressive=False): + """Separate line at OPERATOR.""" + actual_length = len(indentation) + len(source) + + delta = (actual_length - max_line_length) // 3 + assert delta >= 0 + + if not delta: + delta = 1 + + shortened = None + length = None + for length in range(max_line_length, actual_length, delta): + shortened = _shorten_line( + tokens=tokens, + source=source, + target=target, + indentation=indentation, + indent_word=indent_word, + newline=newline, + max_line_length=length, + reverse=reverse, + aggressive=aggressive) + + if shortened is not None: + break + + if aggressive and (length is None or length > max_line_length): + commas_shortened = _shorten_line_at_commas( + tokens=tokens, + source=source, + indentation=indentation, + indent_word=indent_word, + newline=newline) + + if commas_shortened is not None and commas_shortened != source: + shortened = commas_shortened + + return shortened + + +def _shorten_line(tokens, source, target, indentation, indent_word, newline, + max_line_length, reverse=False, 
aggressive=False): + """Separate line at OPERATOR.""" + max_line_length_minus_indentation = max_line_length - len(indentation) + if reverse: + tokens = reversed(tokens) + for tkn in tokens: + # Don't break on '=' after keyword as this violates PEP 8. + if token.OP == tkn[0] and tkn[1] != '=': + offset = tkn[2][1] + 1 + if reverse: + if offset > (max_line_length_minus_indentation - + len(indent_word)): + continue + else: + if (len(target.rstrip()) - offset > + (max_line_length_minus_indentation - + len(indent_word))): + continue + first = source[:offset - len(indentation)] + + second_indent = indentation + if first.rstrip().endswith('('): + second_indent += indent_word + elif '(' in first: + second_indent += ' ' * (1 + first.find('(')) + else: + second_indent += indent_word + + second = (second_indent + + source[offset - len(indentation):].lstrip()) + if not second.strip(): + continue + + # Don't modify if lines are not short enough + if len(first) > max_line_length_minus_indentation: + continue + if len(second) > max_line_length: # Already includes indentation + continue + # Do not begin a line with a comma + if second.lstrip().startswith(','): + continue + # Do end a line with a dot + if first.rstrip().endswith('.'): + continue + if tkn[1] in '+-*/': + fixed = first + ' \\' + newline + second + else: + fixed = first + newline + second + + # Only fix if syntax is okay. 
+ if check_syntax(normalize_multiline(fixed) + if aggressive else fixed): + return indentation + fixed + return None + + +def _shorten_line_at_commas(tokens, source, indentation, indent_word, newline): + """Separate line by breaking at commas.""" + if ',' not in source: + return None + + fixed = '' + for tkn in tokens: + token_type = tkn[0] + token_string = tkn[1] + + if token_string == '.': + fixed = fixed.rstrip() + + fixed += token_string + + if token_type == token.OP and token_string == ',': + fixed += newline + indent_word + elif token_type not in (token.NEWLINE, token.ENDMARKER): + if token_string != '.': + fixed += ' ' + + if check_syntax(fixed): + return indentation + fixed + else: + return None + + +def normalize_multiline(line): + """Remove multiline-related code that will cause syntax error. + + This is for purposes of checking syntax. + + """ + for quote in '\'"': + dict_pattern = r'^{q}[^{q}]*{q}\s*:\s*'.format(q=quote) + if re.match(dict_pattern, line): + if not line.strip().endswith('}'): + line += '}' + return '{' + line + + return line + + +def fix_whitespace(line, offset, replacement): + """Replace whitespace at offset and return fixed line.""" + # Replace escaped newlines too + left = line[:offset].rstrip('\n\r \t\\') + right = line[offset:].lstrip('\n\r \t\\') + if right.startswith('#'): + return line + else: + return left + replacement + right + + +def _spawn_pep8(pep8_options): + """Execute pep8 via subprocess.Popen.""" + p = Popen([PEP8_BIN] + pep8_options, stdout=PIPE) + output = p.communicate()[0].decode('utf-8') + return [_analyze_pep8result(l) for l in output.splitlines()] + + +def _execute_pep8(pep8_options, source): + """Execute pep8 via python method calls.""" + class QuietReport(pep8.BaseReport): + + """Version of checker that does not print.""" + + def __init__(self, options): + super(QuietReport, self).__init__(options) + self.__full_error_results = [] + + def error(self, line_number, offset, text, _): + """Collect errors.""" + code 
= super(QuietReport, self).error(line_number, offset, text, _) + if code: + self.__full_error_results.append( + {'id': code, + 'line': line_number, + 'column': offset + 1, + 'info': text}) + + def full_error_results(self): + """Return error results in detail. + + Results are in the form of a list of dictionaries. Each dictionary + contains 'id', 'line', 'column', and 'info'. + + """ + return self.__full_error_results + + checker = pep8.Checker('', lines=source, + reporter=QuietReport, **pep8_options) + checker.check_all() + return checker.report.full_error_results() + + +class Reindenter(object): + + """Reindents badly-indented code to uniformly use four-space indentation. + + Released to the public domain, by Tim Peters, 03 October 2000. + + """ + + def __init__(self, input_text, newline): + self.newline = newline + + # Raw file lines. + self.raw = input_text + self.after = None + + self.string_content_line_numbers = multiline_string_lines( + ''.join(self.raw)) + + # File lines, rstripped & tab-expanded. Dummy at start is so + # that we can use tokenize's 1-based line numbering easily. + # Note that a line is all-blank iff it is a newline. + self.lines = [] + for line_number, line in enumerate(self.raw, start=1): + # Do not modify if inside a multi-line string. + if line_number in self.string_content_line_numbers: + self.lines.append(line) + else: + # Only expand leading tabs. + self.lines.append(_get_indentation(line).expandtabs() + + line.strip() + newline) + + self.lines.insert(0, None) + self.index = 1 # index into self.lines of next line + + def run(self): + """Fix indentation and return modified line numbers. + + Line numbers are indexed at 1. + + """ + try: + stats = reindent_stats(tokenize.generate_tokens(self.getline)) + except (tokenize.TokenError, IndentationError): + return set() + # Remove trailing empty lines. + lines = self.lines + while lines and lines[-1] == self.newline: + lines.pop() + # Sentinel. 
+ stats.append((len(lines), 0)) + # Map count of leading spaces to # we want. + have2want = {} + # Program after transformation. + after = self.after = [] + # Copy over initial empty lines -- there's nothing to do until + # we see a line with *something* on it. + i = stats[0][0] + after.extend(lines[1:i]) + for i in range(len(stats) - 1): + thisstmt, thislevel = stats[i] + nextstmt = stats[i + 1][0] + have = _leading_space_count(lines[thisstmt]) + want = thislevel * 4 + if want < 0: + # A comment line. + if have: + # An indented comment line. If we saw the same + # indentation before, reuse what it most recently + # mapped to. + want = have2want.get(have, - 1) + if want < 0: + # Then it probably belongs to the next real stmt. + for j in range(i + 1, len(stats) - 1): + jline, jlevel = stats[j] + if jlevel >= 0: + if have == _leading_space_count(lines[jline]): + want = jlevel * 4 + break + if want < 0: # Maybe it's a hanging + # comment like this one, + # in which case we should shift it like its base + # line got shifted. + for j in range(i - 1, -1, -1): + jline, jlevel = stats[j] + if jlevel >= 0: + want = (have + _leading_space_count( + after[jline - 1]) - + _leading_space_count(lines[jline])) + break + if want < 0: + # Still no luck -- leave it alone. 
+ want = have + else: + want = 0 + assert want >= 0 + have2want[have] = want + diff = want - have + if diff == 0 or have == 0: + after.extend(lines[thisstmt:nextstmt]) + else: + for line_number, line in enumerate(lines[thisstmt:nextstmt], + start=thisstmt): + if line_number in self.string_content_line_numbers: + after.append(line) + elif diff > 0: + if line == self.newline: + after.append(line) + else: + after.append(' ' * diff + line) + else: + remove = min(_leading_space_count(line), -diff) + after.append(line[remove:]) + + if self.raw == self.after: + return set() + else: + return (set(range(1, 1 + len(self.raw))) - + self.string_content_line_numbers) + + def fixed_lines(self): + return self.after + + def getline(self): + """Line-getter for tokenize.""" + if self.index >= len(self.lines): + line = '' + else: + line = self.lines[self.index] + self.index += 1 + return line + + +def reindent_stats(tokens): + """Return list of (lineno, indentlevel) pairs. + + One for each stmt and comment line. indentlevel is -1 for comment lines, as + a signal that tokenize doesn't know what to do about them; indeed, they're + our headache! + + """ + find_stmt = 1 # next token begins a fresh stmt? + level = 0 # current indent level + stats = [] + + for t in tokens: + token_type = t[0] + sline = t[2][0] + line = t[4] + + if token_type == tokenize.NEWLINE: + # A program statement, or ENDMARKER, will eventually follow, + # after some (possibly empty) run of tokens of the form + # (NL | COMMENT)* (INDENT | DEDENT+)? 
+ find_stmt = 1 + + elif token_type == tokenize.INDENT: + find_stmt = 1 + level += 1 + + elif token_type == tokenize.DEDENT: + find_stmt = 1 + level -= 1 + + elif token_type == tokenize.COMMENT: + if find_stmt: + stats.append((sline, -1)) + # but we're still looking for a new stmt, so leave + # find_stmt alone + + elif token_type == tokenize.NL: + pass + + elif find_stmt: + # This is the first "real token" following a NEWLINE, so it + # must be the first token of the next program statement, or an + # ENDMARKER. + find_stmt = 0 + if line: # not endmarker + stats.append((sline, level)) + + return stats + + +class Wrapper(object): + + """Class for functions relating to continuation lines and line folding. + + Each instance operates on a single logical line. + + """ + + SKIP_TOKENS = frozenset([ + tokenize.COMMENT, tokenize.NL, tokenize.INDENT, + tokenize.DEDENT, tokenize.NEWLINE, tokenize.ENDMARKER + ]) + + def __init__(self, physical_lines): + self.lines = physical_lines + self.tokens = [] + self.rel_indent = None + sio = StringIO(''.join(physical_lines)) + for t in tokenize.generate_tokens(sio.readline): + if not len(self.tokens) and t[0] in self.SKIP_TOKENS: + continue + if t[0] != tokenize.ENDMARKER: + self.tokens.append(t) + + self.logical_line = self.build_tokens_logical(self.tokens) + + def build_tokens_logical(self, tokens): + """Build a logical line from a list of tokens. + + Return the logical line and a list of (offset, token) tuples. Does + not mute strings like the version in pep8.py. 
+ + """ + # from pep8.py with minor modifications + logical = [] + previous = None + for t in tokens: + token_type, text = t[0:2] + if token_type in self.SKIP_TOKENS: + continue + if previous: + end_line, end = previous[3] + start_line, start = t[2] + if end_line != start_line: # different row + prev_text = self.lines[end_line - 1][end - 1] + if prev_text == ',' or (prev_text not in '{[(' + and text not in '}])'): + logical.append(' ') + elif end != start: # different column + fill = self.lines[end_line - 1][end:start] + logical.append(fill) + logical.append(text) + previous = t + logical_line = ''.join(logical) + assert logical_line.lstrip() == logical_line + assert logical_line.rstrip() == logical_line + return logical_line + + def pep8_expected(self): + """Replicate logic in pep8.py, to know what level to indent things to. + + Return a list of lists; each list represents valid indent levels for + the line in question, relative from the initial indent. However, the + first entry is the indent level which was expected. + + """ + # What follows is an adjusted version of + # pep8.py:continuation_line_indentation. All of the comments have been + # stripped and the 'yield' statements replaced with 'pass'. + tokens = self.tokens + if not tokens: + return + + first_row = tokens[0][2][0] + nrows = 1 + tokens[-1][2][0] - first_row + + # here are the return values + valid_indents = [list()] * nrows + indent_level = tokens[0][2][1] + valid_indents[0].append(indent_level) + + if nrows == 1: + # bug, really. 
+ return valid_indents + + indent_next = self.logical_line.endswith(':') + + row = depth = 0 + parens = [0] * nrows + self.rel_indent = rel_indent = [0] * nrows + indent = [indent_level] + indent_chances = {} + last_indent = (0, 0) + last_token_multiline = None + + for token_type, text, start, end, _ in self.tokens: + newline = row < start[0] - first_row + if newline: + row = start[0] - first_row + newline = (not last_token_multiline and + token_type not in (tokenize.NL, tokenize.NEWLINE)) + + if newline: + # This is where the differences start. Instead of looking at + # the line and determining whether the observed indent matches + # our expectations, we decide which type of indentation is in + # use at the given indent level, and return the offset. This + # algorithm is susceptible to "carried errors", but should + # through repeated runs eventually solve indentation for + # multi-line expressions less than PEP8_PASSES_MAX lines long. + + if depth: + for open_row in range(row - 1, -1, -1): + if parens[open_row]: + break + else: + open_row = 0 + + # That's all we get to work with. This code attempts to + # "reverse" the below logic, and place into the valid indents + # list + vi = [] + add_second_chances = False + if token_type == tokenize.OP and text in ']})': + # this line starts with a closing bracket, so it needs to + # be closed at the same indent as the opening one. + if indent[depth]: + # hanging indent + vi.append(indent[depth]) + else: + # visual indent + vi.append(indent_level + rel_indent[open_row]) + elif depth and indent[depth]: + # visual indent was previously confirmed. + vi.append(indent[depth]) + add_second_chances = True + elif depth and True in indent_chances.values(): + # visual indent happened before, so stick to + # visual indent this time. 
+ if depth > 1 and indent[depth - 1]: + vi.append(indent[depth - 1]) + else: + # stupid fallback + vi.append(indent_level + 4) + add_second_chances = True + elif not depth: + vi.append(indent_level + 4) + else: + # must be in hanging indent + hang = rel_indent[open_row] + 4 + vi.append(indent_level + hang) + + # about the best we can do without look-ahead + if (indent_next and vi[0] == indent_level + 4 and + nrows == row + 1): + vi[0] += 4 + + if add_second_chances: + # visual indenters like to line things up. + min_indent = vi[0] + for col, what in indent_chances.items(): + if col > min_indent and ( + what is True or + (what == str and token_type == tokenize.STRING) or + (what == text and token_type == tokenize.OP) + ): + vi.append(col) + vi = sorted(vi) + + valid_indents[row] = vi + + # Returning to original continuation_line_indentation() from + # pep8. + visual_indent = indent_chances.get(start[1]) + last_indent = start + rel_indent[row] = start[1] - indent_level + hang = rel_indent[row] - rel_indent[open_row] + + if token_type == tokenize.OP and text in ']})': + pass + elif visual_indent is True: + if not indent[depth]: + indent[depth] = start[1] + + # line altered: comments shouldn't define a visual indent + if parens[row] and not indent[depth] and token_type not in ( + tokenize.NL, tokenize.COMMENT + ): + indent[depth] = start[1] + indent_chances[start[1]] = True + elif token_type == tokenize.STRING or text in ( + 'u', 'ur', 'b', 'br' + ): + indent_chances[start[1]] = str + + if token_type == tokenize.OP: + if text in '([{': + depth += 1 + indent.append(0) + parens[row] += 1 + elif text in ')]}' and depth > 0: + prev_indent = indent.pop() or last_indent[1] + for d in range(depth): + if indent[d] > prev_indent: + indent[d] = 0 + for ind in list(indent_chances): + if ind >= prev_indent: + del indent_chances[ind] + depth -= 1 + if depth and indent[depth]: # modified + indent_chances[indent[depth]] = True + for idx in range(row, -1, -1): + if parens[idx]: + 
parens[idx] -= 1 + break + assert len(indent) == depth + 1 + if start[1] not in indent_chances: + indent_chances[start[1]] = text + + last_token_multiline = (start[0] != end[0]) + + return valid_indents + + +def _leading_space_count(line): + """Return number of leading spaces in line.""" + i = 0 + while i < len(line) and line[i] == ' ': + i += 1 + return i + + +def refactor_with_2to3(source_text, fixer_name): + """Use lib2to3 to refactor the source. + + Return the refactored source code. + + """ + from lib2to3 import refactor + fixers = ['lib2to3.fixes.fix_' + fixer_name] + tool = refactor.RefactoringTool( + fixer_names=fixers, + explicit=fixers) + return unicode(tool.refactor_string(source_text, name='')) + + +def break_multi_line(source_text, newline, indent_word, max_line_length): + """Break first line of multi-line code. + + Return None if a break is not possible. + + """ + # Handle special case only. + for symbol in '([{': + # Only valid if symbol is not on a line by itself. + if (symbol in source_text + and source_text.rstrip().endswith(',') + and not source_text.lstrip().startswith(symbol)): + index = 1 + source_text.find(symbol) + if index >= max_line_length: + return None + + # Make sure we are not in a string. + for quote in ['"', "'"]: + if quote in source_text: + if source_text.find(quote) < index: + return None + + # Make sure we are not in a comment. + if '#' in source_text: + if source_text.find('#') < index: + return None + + assert index < len(source_text) + return ( + source_text[:index].rstrip() + newline + + _get_indentation(source_text) + indent_word + + source_text[index:].lstrip()) + + return None + + +def check_syntax(code): + """Return True if syntax is okay.""" + try: + return compile(code, '', 'exec') + except (SyntaxError, TypeError, UnicodeDecodeError): + return False + + +def filter_results(source, results, aggressive=False): + """Filter out spurious reports from pep8. 
+
+    If aggressive is True, we allow possibly unsafe fixes (E711, E712).
+
+    """
+    non_docstring_string_line_numbers = multiline_string_lines(
+        source, include_docstrings=False)
+    all_string_line_numbers = multiline_string_lines(
+        source, include_docstrings=True)
+
+    split_source = [None] + source.splitlines()
+
+    for r in results:
+        issue_id = r['id'].lower()
+
+        if r['line'] in non_docstring_string_line_numbers:
+            if issue_id.startswith('e1'):
+                continue
+            elif issue_id in ['e501', 'w191']:
+                continue
+
+        if r['line'] in all_string_line_numbers:
+            if issue_id in ['e501']:
+                continue
+
+        # Filter out incorrect E101 reports when there are no tabs.
+        # pep8 will complain about this even if the tab indentation found
+        # elsewhere is in a multi-line string.
+        if issue_id == 'e101' and '\t' not in split_source[r['line']]:
+            continue
+
+        if issue_id in ['e711', 'e712'] and not aggressive:
+            continue
+
+        yield r
+
+
+def multiline_string_lines(source, include_docstrings=False):
+    """Return line numbers that are within multiline strings.
+
+    The line numbers are indexed at 1.
+
+    Docstrings are ignored.
+
+    """
+    sio = StringIO(source)
+    line_numbers = set()
+    previous_token_type = ''
+    try:
+        for t in tokenize.generate_tokens(sio.readline):
+            token_type = t[0]
+            start_row = t[2][0]
+            end_row = t[3][0]
+
+            if (token_type == tokenize.STRING and start_row != end_row):
+                if (include_docstrings or
+                        previous_token_type != tokenize.INDENT):
+                    # We increment by one since we want the contents of the
+                    # string.
+                    line_numbers |= set(range(1 + start_row, 1 + end_row))
+
+            previous_token_type = token_type
+    except (IndentationError, tokenize.TokenError):
+        pass
+
+    return line_numbers
+
+
+def shorten_comment(line, newline, max_line_length):
+    """Return trimmed or split long comment line."""
+    assert len(line) > max_line_length
+    line = line.rstrip()
+
+    # PEP 8 recommends 72 characters for comment text.
+ indentation = _get_indentation(line) + '# ' + max_line_length = min(max_line_length, + len(indentation) + 72) + + MIN_CHARACTER_REPEAT = 5 + if (len(line) - len(line.rstrip(line[-1])) >= MIN_CHARACTER_REPEAT and + not line[-1].isalnum()): + # Trim comments that end with things like --------- + return line[:max_line_length] + newline + elif re.match(r'\s*#+\s*\w+', line): + import textwrap + split_lines = textwrap.wrap(line.lstrip(' \t#'), + initial_indent=indentation, + subsequent_indent=indentation, + width=max_line_length, + break_long_words=False) + return newline.join(split_lines) + newline + else: + return line + newline + + +def format_block_comments(source): + """Format block comments.""" + if '#' not in source: + # Optimization. + return source + + string_line_numbers = multiline_string_lines(source, + include_docstrings=True) + fixed_lines = [] + sio = StringIO(source) + for (line_number, line) in enumerate(sio.readlines(), start=1): + if (re.match(r'\s*#+\w+', line) and + line_number not in string_line_numbers): + fixed_lines.append(_get_indentation(line) + + '# ' + + line.lstrip().lstrip('#')) + else: + fixed_lines.append(line) + + return ''.join(fixed_lines) + + +def normalize_line_endings(lines): + """Return fixed line endings. + + All lines will be modified to use the most common line ending. 
+ + """ + newline = find_newline(lines) + return [line.rstrip('\n\r') + newline for line in lines] + + +def mutual_startswith(a, b): + return b.startswith(a) or a.startswith(b) + + +def code_match(code, select, ignore): + if ignore: + for ignored_code in [c.strip() for c in ignore.split(',')]: + if mutual_startswith(code.lower(), ignored_code.lower()): + return False + + if select: + for selected_code in [c.strip() for c in select.split(',')]: + if mutual_startswith(code.lower(), selected_code.lower()): + return True + return False + + return True + + +def fix_string(source, options=None): + """Return fixed source code.""" + if not options: + options = parse_args([''])[0] + + sio = StringIO(source) + return fix_lines(sio.readlines(), options=options) + + +def fix_lines(source_lines, options, filename=''): + """Return fixed source code.""" + tmp_source = unicode().join(normalize_line_endings(source_lines)) + + # Keep a history to break out of cycles. + previous_hashes = set([hash(tmp_source)]) + + fixed_source = tmp_source + if code_match('e26', select=options.select, ignore=options.ignore): + fixed_source = format_block_comments(fixed_source) + + for _ in range(-1, options.pep8_passes): + tmp_source = copy.copy(fixed_source) + + fix = FixPEP8(filename, options, contents=tmp_source) + fixed_source = fix.fix() + + if hash(fixed_source) in previous_hashes: + break + else: + previous_hashes.add(hash(fixed_source)) + + return fixed_source + + +def fix_file(filename, options=None, output=None): + if not options: + options = parse_args([filename])[0] + + original_source = read_from_filename(filename, readlines=True) + + fixed_source = original_source + + if options.in_place: + encoding = detect_encoding(filename) + + interruption = None + try: + fixed_source = fix_lines(fixed_source, options, filename=filename) + except KeyboardInterrupt as exception: + # Allow stopping early. 
+        interruption = exception
+
+    if options.diff:
+        new = StringIO(fixed_source)
+        new = new.readlines()
+        diff = _get_difftext(original_source, new, filename)
+        if output:
+            output.write(diff)
+        else:
+            return diff
+    elif options.in_place:
+        fp = open_with_encoding(filename, encoding=encoding,
+                                mode='w')
+        fp.write(fixed_source)
+        fp.close()
+    else:
+        if output:
+            output.write(fixed_source)
+        else:
+            return fixed_source
+
+    if interruption:
+        raise interruption
+
+
+def parse_args(args):
+    """Parse command-line options."""
+    parser = OptionParser(usage='Usage: autopep8 [options] '
+                                '[filename [filename ...]]'
+                                '\nUse filename \'-\' for stdin.',
+                          version='autopep8: %s' % __version__,
+                          description=__doc__,
+                          prog='autopep8')
+    parser.add_option('-v', '--verbose', action='count', dest='verbose',
+                      default=0,
+                      help='print verbose messages; '
+                           'multiple -v result in more verbose messages')
+    parser.add_option('-d', '--diff', action='store_true', dest='diff',
+                      help='print the diff for the fixed source')
+    parser.add_option('-i', '--in-place', action='store_true',
+                      help='make changes to files in place')
+    parser.add_option('-r', '--recursive', action='store_true',
+                      help='run recursively; must be used with --in-place or '
+                           '--diff')
+    parser.add_option('-p', '--pep8-passes',
+                      default=100, type=int,
+                      help='maximum number of additional pep8 passes'
+                           ' (default: %default)')
+    parser.add_option('--list-fixes', action='store_true',
+                      help='list codes for fixes; '
+                           'used by --ignore and --select')
+    parser.add_option('--ignore', default='',
+                      help='do not fix these errors/warnings (e.g. E4,W)')
+    parser.add_option('--select', default='',
+                      help='fix only these errors/warnings (e.g.
E4,W)') + parser.add_option('--max-line-length', default=79, type=int, + help='set maximum allowed line length ' + '(default: %default)') + parser.add_option('--aggressive', action='store_true', + help='enable possibly unsafe changes (E711, E712)') + options, args = parser.parse_args(args) + + if not len(args) and not options.list_fixes: + parser.error('incorrect number of arguments') + + if '-' in args and len(args) > 1: + parser.error('cannot mix stdin and regular files') + + if len(args) > 1 and not (options.in_place or options.diff): + parser.error('autopep8 only takes one filename as argument ' + 'unless the "--in-place" or "--diff" options are ' + 'used') + + if options.recursive and not (options.in_place or options.diff): + parser.error('--recursive must be used with --in-place or --diff') + + if options.in_place and options.diff: + parser.error('--in-place and --diff are mutually exclusive') + + if options.max_line_length < 8: + parser.error('--max-line-length must greater than 8') + + if args == ['-'] and (options.in_place or options.recursive): + parser.error('--in-place or --recursive cannot be used with ' + 'standard input') + + return options, args + + +def supported_fixes(): + """Yield pep8 error codes that autopep8 fixes. + + Each item we yield is a tuple of the code followed by its description. + + """ + instance = FixPEP8(filename=None, options=None, contents='') + for attribute in dir(instance): + code = re.match('fix_([ew][0-9][0-9][0-9])', attribute) + if code: + yield (code.group(1).upper(), + re.sub(r'\s+', ' ', + getattr(instance, attribute).__doc__)) + + +class LineEndingWrapper(object): + + r"""Replace line endings to work with sys.stdout. + + It seems that sys.stdout expects only '\n' as the line ending, no matter + the platform. Otherwise, we get repeated line endings. 
+ + """ + + def __init__(self, output): + self.__output = output + + def write(self, s): + self.__output.write(s.replace('\r\n', '\n').replace('\r', '\n')) + + def __getattr__(self, key): + return getattr(self.__output, key) + + +def temporary_file(): + """Return temporary file.""" + try: + return tempfile.NamedTemporaryFile(mode='w', encoding='utf-8') + except TypeError: + return tempfile.NamedTemporaryFile(mode='w') + + +def fix_multiple_files(filenames, options=None, output=None): + """Fix list of files. + + Optionally fix files recursively. + + """ + while filenames: + name = filenames.pop(0) + if options.recursive and os.path.isdir(name): + for root, directories, children in os.walk(name): + filenames += [os.path.join(root, f) for f in children + if f.endswith('.py') and + not f.startswith('.')] + for d in directories: + if d.startswith('.'): + directories.remove(d) + else: + if options.verbose: + print('[file:%s]' % name, file=sys.stderr) + try: + fix_file(name, options, output) + except IOError as error: + print(str(error), file=sys.stderr) + + +def main(): + """Tool main.""" + options, args = parse_args(sys.argv[1:]) + + if options.list_fixes: + for code, description in supported_fixes(): + print('{code} - {description}'.format( + code=code, description=description)) + return 0 + + if options.in_place or options.diff: + filenames = list(set(args)) + else: + assert len(args) == 1 + assert not options.recursive + if args == ['-']: + assert not options.in_place + temp = temporary_file() + temp.write(sys.stdin.read()) + temp.flush() + filenames = [temp.name] + else: + filenames = args[:1] + + output = codecs.getwriter('utf-8')(sys.stdout.buffer + if sys.version_info[0] >= 3 + else sys.stdout) + + output = LineEndingWrapper(output) + + fix_multiple_files(filenames, options, output) + + +if __name__ == '__main__': + try: + sys.exit(main()) + except KeyboardInterrupt: + sys.exit(1) diff --git a/vim/ftplugin/python/pep8/pep8.py b/vim/ftplugin/python/pep8/pep8.py 
new file mode 100755 index 0000000..8823436 --- /dev/null +++ b/vim/ftplugin/python/pep8/pep8.py @@ -0,0 +1,1991 @@ +#!/usr/bin/env python +# pep8.py - Check Python source code formatting, according to PEP 8 +# Copyright (C) 2006-2009 Johann C. Rocholl +# Copyright (C) 2009-2012 Florent Xicluna +# +# Permission is hereby granted, free of charge, to any person +# obtaining a copy of this software and associated documentation files +# (the "Software"), to deal in the Software without restriction, +# including without limitation the rights to use, copy, modify, merge, +# publish, distribute, sublicense, and/or sell copies of the Software, +# and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +r""" +Check Python source code formatting, according to PEP 8: +http://www.python.org/dev/peps/pep-0008/ + +For usage and a list of options, try this: +$ python pep8.py -h + +This program and its regression test suite live here: +http://github.com/jcrocholl/pep8 + +Groups of errors and warnings: +E errors +W warnings +100 indentation +200 whitespace +300 blank lines +400 imports +500 line length +600 deprecation +700 statements +900 syntax error +""" +__version__ = '1.4.2a0' + +import os +import sys +import re +import time +import inspect +import keyword +import tokenize +from optparse import OptionParser +from fnmatch import fnmatch +try: + from configparser import RawConfigParser + from io import TextIOWrapper +except ImportError: + from ConfigParser import RawConfigParser + +DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git' +DEFAULT_IGNORE = 'E226,E24' +if sys.platform == 'win32': + DEFAULT_CONFIG = os.path.expanduser(r'~\.pep8') +else: + DEFAULT_CONFIG = os.path.join(os.getenv('XDG_CONFIG_HOME') or + os.path.expanduser('~/.config'), 'pep8') +PROJECT_CONFIG = ('.pep8', 'tox.ini', 'setup.cfg') +MAX_LINE_LENGTH = 79 +REPORT_FORMAT = { + 'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s', + 'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s', +} + +PyCF_ONLY_AST = 1024 +SINGLETONS = frozenset(['False', 'None', 'True']) +KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS +UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-']) +WS_OPTIONAL_OPERATORS = frozenset(['**', '*', '/', '//', '+', '-']) +WS_NEEDED_OPERATORS = frozenset([ + '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', + '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', + '%', '^', '&', '|', '=', '<', '>', '<<']) +WHITESPACE = frozenset(' \t') +SKIP_TOKENS = frozenset([tokenize.COMMENT, tokenize.NL, tokenize.NEWLINE, + tokenize.INDENT, tokenize.DEDENT]) +BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines'] + +INDENT_REGEX = re.compile(r'([ \t]*)') 
+RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*(,)') +RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,\s*\w+\s*,\s*\w+') +SELFTEST_REGEX = re.compile(r'(Okay|[EW]\d{3}):\s(.*)') +ERRORCODE_REGEX = re.compile(r'[EW]\d{3}') +DOCSTRING_REGEX = re.compile(r'u?r?["\']') +EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]') +WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?: |\t)') +COMPARE_SINGLETON_REGEX = re.compile(r'([=!]=)\s*(None|False|True)') +COMPARE_TYPE_REGEX = re.compile(r'([=!]=|is|is\s+not)\s*type(?:s\.(\w+)Type' + r'|\(\s*(\(\s*\)|[^)]*[^ )])\s*\))') +KEYWORD_REGEX = re.compile(r'(?:[^\s]|\b)(\s*)\b(?:%s)\b(\s*)' % + r'|'.join(KEYWORDS)) +OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)') +LAMBDA_REGEX = re.compile(r'\blambda\b') +HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$') + +# Work around Python < 2.6 behaviour, which does not generate NL after +# a comment which is on a line by itself. +COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n' + + +############################################################################## +# Plugins (check functions) for physical lines +############################################################################## + + +def tabs_or_spaces(physical_line, indent_char): + r""" + Never mix tabs and spaces. + + The most popular way of indenting Python is with spaces only. The + second-most popular way is with tabs only. Code indented with a mixture + of tabs and spaces should be converted to using spaces exclusively. When + invoking the Python command line interpreter with the -t option, it issues + warnings about code that illegally mixes tabs and spaces. When using -tt + these warnings become errors. These options are highly recommended! 
+ + Okay: if a == 0:\n a = 1\n b = 1 + E101: if a == 0:\n a = 1\n\tb = 1 + """ + indent = INDENT_REGEX.match(physical_line).group(1) + for offset, char in enumerate(indent): + if char != indent_char: + return offset, "E101 indentation contains mixed spaces and tabs" + + +def tabs_obsolete(physical_line): + r""" + For new projects, spaces-only are strongly recommended over tabs. Most + editors have features that make this easy to do. + + Okay: if True:\n return + W191: if True:\n\treturn + """ + indent = INDENT_REGEX.match(physical_line).group(1) + if '\t' in indent: + return indent.index('\t'), "W191 indentation contains tabs" + + +def trailing_whitespace(physical_line): + r""" + JCR: Trailing whitespace is superfluous. + FBM: Except when it occurs as part of a blank line (i.e. the line is + nothing but whitespace). According to Python docs[1] a line with only + whitespace is considered a blank line, and is to be ignored. However, + matching a blank line to its indentation level avoids mistakenly + terminating a multi-line statement (e.g. class declaration) when + pasting code into the standard Python interpreter. + + [1] http://docs.python.org/reference/lexical_analysis.html#blank-lines + + The warning returned varies on whether the line itself is blank, for easier + filtering for those who want to indent their blank lines. + + Okay: spam(1)\n# + W291: spam(1) \n# + W293: class Foo(object):\n \n bang = 12 + """ + physical_line = physical_line.rstrip('\n') # chr(10), newline + physical_line = physical_line.rstrip('\r') # chr(13), carriage return + physical_line = physical_line.rstrip('\x0c') # chr(12), form feed, ^L + stripped = physical_line.rstrip(' \t\v') + if physical_line != stripped: + if stripped: + return len(stripped), "W291 trailing whitespace" + else: + return 0, "W293 blank line contains whitespace" + + +def trailing_blank_lines(physical_line, lines, line_number): + r""" + JCR: Trailing blank lines are superfluous. 
+ + Okay: spam(1) + W391: spam(1)\n + """ + if not physical_line.rstrip() and line_number == len(lines): + return 0, "W391 blank line at end of file" + + +def missing_newline(physical_line): + """ + JCR: The last line should have a newline. + + Reports warning W292. + """ + if physical_line.rstrip() == physical_line: + return len(physical_line), "W292 no newline at end of file" + + +def maximum_line_length(physical_line, max_line_length): + """ + Limit all lines to a maximum of 79 characters. + + There are still many devices around that are limited to 80 character + lines; plus, limiting windows to 80 characters makes it possible to have + several windows side-by-side. The default wrapping on such devices looks + ugly. Therefore, please limit all lines to a maximum of 79 characters. + For flowing long blocks of text (docstrings or comments), limiting the + length to 72 characters is recommended. + + Reports error E501. + """ + line = physical_line.rstrip() + length = len(line) + if length > max_line_length: + if noqa(line): + return + if hasattr(line, 'decode'): # Python 2 + # The line could contain multi-byte characters + try: + length = len(line.decode('utf-8')) + except UnicodeError: + pass + if length > max_line_length: + return (max_line_length, "E501 line too long " + "(%d > %d characters)" % (length, max_line_length)) + + +############################################################################## +# Plugins (check functions) for logical lines +############################################################################## + + +def blank_lines(logical_line, blank_lines, indent_level, line_number, + previous_logical, previous_indent_level): + r""" + Separate top-level function and class definitions with two blank lines. + + Method definitions inside a class are separated by a single blank line. + + Extra blank lines may be used (sparingly) to separate groups of related + functions. Blank lines may be omitted between a bunch of related + one-liners (e.g. 
a set of dummy implementations). + + Use blank lines in functions, sparingly, to indicate logical sections. + + Okay: def a():\n pass\n\n\ndef b():\n pass + Okay: def a():\n pass\n\n\n# Foo\n# Bar\n\ndef b():\n pass + + E301: class Foo:\n b = 0\n def bar():\n pass + E302: def a():\n pass\n\ndef b(n):\n pass + E303: def a():\n pass\n\n\n\ndef b(n):\n pass + E303: def a():\n\n\n\n pass + E304: @decorator\n\ndef a():\n pass + """ + if line_number == 1: + return # Don't expect blank lines before the first line + if previous_logical.startswith('@'): + if blank_lines: + yield 0, "E304 blank lines found after function decorator" + elif blank_lines > 2 or (indent_level and blank_lines == 2): + yield 0, "E303 too many blank lines (%d)" % blank_lines + elif logical_line.startswith(('def ', 'class ', '@')): + if indent_level: + if not (blank_lines or previous_indent_level < indent_level or + DOCSTRING_REGEX.match(previous_logical)): + yield 0, "E301 expected 1 blank line, found 0" + elif blank_lines != 2: + yield 0, "E302 expected 2 blank lines, found %d" % blank_lines + + +def extraneous_whitespace(logical_line): + """ + Avoid extraneous whitespace in the following situations: + + - Immediately inside parentheses, brackets or braces. + + - Immediately before a comma, semicolon, or colon. 
+ + Okay: spam(ham[1], {eggs: 2}) + E201: spam( ham[1], {eggs: 2}) + E201: spam(ham[ 1], {eggs: 2}) + E201: spam(ham[1], { eggs: 2}) + E202: spam(ham[1], {eggs: 2} ) + E202: spam(ham[1 ], {eggs: 2}) + E202: spam(ham[1], {eggs: 2 }) + + E203: if x == 4: print x, y; x, y = y , x + E203: if x == 4: print x, y ; x, y = y, x + E203: if x == 4 : print x, y; x, y = y, x + """ + line = logical_line + for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line): + text = match.group() + char = text.strip() + found = match.start() + if text == char + ' ': + # assert char in '([{' + yield found + 1, "E201 whitespace after '%s'" % char + elif line[found - 1] != ',': + code = ('E202' if char in '}])' else 'E203') # if char in ',;:' + yield found, "%s whitespace before '%s'" % (code, char) + + +def whitespace_around_keywords(logical_line): + r""" + Avoid extraneous whitespace around keywords. + + Okay: True and False + E271: True and False + E272: True and False + E273: True and\tFalse + E274: True\tand False + """ + for match in KEYWORD_REGEX.finditer(logical_line): + before, after = match.groups() + + if '\t' in before: + yield match.start(1), "E274 tab before keyword" + elif len(before) > 1: + yield match.start(1), "E272 multiple spaces before keyword" + + if '\t' in after: + yield match.start(2), "E273 tab after keyword" + elif len(after) > 1: + yield match.start(2), "E271 multiple spaces after keyword" + + +def missing_whitespace(logical_line): + """ + JCR: Each comma, semicolon or colon should be followed by whitespace. 
+ + Okay: [a, b] + Okay: (3,) + Okay: a[1:4] + Okay: a[:4] + Okay: a[1:] + Okay: a[1:4:2] + E231: ['a','b'] + E231: foo(bar,baz) + E231: [{'a':'b'}] + """ + line = logical_line + for index in range(len(line) - 1): + char = line[index] + if char in ',;:' and line[index + 1] not in WHITESPACE: + before = line[:index] + if char == ':' and before.count('[') > before.count(']') and \ + before.rfind('{') < before.rfind('['): + continue # Slice syntax, no space required + if char == ',' and line[index + 1] == ')': + continue # Allow tuple with only one element: (3,) + yield index, "E231 missing whitespace after '%s'" % char + + +def indentation(logical_line, previous_logical, indent_char, + indent_level, previous_indent_level): + r""" + Use 4 spaces per indentation level. + + For really old code that you don't want to mess up, you can continue to + use 8-space tabs. + + Okay: a = 1 + Okay: if a == 0:\n a = 1 + E111: a = 1 + + Okay: for item in items:\n pass + E112: for item in items:\npass + + Okay: a = 1\nb = 2 + E113: a = 1\n b = 2 + """ + if indent_char == ' ' and indent_level % 4: + yield 0, "E111 indentation is not a multiple of four" + indent_expect = previous_logical.endswith(':') + if indent_expect and indent_level <= previous_indent_level: + yield 0, "E112 expected an indented block" + if indent_level > previous_indent_level and not indent_expect: + yield 0, "E113 unexpected indentation" + + +def continuation_line_indentation(logical_line, tokens, indent_level, verbose): + r""" + Continuation lines should align wrapped elements either vertically using + Python's implicit line joining inside parentheses, brackets and braces, or + using a hanging indent. + + When using a hanging indent the following considerations should be applied: + + - there should be no arguments on the first line, and + + - further indentation should be used to clearly distinguish itself as a + continuation line. 
+ + Okay: a = (\n) + E123: a = (\n ) + + Okay: a = (\n 42) + E121: a = (\n 42) + E122: a = (\n42) + E123: a = (\n 42\n ) + E124: a = (24,\n 42\n) + E125: if (a or\n b):\n pass + E126: a = (\n 42) + E127: a = (24,\n 42) + E128: a = (24,\n 42) + """ + first_row = tokens[0][2][0] + nrows = 1 + tokens[-1][2][0] - first_row + if nrows == 1: + return + + # indent_next tells us whether the next block is indented; assuming + # that it is indented by 4 spaces, then we should not allow 4-space + # indents on the final continuation line; in turn, some other + # indents are allowed to have an extra 4 spaces. + indent_next = logical_line.endswith(':') + + row = depth = 0 + # remember how many brackets were opened on each line + parens = [0] * nrows + # relative indents of physical lines + rel_indent = [0] * nrows + # visual indents + indent = [indent_level] + indent_chances = {} + last_indent = tokens[0][2] + if verbose >= 3: + print(">>> " + tokens[0][4].rstrip()) + + for token_type, text, start, end, line in tokens: + if noqa(line): + continue + + newline = row < start[0] - first_row + if newline: + row = start[0] - first_row + newline = (not last_token_multiline and + token_type not in (tokenize.NL, tokenize.NEWLINE)) + + if newline: + # this is the beginning of a continuation line. + last_indent = start + if verbose >= 3: + print("... " + line.rstrip()) + + # record the initial indent. + rel_indent[row] = start[1] - indent_level + + if depth: + # a bracket expression in a continuation line. 
+ # find the line that it was opened on + for open_row in range(row - 1, -1, -1): + if parens[open_row]: + break + else: + # an unbracketed continuation line (ie, backslash) + open_row = 0 + hang = rel_indent[row] - rel_indent[open_row] + visual_indent = indent_chances.get(start[1]) + + if token_type == tokenize.OP and text in ']})': + # this line starts with a closing bracket + if indent[depth]: + if start[1] != indent[depth]: + yield (start, "E124 closing bracket does not match " + "visual indentation") + elif hang: + yield (start, "E123 closing bracket does not match " + "indentation of opening bracket's line") + elif visual_indent is True: + # visual indent is verified + if not indent[depth]: + indent[depth] = start[1] + elif visual_indent in (text, str): + # ignore token lined up with matching one from a previous line + pass + elif indent[depth] and start[1] < indent[depth]: + # visual indent is broken + yield (start, "E128 continuation line " + "under-indented for visual indent") + elif hang == 4 or (indent_next and rel_indent[row] == 8): + # hanging indent is verified + pass + else: + # indent is broken + if hang <= 0: + error = "E122", "missing indentation or outdented" + elif indent[depth]: + error = "E127", "over-indented for visual indent" + elif hang % 4: + error = "E121", "indentation is not a multiple of four" + else: + error = "E126", "over-indented for hanging indent" + yield start, "%s continuation line %s" % error + + # look for visual indenting + if (parens[row] and token_type not in (tokenize.NL, tokenize.COMMENT) + and not indent[depth]): + indent[depth] = start[1] + indent_chances[start[1]] = True + if verbose >= 4: + print("bracket depth %s indent to %s" % (depth, start[1])) + # deal with implicit string concatenation + elif (token_type in (tokenize.STRING, tokenize.COMMENT) or + text in ('u', 'ur', 'b', 'br')): + indent_chances[start[1]] = str + + # keep track of bracket depth + if token_type == tokenize.OP: + if text in '([{': + depth += 1 
+ indent.append(0) + parens[row] += 1 + if verbose >= 4: + print("bracket depth %s seen, col %s, visual min = %s" % + (depth, start[1], indent[depth])) + elif text in ')]}' and depth > 0: + # parent indents should not be more than this one + prev_indent = indent.pop() or last_indent[1] + for d in range(depth): + if indent[d] > prev_indent: + indent[d] = 0 + for ind in list(indent_chances): + if ind >= prev_indent: + del indent_chances[ind] + depth -= 1 + if depth: + indent_chances[indent[depth]] = True + for idx in range(row, -1, -1): + if parens[idx]: + parens[idx] -= 1 + break + assert len(indent) == depth + 1 + if start[1] not in indent_chances: + # allow to line up tokens + indent_chances[start[1]] = text + + last_token_multiline = (start[0] != end[0]) + + if indent_next and rel_indent[-1] == 4: + yield (last_indent, "E125 continuation line does not distinguish " + "itself from next logical line") + + +def whitespace_before_parameters(logical_line, tokens): + """ + Avoid extraneous whitespace in the following situations: + + - Immediately before the open parenthesis that starts the argument + list of a function call. + + - Immediately before the open parenthesis that starts an indexing or + slicing. 
+ + Okay: spam(1) + E211: spam (1) + + Okay: dict['key'] = list[index] + E211: dict ['key'] = list[index] + E211: dict['key'] = list [index] + """ + prev_type = tokens[0][0] + prev_text = tokens[0][1] + prev_end = tokens[0][3] + for index in range(1, len(tokens)): + token_type, text, start, end, line = tokens[index] + if (token_type == tokenize.OP and + text in '([' and + start != prev_end and + (prev_type == tokenize.NAME or prev_text in '}])') and + # Syntax "class A (B):" is allowed, but avoid it + (index < 2 or tokens[index - 2][1] != 'class') and + # Allow "return (a.foo for a in range(5))" + not keyword.iskeyword(prev_text)): + yield prev_end, "E211 whitespace before '%s'" % text + prev_type = token_type + prev_text = text + prev_end = end + + +def whitespace_around_operator(logical_line): + r""" + Avoid extraneous whitespace in the following situations: + + - More than one space around an assignment (or other) operator to + align it with another. + + Okay: a = 12 + 3 + E221: a = 4 + 5 + E222: a = 4 + 5 + E223: a = 4\t+ 5 + E224: a = 4 +\t5 + """ + for match in OPERATOR_REGEX.finditer(logical_line): + before, after = match.groups() + + if '\t' in before: + yield match.start(1), "E223 tab before operator" + elif len(before) > 1: + yield match.start(1), "E221 multiple spaces before operator" + + if '\t' in after: + yield match.start(2), "E224 tab after operator" + elif len(after) > 1: + yield match.start(2), "E222 multiple spaces after operator" + + +def missing_whitespace_around_operator(logical_line, tokens): + r""" + - Always surround these binary operators with a single space on + either side: assignment (=), augmented assignment (+=, -= etc.), + comparisons (==, <, >, !=, <>, <=, >=, in, not in, is, is not), + Booleans (and, or, not). + + - Use spaces around arithmetic operators. 
+ + Okay: i = i + 1 + Okay: submitted += 1 + Okay: x = x * 2 - 1 + Okay: hypot2 = x * x + y * y + Okay: c = (a + b) * (a - b) + Okay: foo(bar, key='word', *args, **kwargs) + Okay: baz(**kwargs) + Okay: negative = -1 + Okay: spam(-1) + Okay: alpha[:-i] + Okay: if not -5 < x < +5:\n pass + Okay: lambda *args, **kw: (args, kw) + Okay: z = 2 ** 30 + Okay: x = x / 2 - 1 + + E225: i=i+1 + E225: submitted +=1 + E225: c = alpha -4 + E225: x = x /2 - 1 + E225: z = x **y + E226: c = (a+b) * (a-b) + E226: z = 2**30 + E226: x = x*2 - 1 + E226: x = x/2 - 1 + E226: hypot2 = x*x + y*y + """ + parens = 0 + need_space = False + prev_type = tokenize.OP + prev_text = prev_end = None + for token_type, text, start, end, line in tokens: + if token_type in (tokenize.NL, tokenize.NEWLINE, tokenize.ERRORTOKEN): + # ERRORTOKEN is triggered by backticks in Python 3 + continue + if text in ('(', 'lambda'): + parens += 1 + elif text == ')': + parens -= 1 + if need_space: + if start != prev_end: + # Found a (probably) needed space + if need_space is not True and not need_space[1]: + yield (need_space[0], + "E225 missing whitespace around operator") + need_space = False + elif text == '>' and prev_text in ('<', '-'): + # Tolerate the "<>" operator, even if running Python 3 + # Deal with Python 3's annotated return value "->" + pass + else: + if need_space is True or need_space[1]: + # A needed trailing space was not found + yield prev_end, "E225 missing whitespace around operator" + else: + yield (need_space[0], + "E226 missing optional whitespace around operator") + need_space = False + elif token_type == tokenize.OP and prev_end is not None: + if text == '=' and parens: + # Allow keyword args or defaults: foo(bar=None). + pass + elif text in WS_NEEDED_OPERATORS: + need_space = True + elif text in UNARY_OPERATORS: + # Check if the operator is being used as a binary operator + # Allow unary operators: -123, -x, +1. + # Allow argument unpacking: foo(*args, **kwargs). 
+ if prev_type == tokenize.OP: + binary_usage = (prev_text in '}])') + elif prev_type == tokenize.NAME: + binary_usage = (prev_text not in KEYWORDS) + else: + binary_usage = (prev_type not in SKIP_TOKENS) + + if binary_usage: + if text in WS_OPTIONAL_OPERATORS: + need_space = None + else: + need_space = True + elif text in WS_OPTIONAL_OPERATORS: + need_space = None + + if need_space is None: + # Surrounding space is optional, but ensure that + # trailing space matches opening space + need_space = (prev_end, start != prev_end) + elif need_space and start == prev_end: + # A needed opening space was not found + yield prev_end, "E225 missing whitespace around operator" + need_space = False + prev_type = token_type + prev_text = text + prev_end = end + + +def whitespace_around_comma(logical_line): + r""" + Avoid extraneous whitespace in the following situations: + + - More than one space around an assignment (or other) operator to + align it with another. + + Note: these checks are disabled by default + + Okay: a = (1, 2) + E241: a = (1, 2) + E242: a = (1,\t2) + """ + line = logical_line + for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(line): + found = m.start() + 1 + if '\t' in m.group(): + yield found, "E242 tab after '%s'" % m.group()[0] + else: + yield found, "E241 multiple spaces after '%s'" % m.group()[0] + + +def whitespace_around_named_parameter_equals(logical_line, tokens): + """ + Don't use spaces around the '=' sign when used to indicate a + keyword argument or a default parameter value. 
+ + Okay: def complex(real, imag=0.0): + Okay: return magic(r=real, i=imag) + Okay: boolean(a == b) + Okay: boolean(a != b) + Okay: boolean(a <= b) + Okay: boolean(a >= b) + + E251: def complex(real, imag = 0.0): + E251: return magic(r = real, i = imag) + """ + parens = 0 + no_space = False + prev_end = None + for token_type, text, start, end, line in tokens: + if no_space: + no_space = False + if start != prev_end: + yield (prev_end, + "E251 no spaces around keyword / parameter equals") + elif token_type == tokenize.OP: + if text == '(': + parens += 1 + elif text == ')': + parens -= 1 + elif parens and text == '=': + no_space = True + if start != prev_end: + yield (prev_end, + "E251 no spaces around keyword / parameter equals") + prev_end = end + + +def whitespace_before_inline_comment(logical_line, tokens): + """ + Separate inline comments by at least two spaces. + + An inline comment is a comment on the same line as a statement. Inline + comments should be separated by at least two spaces from the statement. + They should start with a # and a single space. + + Okay: x = x + 1 # Increment x + Okay: x = x + 1 # Increment x + E261: x = x + 1 # Increment x + E262: x = x + 1 #Increment x + E262: x = x + 1 # Increment x + """ + prev_end = (0, 0) + for token_type, text, start, end, line in tokens: + if token_type == tokenize.COMMENT: + if not line[:start[1]].strip(): + continue + if prev_end[0] == start[0] and start[1] < prev_end[1] + 2: + yield (prev_end, + "E261 at least two spaces before inline comment") + symbol, sp, comment = text.partition(' ') + if symbol not in ('#', '#:') or comment[:1].isspace(): + yield start, "E262 inline comment should start with '# '" + elif token_type != tokenize.NL: + prev_end = end + + +def imports_on_separate_lines(logical_line): + r""" + Imports should usually be on separate lines. 
+ + Okay: import os\nimport sys + E401: import sys, os + + Okay: from subprocess import Popen, PIPE + Okay: from myclas import MyClass + Okay: from foo.bar.yourclass import YourClass + Okay: import myclass + Okay: import foo.bar.yourclass + """ + line = logical_line + if line.startswith('import '): + found = line.find(',') + if -1 < found and ';' not in line[:found]: + yield found, "E401 multiple imports on one line" + + +def compound_statements(logical_line): + r""" + Compound statements (multiple statements on the same line) are + generally discouraged. + + While sometimes it's okay to put an if/for/while with a small body + on the same line, never do this for multi-clause statements. Also + avoid folding such long lines! + + Okay: if foo == 'blah':\n do_blah_thing() + Okay: do_one() + Okay: do_two() + Okay: do_three() + + E701: if foo == 'blah': do_blah_thing() + E701: for x in lst: total += x + E701: while t < 10: t = delay() + E701: if foo == 'blah': do_blah_thing() + E701: else: do_non_blah_thing() + E701: try: something() + E701: finally: cleanup() + E701: if foo == 'blah': one(); two(); three() + + E702: do_one(); do_two(); do_three() + E703: do_four(); # useless semicolon + """ + line = logical_line + last_char = len(line) - 1 + found = line.find(':') + if -1 < found < last_char: + before = line[:found] + if (before.count('{') <= before.count('}') and # {'a': 1} (dict) + before.count('[') <= before.count(']') and # [1:2] (slice) + before.count('(') <= before.count(')') and # (Python 3 annotation) + not LAMBDA_REGEX.search(before)): # lambda x: x + yield found, "E701 multiple statements on one line (colon)" + found = line.find(';') + if -1 < found: + if found < last_char: + yield found, "E702 multiple statements on one line (semicolon)" + else: + yield found, "E703 statement ends with a semicolon" + + +def explicit_line_join(logical_line, tokens): + r""" + Avoid explicit line join between brackets. 
+ + The preferred way of wrapping long lines is by using Python's implied line + continuation inside parentheses, brackets and braces. Long lines can be + broken over multiple lines by wrapping expressions in parentheses. These + should be used in preference to using a backslash for line continuation. + + E502: aaa = [123, \\n 123] + E502: aaa = ("bbb " \\n "ccc") + + Okay: aaa = [123,\n 123] + Okay: aaa = ("bbb "\n "ccc") + Okay: aaa = "bbb " \\n "ccc" + """ + prev_start = prev_end = parens = 0 + for token_type, text, start, end, line in tokens: + if start[0] != prev_start and parens and backslash: + yield backslash, "E502 the backslash is redundant between brackets" + if end[0] != prev_end: + if line.rstrip('\r\n').endswith('\\'): + backslash = (end[0], len(line.splitlines()[-1]) - 1) + else: + backslash = None + prev_start = prev_end = end[0] + else: + prev_start = start[0] + if token_type == tokenize.OP: + if text in '([{': + parens += 1 + elif text in ')]}': + parens -= 1 + + +def comparison_to_singleton(logical_line): + """ + Comparisons to singletons like None should always be done + with "is" or "is not", never the equality operators. + + Okay: if arg is not None: + E711: if arg != None: + E712: if arg == True: + + Also, beware of writing if x when you really mean if x is not None -- + e.g. when testing whether a variable or argument that defaults to None was + set to some other value. The other value might have a type (such as a + container) that could be false in a boolean context! 
+ """ + match = COMPARE_SINGLETON_REGEX.search(logical_line) + if match: + same = (match.group(1) == '==') + singleton = match.group(2) + msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton) + if singleton in ('None',): + code = 'E711' + else: + code = 'E712' + nonzero = ((singleton == 'True' and same) or + (singleton == 'False' and not same)) + msg += " or 'if %scond:'" % ('' if nonzero else 'not ') + yield match.start(1), ("%s comparison to %s should be %s" % + (code, singleton, msg)) + + +def comparison_type(logical_line): + """ + Object type comparisons should always use isinstance() instead of + comparing types directly. + + Okay: if isinstance(obj, int): + E721: if type(obj) is type(1): + + When checking if an object is a string, keep in mind that it might be a + unicode string too! In Python 2.3, str and unicode have a common base + class, basestring, so you can do: + + Okay: if isinstance(obj, basestring): + Okay: if type(a1) is type(b1): + """ + match = COMPARE_TYPE_REGEX.search(logical_line) + if match: + inst = match.group(3) + if inst and isidentifier(inst) and inst not in SINGLETONS: + return # Allow comparison for types which are not obvious + yield match.start(1), "E721 do not compare types, use 'isinstance()'" + + +def python_3000_has_key(logical_line): + r""" + The {}.has_key() method is removed in the Python 3. + Use the 'in' operation instead. + + Okay: if "alph" in d:\n print d["alph"] + W601: assert d.has_key('alph') + """ + pos = logical_line.find('.has_key(') + if pos > -1: + yield pos, "W601 .has_key() is deprecated, use 'in'" + + +def python_3000_raise_comma(logical_line): + """ + When raising an exception, use "raise ValueError('message')" + instead of the older form "raise ValueError, 'message'". + + The paren-using form is preferred because when the exception arguments + are long or include string formatting, you don't need to use line + continuation characters thanks to the containing parentheses. 
The older + form is removed in Python 3. + + Okay: raise DummyError("Message") + W602: raise DummyError, "Message" + """ + match = RAISE_COMMA_REGEX.match(logical_line) + if match and not RERAISE_COMMA_REGEX.match(logical_line): + yield match.start(1), "W602 deprecated form of raising exception" + + +def python_3000_not_equal(logical_line): + """ + != can also be written <>, but this is an obsolete usage kept for + backwards compatibility only. New code should always use !=. + The older syntax is removed in Python 3. + + Okay: if a != 'no': + W603: if a <> 'no': + """ + pos = logical_line.find('<>') + if pos > -1: + yield pos, "W603 '<>' is deprecated, use '!='" + + +def python_3000_backticks(logical_line): + """ + Backticks are removed in Python 3. + Use repr() instead. + + Okay: val = repr(1 + 2) + W604: val = `1 + 2` + """ + pos = logical_line.find('`') + if pos > -1: + yield pos, "W604 backticks are deprecated, use 'repr()'" + + +############################################################################## +# Helper functions +############################################################################## + + +if '' == ''.encode(): + # Python 2: implicit encoding. + def readlines(filename): + f = open(filename) + try: + return f.readlines() + finally: + f.close() + + isidentifier = re.compile(r'[a-zA-Z_]\w*').match + stdin_get_value = sys.stdin.read +else: + # Python 3 + def readlines(filename): + f = open(filename, 'rb') + try: + coding, lines = tokenize.detect_encoding(f.readline) + f = TextIOWrapper(f, coding, line_buffering=True) + return [l.decode(coding) for l in lines] + f.readlines() + except (LookupError, SyntaxError, UnicodeError): + f.close() + # Fall back if files are improperly declared + f = open(filename, encoding='latin-1') + return f.readlines() + finally: + f.close() + + isidentifier = str.isidentifier + + def stdin_get_value(): + return TextIOWrapper(sys.stdin.buffer, errors='ignore').read() +readlines.__doc__ = " Read the source code." 
+ + +def expand_indent(line): + r""" + Return the amount of indentation. + Tabs are expanded to the next multiple of 8. + + >>> expand_indent(' ') + 4 + >>> expand_indent('\t') + 8 + >>> expand_indent(' \t') + 8 + >>> expand_indent(' \t') + 8 + >>> expand_indent(' \t') + 16 + """ + if '\t' not in line: + return len(line) - len(line.lstrip()) + result = 0 + for char in line: + if char == '\t': + result = result // 8 * 8 + 8 + elif char == ' ': + result += 1 + else: + break + return result + + +def mute_string(text): + """ + Replace contents with 'xxx' to prevent syntax matching. + + >>> mute_string('"abc"') + '"xxx"' + >>> mute_string("'''abc'''") + "'''xxx'''" + >>> mute_string("r'abc'") + "r'xxx'" + """ + # String modifiers (e.g. u or r) + start = text.index(text[-1]) + 1 + end = len(text) - 1 + # Triple quotes + if text[-3:] in ('"""', "'''"): + start += 2 + end -= 2 + return text[:start] + 'x' * (end - start) + text[end:] + + +def noqa(line): + return line.strip().lower().endswith(('# noqa', '# nopep8')) + + +def parse_udiff(diff, patterns=None, parent='.'): + """Return a dictionary of matching lines.""" + # For each file of the diff, the entry key is the filename, + # and the value is a set of row numbers to consider. + rv = {} + path = nrows = None + for line in diff.splitlines(): + if nrows: + if line[:1] != '-': + nrows -= 1 + continue + if line[:3] == '@@ ': + hunk_match = HUNK_REGEX.match(line) + row, nrows = [int(g or '1') for g in hunk_match.groups()] + rv[path].update(range(row, row + nrows)) + elif line[:3] == '+++': + path = line[4:].split('\t', 1)[0] + if path[:2] == 'b/': + path = path[2:] + rv[path] = set() + return dict([(os.path.join(parent, path), rows) + for (path, rows) in rv.items() + if rows and filename_match(path, patterns)]) + + +def filename_match(filename, patterns, default=True): + """ + Check if patterns contains a pattern that matches filename. + If patterns is unspecified, this always returns True. 
+ """ + if not patterns: + return default + return any(fnmatch(filename, pattern) for pattern in patterns) + + +############################################################################## +# Framework to run all checks +############################################################################## + + +_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}} + + +def register_check(check, codes=None): + """ + Register a new check object. + """ + if inspect.isfunction(check): + args = inspect.getargspec(check)[0] + if args and args[0] in ('physical_line', 'logical_line'): + if codes is None: + codes = ERRORCODE_REGEX.findall(check.__doc__ or '') + _checks[args[0]][check] = (codes or [''], args) + elif inspect.isclass(check): + if inspect.getargspec(check.__init__)[0][:2] == ['self', 'tree']: + _checks['tree'][check] = (codes or [''], None) + + +def init_checks_registry(): + """ + Register all globally visible functions where the first argument name + is 'physical_line' or 'logical_line'. + """ + mod = inspect.getmodule(register_check) + for (name, function) in inspect.getmembers(mod, inspect.isfunction): + register_check(function) +init_checks_registry() + + +class Checker(object): + """ + Load a Python source file, tokenize it, check coding style. 
+ """ + + def __init__(self, filename=None, lines=None, + options=None, report=None, **kwargs): + if options is None: + options = StyleGuide(kwargs).options + else: + assert not kwargs + self._io_error = None + self._physical_checks = options.physical_checks + self._logical_checks = options.logical_checks + self._ast_checks = options.ast_checks + self._options = options + self.max_line_length = options.max_line_length + self.verbose = options.verbose + self.filename = filename + if filename is None: + self.filename = 'stdin' + self.lines = lines or [] + elif filename == '-': + self.filename = 'stdin' + self.lines = stdin_get_value().splitlines(True) + elif lines is None: + try: + self.lines = readlines(filename) + except IOError: + exc_type, exc = sys.exc_info()[:2] + self._io_error = '%s: %s' % (exc_type.__name__, exc) + self.lines = [] + else: + self.lines = lines + self.report = report or options.report + self.report_error = self.report.error + + def report_invalid_syntax(self): + exc_type, exc = sys.exc_info()[:2] + offset = exc.args[1] + if len(offset) > 2: + offset = offset[1:3] + self.report_error(offset[0], offset[1], + 'E901 %s: %s' % (exc_type.__name__, exc.args[0]), + self.report_invalid_syntax) + report_invalid_syntax.__doc__ = " Check if the syntax is valid." + + def readline(self): + """ + Get the next line from the input buffer. + """ + self.line_number += 1 + if self.line_number > len(self.lines): + return '' + return self.lines[self.line_number - 1] + + def readline_check_physical(self): + """ + Check and return the next physical line. This method can be + used to feed tokenize.generate_tokens. + """ + line = self.readline() + if line: + self.check_physical(line) + return line + + def run_check(self, check, argument_names): + """ + Run a check plugin. 
+ """ + arguments = [] + for name in argument_names: + arguments.append(getattr(self, name)) + return check(*arguments) + + def check_physical(self, line): + """ + Run all physical checks on a raw input line. + """ + self.physical_line = line + if self.indent_char is None and line[:1] in WHITESPACE: + self.indent_char = line[0] + for name, check, argument_names in self._physical_checks: + result = self.run_check(check, argument_names) + if result is not None: + offset, text = result + self.report_error(self.line_number, offset, text, check) + + def build_tokens_line(self): + """ + Build a logical line from tokens. + """ + self.mapping = [] + logical = [] + length = 0 + previous = None + for token in self.tokens: + token_type, text = token[0:2] + if token_type in SKIP_TOKENS: + continue + if token_type == tokenize.STRING: + text = mute_string(text) + if previous: + end_row, end = previous[3] + start_row, start = token[2] + if end_row != start_row: # different row + prev_text = self.lines[end_row - 1][end - 1] + if prev_text == ',' or (prev_text not in '{[(' + and text not in '}])'): + logical.append(' ') + length += 1 + elif end != start: # different column + fill = self.lines[end_row - 1][end:start] + logical.append(fill) + length += len(fill) + self.mapping.append((length, token)) + logical.append(text) + length += len(text) + previous = token + self.logical_line = ''.join(logical) + # With Python 2, if the line ends with '\r\r\n' the assertion fails + # assert self.logical_line.strip() == self.logical_line + + def check_logical(self): + """ + Build a line from tokens and run all logical checks on it. 
+ """ + self.build_tokens_line() + self.report.increment_logical_line() + first_line = self.lines[self.mapping[0][1][2][0] - 1] + indent = first_line[:self.mapping[0][1][2][1]] + self.previous_indent_level = self.indent_level + self.indent_level = expand_indent(indent) + if self.verbose >= 2: + print(self.logical_line[:80].rstrip()) + for name, check, argument_names in self._logical_checks: + if self.verbose >= 4: + print(' ' + name) + for result in self.run_check(check, argument_names): + offset, text = result + if isinstance(offset, tuple): + orig_number, orig_offset = offset + else: + for token_offset, token in self.mapping: + if offset >= token_offset: + orig_number = token[2][0] + orig_offset = (token[2][1] + offset - token_offset) + self.report_error(orig_number, orig_offset, text, check) + self.previous_logical = self.logical_line + + def check_ast(self): + try: + tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST) + except SyntaxError: + return self.report_invalid_syntax() + for name, cls, _ in self._ast_checks: + checker = cls(tree, self.filename, self._options) + for lineno, offset, text, check in checker.run(): + if not noqa(self.lines[lineno - 1]): + self.report_error(lineno, offset, text, check) + + def generate_tokens(self): + if self._io_error: + self.report_error(1, 0, 'E902 %s' % self._io_error, readlines) + tokengen = tokenize.generate_tokens(self.readline_check_physical) + try: + for token in tokengen: + yield token + except (SyntaxError, tokenize.TokenError): + self.report_invalid_syntax() + + def check_all(self, expected=None, line_offset=0): + """ + Run all checks on the input file. 
+ """ + self.report.init_file(self.filename, self.lines, expected, line_offset) + if self._ast_checks: + self.check_ast() + self.line_number = 0 + self.indent_char = None + self.indent_level = 0 + self.previous_logical = '' + self.tokens = [] + self.blank_lines = blank_lines_before_comment = 0 + parens = 0 + for token in self.generate_tokens(): + self.tokens.append(token) + token_type, text = token[0:2] + if self.verbose >= 3: + if token[2][0] == token[3][0]: + pos = '[%s:%s]' % (token[2][1] or '', token[3][1]) + else: + pos = 'l.%s' % token[3][0] + print('l.%s\t%s\t%s\t%r' % + (token[2][0], pos, tokenize.tok_name[token[0]], text)) + if token_type == tokenize.OP: + if text in '([{': + parens += 1 + elif text in '}])': + parens -= 1 + elif not parens: + if token_type == tokenize.NEWLINE: + if self.blank_lines < blank_lines_before_comment: + self.blank_lines = blank_lines_before_comment + self.check_logical() + self.tokens = [] + self.blank_lines = blank_lines_before_comment = 0 + elif token_type == tokenize.NL: + if len(self.tokens) == 1: + # The physical line contains only this token. 
+ self.blank_lines += 1 + self.tokens = [] + elif token_type == tokenize.COMMENT and len(self.tokens) == 1: + if blank_lines_before_comment < self.blank_lines: + blank_lines_before_comment = self.blank_lines + self.blank_lines = 0 + if COMMENT_WITH_NL: + # The comment also ends a physical line + self.tokens = [] + return self.report.get_file_results() + + +class BaseReport(object): + """Collect the results of the checks.""" + print_filename = False + + def __init__(self, options): + self._benchmark_keys = options.benchmark_keys + self._ignore_code = options.ignore_code + # Results + self.elapsed = 0 + self.total_errors = 0 + self.counters = dict.fromkeys(self._benchmark_keys, 0) + self.messages = {} + + def start(self): + """Start the timer.""" + self._start_time = time.time() + + def stop(self): + """Stop the timer.""" + self.elapsed = time.time() - self._start_time + + def init_file(self, filename, lines, expected, line_offset): + """Signal a new file.""" + self.filename = filename + self.lines = lines + self.expected = expected or () + self.line_offset = line_offset + self.file_errors = 0 + self.counters['files'] += 1 + self.counters['physical lines'] += len(lines) + + def increment_logical_line(self): + """Signal a new logical line.""" + self.counters['logical lines'] += 1 + + def error(self, line_number, offset, text, check): + """Report an error, according to options.""" + code = text[:4] + if self._ignore_code(code): + return + if code in self.counters: + self.counters[code] += 1 + else: + self.counters[code] = 1 + self.messages[code] = text[5:] + # Don't care about expected errors or warnings + if code in self.expected: + return + if self.print_filename and not self.file_errors: + print(self.filename) + self.file_errors += 1 + self.total_errors += 1 + return code + + def get_file_results(self): + """Return the count of errors and warnings for this file.""" + return self.file_errors + + def get_count(self, prefix=''): + """Return the total count of errors 
and warnings.""" + return sum([self.counters[key] + for key in self.messages if key.startswith(prefix)]) + + def get_statistics(self, prefix=''): + """ + Get statistics for message codes that start with the prefix. + + prefix='' matches all errors and warnings + prefix='E' matches all errors + prefix='W' matches all warnings + prefix='E4' matches all errors that have to do with imports + """ + return ['%-7s %s %s' % (self.counters[key], key, self.messages[key]) + for key in sorted(self.messages) if key.startswith(prefix)] + + def print_statistics(self, prefix=''): + """Print overall statistics (number of errors and warnings).""" + for line in self.get_statistics(prefix): + print(line) + + def print_benchmark(self): + """Print benchmark numbers.""" + print('%-7.2f %s' % (self.elapsed, 'seconds elapsed')) + if self.elapsed: + for key in self._benchmark_keys: + print('%-7d %s per second (%d total)' % + (self.counters[key] / self.elapsed, key, + self.counters[key])) + + +class FileReport(BaseReport): + """Collect the results of the checks and print only the filenames.""" + print_filename = True + + +class StandardReport(BaseReport): + """Collect and print the results of the checks.""" + + def __init__(self, options): + super(StandardReport, self).__init__(options) + self._fmt = REPORT_FORMAT.get(options.format.lower(), + options.format) + self._repeat = options.repeat + self._show_source = options.show_source + self._show_pep8 = options.show_pep8 + + def error(self, line_number, offset, text, check): + """ + Report an error, according to options. 
+ """ + code = super(StandardReport, self).error(line_number, offset, + text, check) + if code and (self.counters[code] == 1 or self._repeat): + print(self._fmt % { + 'path': self.filename, + 'row': self.line_offset + line_number, 'col': offset + 1, + 'code': code, 'text': text[5:], + }) + if self._show_source: + if line_number > len(self.lines): + line = '' + else: + line = self.lines[line_number - 1] + print(line.rstrip()) + print(' ' * offset + '^') + if self._show_pep8: + print(check.__doc__.lstrip('\n').rstrip()) + return code + + +class DiffReport(StandardReport): + """Collect and print the results for the changed lines only.""" + + def __init__(self, options): + super(DiffReport, self).__init__(options) + self._selected = options.selected_lines + + def error(self, line_number, offset, text, check): + if line_number not in self._selected[self.filename]: + return + return super(DiffReport, self).error(line_number, offset, text, check) + + +class TestReport(StandardReport): + """Collect the results for the tests.""" + + def __init__(self, options): + options.benchmark_keys += ['test cases', 'failed tests'] + super(TestReport, self).__init__(options) + self._verbose = options.verbose + + def get_file_results(self): + # Check if the expected errors were found + label = '%s:%s:1' % (self.filename, self.line_offset) + codes = sorted(self.expected) + for code in codes: + if not self.counters.get(code): + self.file_errors += 1 + self.total_errors += 1 + print('%s: error %s not found' % (label, code)) + if self._verbose and not self.file_errors: + print('%s: passed (%s)' % + (label, ' '.join(codes) or 'Okay')) + self.counters['test cases'] += 1 + if self.file_errors: + self.counters['failed tests'] += 1 + # Reset counters + for key in set(self.counters) - set(self._benchmark_keys): + del self.counters[key] + self.messages = {} + return self.file_errors + + def print_results(self): + results = ("%(physical lines)d lines tested: %(files)d files, " + "%(test cases)d test 
cases%%s." % self.counters) + if self.total_errors: + print(results % ", %s failures" % self.total_errors) + else: + print(results % "") + print("Test failed." if self.total_errors else "Test passed.") + + +class StyleGuide(object): + """Initialize a PEP-8 instance with few options.""" + + def __init__(self, *args, **kwargs): + # build options from the command line + parse_argv = kwargs.pop('parse_argv', False) + config_file = kwargs.pop('config_file', None) + parser = kwargs.pop('parser', None) + options, self.paths = process_options( + parse_argv=parse_argv, config_file=config_file, parser=parser) + if args or kwargs: + # build options from dict + options_dict = dict(*args, **kwargs) + options.__dict__.update(options_dict) + if 'paths' in options_dict: + self.paths = options_dict['paths'] + + self.runner = self.input_file + self.options = options + + if not options.reporter: + options.reporter = BaseReport if options.quiet else StandardReport + + for index, value in enumerate(options.exclude): + options.exclude[index] = value.rstrip('/') + # Ignore all checks which are not explicitly selected + options.select = tuple(options.select or ()) + options.ignore = tuple(options.ignore or options.select and ('',)) + options.benchmark_keys = BENCHMARK_KEYS[:] + options.ignore_code = self.ignore_code + options.physical_checks = self.get_checks('physical_line') + options.logical_checks = self.get_checks('logical_line') + options.ast_checks = self.get_checks('tree') + self.init_report() + + def init_report(self, reporter=None): + """Initialize the report instance.""" + self.options.report = (reporter or self.options.reporter)(self.options) + return self.options.report + + def check_files(self, paths=None): + """Run all checks on the paths.""" + if paths is None: + paths = self.paths + report = self.options.report + runner = self.runner + report.start() + for path in paths: + if os.path.isdir(path): + self.input_dir(path) + elif not self.excluded(path): + runner(path) + 
report.stop() + return report + + def input_file(self, filename, lines=None, expected=None, line_offset=0): + """Run all checks on a Python source file.""" + if self.options.verbose: + print('checking %s' % filename) + fchecker = Checker(filename, lines=lines, options=self.options) + return fchecker.check_all(expected=expected, line_offset=line_offset) + + def input_dir(self, dirname): + """Check all files in this directory and all subdirectories.""" + dirname = dirname.rstrip('/') + if self.excluded(dirname): + return 0 + counters = self.options.report.counters + verbose = self.options.verbose + filepatterns = self.options.filename + runner = self.runner + for root, dirs, files in os.walk(dirname): + if verbose: + print('directory ' + root) + counters['directories'] += 1 + for subdir in sorted(dirs): + if self.excluded(os.path.join(root, subdir)): + dirs.remove(subdir) + for filename in sorted(files): + # contain a pattern that matches? + if ((filename_match(filename, filepatterns) and + not self.excluded(filename))): + runner(os.path.join(root, filename)) + + def excluded(self, filename): + """ + Check if options.exclude contains a pattern that matches filename. + """ + basename = os.path.basename(filename) + return any((filename_match(filename, self.options.exclude, + default=False), + filename_match(basename, self.options.exclude, + default=False))) + + def ignore_code(self, code): + """ + Check if the error code should be ignored. + + If 'options.select' contains a prefix of the error code, + return False. Else, if 'options.ignore' contains a prefix of + the error code, return True. + """ + return (code.startswith(self.options.ignore) and + not code.startswith(self.options.select)) + + def get_checks(self, argument_name): + """ + Find all globally visible functions where the first argument name + starts with argument_name and which contain selected tests. 
+ """ + checks = [] + for check, attrs in _checks[argument_name].items(): + (codes, args) = attrs + if any(not (code and self.ignore_code(code)) for code in codes): + checks.append((check.__name__, check, args)) + return sorted(checks) + + +def init_tests(pep8style): + """ + Initialize testing framework. + + A test file can provide many tests. Each test starts with a + declaration. This declaration is a single line starting with '#:'. + It declares codes of expected failures, separated by spaces or 'Okay' + if no failure is expected. + If the file does not contain such declaration, it should pass all + tests. If the declaration is empty, following lines are not checked, + until next declaration. + + Examples: + + * Only E224 and W701 are expected: #: E224 W701 + * Following example is conform: #: Okay + * Don't check these lines: #: + """ + report = pep8style.init_report(TestReport) + runner = pep8style.input_file + + def run_tests(filename): + """Run all the tests from a file.""" + lines = readlines(filename) + ['#:\n'] + line_offset = 0 + codes = ['Okay'] + testcase = [] + count_files = report.counters['files'] + for index, line in enumerate(lines): + if not line.startswith('#:'): + if codes: + # Collect the lines of the test case + testcase.append(line) + continue + if codes and index: + codes = [c for c in codes if c != 'Okay'] + # Run the checker + runner(filename, testcase, expected=codes, + line_offset=line_offset) + # output the real line numbers + line_offset = index + 1 + # configure the expected errors + codes = line.split()[1:] + # empty the test case buffer + del testcase[:] + report.counters['files'] = count_files + 1 + return report.counters['failed tests'] + + pep8style.runner = run_tests + + +def selftest(options): + """ + Test all check functions with test cases in docstrings. 
+ """ + count_failed = count_all = 0 + report = BaseReport(options) + counters = report.counters + checks = options.physical_checks + options.logical_checks + for name, check, argument_names in checks: + for line in check.__doc__.splitlines(): + line = line.lstrip() + match = SELFTEST_REGEX.match(line) + if match is None: + continue + code, source = match.groups() + lines = [part.replace(r'\t', '\t') + '\n' + for part in source.split(r'\n')] + checker = Checker(lines=lines, options=options, report=report) + checker.check_all() + error = None + if code == 'Okay': + if len(counters) > len(options.benchmark_keys): + codes = [key for key in counters + if key not in options.benchmark_keys] + error = "incorrectly found %s" % ', '.join(codes) + elif not counters.get(code): + error = "failed to find %s" % code + # Keep showing errors for multiple tests + for key in set(counters) - set(options.benchmark_keys): + del counters[key] + report.messages = {} + count_all += 1 + if not error: + if options.verbose: + print("%s: %s" % (code, source)) + else: + count_failed += 1 + print("%s: %s:" % (__file__, error)) + for line in checker.lines: + print(line.rstrip()) + return count_failed, count_all + + +def get_parser(prog='pep8', version=__version__): + parser = OptionParser(prog=prog, version=version, + usage="%prog [options] input ...") + parser.config_options = [ + 'exclude', 'filename', 'select', 'ignore', 'max-line-length', 'count', + 'format', 'quiet', 'show-pep8', 'show-source', 'statistics', 'verbose'] + parser.add_option('-v', '--verbose', default=0, action='count', + help="print status messages, or debug with -vv") + parser.add_option('-q', '--quiet', default=0, action='count', + help="report only file names, or nothing with -qq") + parser.add_option('-r', '--repeat', default=True, action='store_true', + help="(obsolete) show all occurrences of the same error") + parser.add_option('--first', action='store_false', dest='repeat', + help="show first occurrence of each 
error") + parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE, + help="exclude files or directories which match these " + "comma separated patterns (default: %default)") + parser.add_option('--filename', metavar='patterns', default='*.py', + help="when parsing directories, only check filenames " + "matching these comma separated patterns " + "(default: %default)") + parser.add_option('--select', metavar='errors', default='', + help="select errors and warnings (e.g. E,W6)") + parser.add_option('--ignore', metavar='errors', default='', + help="skip errors and warnings (e.g. E4,W)") + parser.add_option('--show-source', action='store_true', + help="show source code for each error") + parser.add_option('--show-pep8', action='store_true', + help="show text of PEP 8 for each error " + "(implies --first)") + parser.add_option('--statistics', action='store_true', + help="count errors and warnings") + parser.add_option('--count', action='store_true', + help="print total number of errors and warnings " + "to standard error and set exit code to 1 if " + "total is not null") + parser.add_option('--max-line-length', type='int', metavar='n', + default=MAX_LINE_LENGTH, + help="set maximum allowed line length " + "(default: %default)") + parser.add_option('--format', metavar='format', default='default', + help="set the error format [default|pylint|]") + parser.add_option('--diff', action='store_true', + help="report only lines changed according to the " + "unified diff received on STDIN") + group = parser.add_option_group("Testing Options") + group.add_option('--testsuite', metavar='dir', + help="run regression tests from dir") + group.add_option('--doctest', action='store_true', + help="run doctest on myself") + group.add_option('--benchmark', action='store_true', + help="measure processing speed") + return parser + + +def read_config(options, args, arglist, parser): + """Read both user configuration and local configuration.""" + config = RawConfigParser() + 
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args.

    Returns an (options, args) pair.  When neither arglist nor parse_argv
    is given, the module is assumed to be used as a library and the
    command line is not consulted.
    """
    if not arglist and not parse_argv:
        # Don't read the command line if the module is used as a library.
        arglist = []
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        if config_file is True:
            config_file = DEFAULT_CONFIG
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location (default: %default)")
    options, args = parser.parse_args(arglist)
    options.reporter = None

    if options.testsuite:
        args.append(options.testsuite)
    elif not options.doctest:
        if parse_argv and not args:
            # With no explicit input, default to '.' only when a diff is
            # supplied or a project config file exists here.
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
    options.reporter = parse_argv and options.quiet == 1 and FileReport

    # Split the comma-separated pattern/code lists into real lists.
    if options.filename:
        options.filename = options.filename.split(',')
    options.exclude = options.exclude.split(',')
    if options.select:
        options.select = options.select.split(',')
    if options.ignore:
        options.ignore = options.ignore.split(',')
    elif not (options.select or
              options.testsuite or options.doctest) and DEFAULT_IGNORE:
        # The default choice: ignore controversial checks
        # (for doctest and testsuite, all checks are required)
        options.ignore = DEFAULT_IGNORE.split(',')

    if options.diff:
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)

    return options, args
def _main():
    """Parse options and run checks on Python source."""
    pep8style = StyleGuide(parse_argv=True, config_file=True)
    options = pep8style.options
    if options.doctest:
        import doctest
        # Run the embedded doctests, then the docstring-based selftests.
        fail_d, done_d = doctest.testmod(report=False,
                                         verbose=options.verbose)
        fail_s, done_s = selftest(options)
        count_failed = fail_s + fail_d
        if not options.quiet:
            count_passed = done_d + done_s - count_failed
            print("%d passed and %d failed." % (count_passed, count_failed))
            print("Test failed." if count_failed else "Test passed.")
        if count_failed:
            sys.exit(1)
    if options.testsuite:
        init_tests(pep8style)
    report = pep8style.check_files()
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    if report.total_errors:
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)
if __name__ == '__main__':
    _main()


# === vim/ftplugin/python/pyflakes/__init__.py ===

__version__ = '0.6.1'


# === vim/ftplugin/python/pyflakes/api.py ===

"""
API for the command-line I{pyflakes} tool.
"""

import sys
import os
import _ast

from pyflakes import checker
from pyflakes import reporter as modReporter

__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']


def check(codeString, filename, reporter=None):
    """
    Check the Python source given by C{codeString} for flakes.

    @param codeString: The Python source to check.
    @type codeString: C{str}

    @param filename: The name of the file the source came from, used to report
        errors.
    @type filename: C{str}

    @param reporter: A L{Reporter} instance, where errors and warnings will be
        reported.

    @return: The number of warnings emitted.
    @rtype: C{int}
    """
    if reporter is None:
        reporter = modReporter._makeDefaultReporter()
    # First, compile into an AST and handle syntax errors.
    try:
        tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]

        (lineno, offset, text) = value.lineno, value.offset, value.text

        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(filename, 'problem decoding source')
        else:
            reporter.syntaxError(filename, msg, lineno, offset, text)
        return 1
    except Exception:
        # compile() can raise ValueError etc. on malformed source bytes.
        reporter.unexpectedError(filename, 'problem decoding source')
        return 1
    else:
        # Okay, it's syntactically valid. Now check it.
        w = checker.Checker(tree, filename)
        # Sort so warnings come out in source order.
        w.messages.sort(key=lambda m: m.lineno)
        for warning in w.messages:
            reporter.flake(warning)
        return len(w.messages)
def checkPath(filename, reporter=None):
    """
    Check the given path, printing out any warnings detected.

    @param filename: The path to the Python source file to check.
    @param reporter: A L{Reporter} instance, where errors and warnings will be
        reported.

    @return: the number of warnings printed
    """
    if reporter is None:
        reporter = modReporter._makeDefaultReporter()
    try:
        # Mode 'U' (universal newlines) was needed on Python 2, but the
        # flag was removed in Python 3.11 where open() raises ValueError
        # for it.  Fall back to plain text mode, where universal newline
        # translation is the default on Python 3.
        try:
            f = open(filename, 'U')
        except ValueError:
            f = open(filename, 'r')
        try:
            # Trailing newline guards against "unexpected EOF" on files
            # that do not end with one.
            return check(f.read() + '\n', filename, reporter)
        finally:
            f.close()
    except UnicodeError:
        reporter.unexpectedError(filename, 'problem decoding source')
    except IOError:
        msg = sys.exc_info()[1]
        reporter.unexpectedError(filename, msg.args[1])
    return 1


def iterSourceCode(paths):
    """
    Iterate over all Python source files in C{paths}.

    @param paths: A list of paths.  Directories will be recursed into and
        any .py files found will be yielded.  Any non-directories will be
        yielded as-is.
    """
    for path in paths:
        if os.path.isdir(path):
            for dirpath, dirnames, filenames in os.walk(path):
                for filename in filenames:
                    if filename.endswith('.py'):
                        yield os.path.join(dirpath, filename)
        else:
            yield path
+ """ + for path in paths: + if os.path.isdir(path): + for dirpath, dirnames, filenames in os.walk(path): + for filename in filenames: + if filename.endswith('.py'): + yield os.path.join(dirpath, filename) + else: + yield path + + +def checkRecursive(paths, reporter): + """ + Recursively check all source files in C{paths}. + + @param paths: A list of paths to Python source files and directories + containing Python source files. + @param reporter: A L{Reporter} where all of the warnings and errors + will be reported to. + @return: The number of warnings found. + """ + warnings = 0 + for sourcePath in iterSourceCode(paths): + warnings += checkPath(sourcePath, reporter) + return warnings + + +def main(): + args = sys.argv[1:] + reporter = modReporter._makeDefaultReporter() + if args: + warnings = checkRecursive(args, reporter) + else: + warnings = check(sys.stdin.read(), '', reporter) + raise SystemExit(warnings > 0) diff --git a/vim/ftplugin/python/pyflakes/checker.py b/vim/ftplugin/python/pyflakes/checker.py new file mode 100755 index 0000000..fd38d18 --- /dev/null +++ b/vim/ftplugin/python/pyflakes/checker.py @@ -0,0 +1,723 @@ +# -*- test-case-name: pyflakes -*- +# (c) 2005-2010 Divmod, Inc. +# See LICENSE file for details + +import os.path +try: + import builtins + PY2 = False +except ImportError: + import __builtin__ as builtins + PY2 = True + +try: + import ast + iter_child_nodes = ast.iter_child_nodes +except (ImportError, AttributeError): # Python 2.5 + import _ast as ast + + def iter_child_nodes(node, astcls=ast.AST): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. 
+ """ + for name in node._fields: + field = getattr(node, name, None) + if isinstance(field, astcls): + yield field + elif isinstance(field, list): + for item in field: + yield item +# Python >= 3.3 uses ast.Try instead of (ast.TryExcept + ast.TryFinally) +if hasattr(ast, 'Try'): + ast_TryExcept = ast.Try + ast_TryFinally = () +else: + ast_TryExcept = ast.TryExcept + ast_TryFinally = ast.TryFinally + +from pyflakes import messages + + +class Binding(object): + """ + Represents the binding of a value to a name. + + The checker uses this to keep track of which names have been bound and + which names have not. See L{Assignment} for a special type of binding that + is checked with stricter rules. + + @ivar used: pair of (L{Scope}, line-number) indicating the scope and + line number that this binding was last used + """ + + def __init__(self, name, source): + self.name = name + self.source = source + self.used = False + + def __str__(self): + return self.name + + def __repr__(self): + return '<%s object %r from line %r at 0x%x>' % (self.__class__.__name__, + self.name, + self.source.lineno, + id(self)) + + +class UnBinding(Binding): + """Created by the 'del' operator.""" + + +class Importation(Binding): + """ + A binding created by an import statement. + + @ivar fullName: The complete name given to the import statement, + possibly including multiple dotted components. + @type fullName: C{str} + """ + def __init__(self, name, source): + self.fullName = name + name = name.split('.')[0] + super(Importation, self).__init__(name, source) + + +class Argument(Binding): + """ + Represents binding a name as an argument. + """ + + +class Definition(Binding): + """ + A binding that defines a function or a class. + """ + + +class Assignment(Binding): + """ + Represents binding a name with an explicit assignment. + + The checker will raise warnings for any Assignment that isn't used. 
class FunctionDefinition(Definition):
    pass


class ClassDefinition(Definition):
    pass


class ExportBinding(Binding):
    """
    A binding created by an C{__all__} assignment.

    When the assigned value is a literal list of literal strings, those
    strings are treated as the module's exported names and receive extra
    checking; imports that appear in C{__all__} are not reported as
    unused.  Any other form of C{__all__} value yields no names.
    """

    def names(self):
        """Return the list of names referenced by this binding."""
        if not isinstance(self.source, ast.List):
            return []
        # Keep only literal strings; anything dynamic is ignored.
        return [node.s for node in self.source.elts
                if isinstance(node, ast.Str)]


class Scope(dict):
    # Flipped to True when a 'from x import *' is encountered.
    importStarred = False
    # Flipped to True when locals() is called within the scope.
    usesLocals = False

    def __repr__(self):
        return '<%s at 0x%x %s>' % (
            self.__class__.__name__, id(self), dict.__repr__(self))


class ClassScope(Scope):
    pass


class FunctionScope(Scope):
    """
    A name scope for a function body.

    @ivar globals: Names declared 'global' in this function.
    """

    def __init__(self):
        super(FunctionScope, self).__init__()
        self.globals = {}


class ModuleScope(Scope):
    pass
# Globally defined names which are not attributes of the builtins module, or
# are only present on some platforms.
_MAGIC_GLOBALS = ['__file__', '__builtins__', 'WindowsError']


def getNodeName(node):
    # Returns node.id, or node.name, or None
    if hasattr(node, 'id'):  # One of the many nodes with an id
        return node.id
    if hasattr(node, 'name'):  # a ExceptHandler node
        return node.name


class Checker(object):
    """
    I check the cleanliness and sanity of Python code.

    @ivar _deferredFunctions: Tracking list used by L{deferFunction}. Elements
        of the list are two-tuples. The first element is the callable passed
        to L{deferFunction}. The second element is a copy of the scope stack
        at the time L{deferFunction} was called.

    @ivar _deferredAssignments: Similar to C{_deferredFunctions}, but for
        callables which are deferred assignment checks.
    """

    nodeDepth = 0
    traceTree = False
    builtIns = set(dir(builtins)) | set(_MAGIC_GLOBALS)

    def __init__(self, tree, filename='(none)', builtins=None):
        # NOTE: the whole analysis runs from the constructor: the tree is
        # walked, deferred handlers are run, then dead scopes are checked.
        self._deferredFunctions = []
        self._deferredAssignments = []
        self.deadScopes = []
        self.messages = []
        self.filename = filename
        if builtins:
            self.builtIns = self.builtIns.union(builtins)
        self.scopeStack = [ModuleScope()]
        self.futuresAllowed = True
        self.root = tree
        self.handleChildren(tree)
        self.runDeferred(self._deferredFunctions)
        # Set _deferredFunctions to None so that deferFunction will fail
        # noisily if called after we've run through the deferred functions.
        self._deferredFunctions = None
        self.runDeferred(self._deferredAssignments)
        # Set _deferredAssignments to None so that deferAssignment will fail
        # noisily if called after we've run through the deferred assignments.
        self._deferredAssignments = None
        del self.scopeStack[1:]
        self.popScope()
        self.checkDeadScopes()

    def deferFunction(self, callable):
        """
        Schedule a function handler to be called just before completion.

        This is used for handling function bodies, which must be deferred
        because code later in the file might modify the global scope. When
        `callable` is called, the scope at the time this is called will be
        restored, however it will contain any new bindings added to it.
        """
        self._deferredFunctions.append((callable, self.scopeStack[:]))
    def deferAssignment(self, callable):
        """
        Schedule an assignment handler to be called just after deferred
        function handlers.
        """
        self._deferredAssignments.append((callable, self.scopeStack[:]))

    def runDeferred(self, deferred):
        """
        Run the callables in C{deferred} using their associated scope stack.
        """
        for handler, scope in deferred:
            self.scopeStack = scope
            handler()

    @property
    def scope(self):
        # The innermost (current) scope.
        return self.scopeStack[-1]

    def popScope(self):
        # Retired scopes are kept so checkDeadScopes can inspect them.
        self.deadScopes.append(self.scopeStack.pop())

    def checkDeadScopes(self):
        """
        Look at scopes which have been fully examined and report names in them
        which were imported but unused.
        """
        for scope in self.deadScopes:
            export = isinstance(scope.get('__all__'), ExportBinding)
            if export:
                all = scope['__all__'].names()
                # In __init__.py, or after import *, __all__ may legitimately
                # list names not bound in this file, so skip the check.
                if not scope.importStarred and os.path.basename(self.filename) != '__init__.py':
                    # Look for possible mistakes in the export list
                    undefined = set(all) - set(scope)
                    for name in undefined:
                        self.report(messages.UndefinedExport,
                                    scope['__all__'].source.lineno, name)
            else:
                all = []

            # Look for imported names that aren't used.
            for importation in scope.values():
                if isinstance(importation, Importation):
                    if not importation.used and importation.name not in all:
                        self.report(messages.UnusedImport,
                                    importation.source.lineno, importation.name)
    def pushFunctionScope(self):
        self.scopeStack.append(FunctionScope())

    def pushClassScope(self):
        self.scopeStack.append(ClassScope())

    def report(self, messageClass, *args, **kwargs):
        # Record a warning; args/kwargs are forwarded to the message class.
        self.messages.append(messageClass(self.filename, *args, **kwargs))

    def hasParent(self, node, kind):
        # True if any ancestor of `node` is an instance of `kind`;
        # returns None (falsy) otherwise.
        while hasattr(node, 'parent'):
            node = node.parent
            if isinstance(node, kind):
                return True

    def getCommonAncestor(self, lnode, rnode, stop=None):
        # Walk both nodes up toward `stop` (the tree root by default),
        # using the `level` depth attribute set in handleNode to climb
        # the deeper side first.
        if not stop:
            stop = self.root
        if lnode is rnode:
            return lnode
        if stop in (lnode, rnode):
            return stop

        if not hasattr(lnode, 'parent') or not hasattr(rnode, 'parent'):
            return
        if (lnode.level > rnode.level):
            return self.getCommonAncestor(lnode.parent, rnode, stop)
        if (rnode.level > lnode.level):
            return self.getCommonAncestor(lnode, rnode.parent, stop)
        return self.getCommonAncestor(lnode.parent, rnode.parent, stop)

    def descendantOf(self, node, ancestors, stop=None):
        # True if `node` lies under any node in `ancestors` (strictly
        # below `stop`).
        for a in ancestors:
            if self.getCommonAncestor(node, a, stop) not in (stop, None):
                return True
        return False

    def onFork(self, parent, lnode, rnode, items):
        # True when exactly one of lnode/rnode lies within `items`.
        return (self.descendantOf(lnode, items, parent) ^
                self.descendantOf(rnode, items, parent))

    def differentForks(self, lnode, rnode):
        """True, if lnode and rnode are located on different forks of IF/TRY"""
        ancestor = self.getCommonAncestor(lnode, rnode)
        if isinstance(ancestor, ast.If):
            for fork in (ancestor.body, ancestor.orelse):
                if self.onFork(ancestor, lnode, rnode, fork):
                    return True
        elif isinstance(ancestor, ast_TryExcept):
            body = ancestor.body + ancestor.orelse
            for fork in [body] + [[hdl] for hdl in ancestor.handlers]:
                if self.onFork(ancestor, lnode, rnode, fork):
                    return True
        elif isinstance(ancestor, ast_TryFinally):
            if self.onFork(ancestor, lnode, rnode, ancestor.body):
                return True
        return False
    def addBinding(self, node, value, reportRedef=True):
        """
        Called when a binding is altered.

        - `node` is the statement responsible for the change
        - `value` is the optional new value, a Binding instance, associated
          with the binding; if None, the binding is deleted if it exists.
        - if `reportRedef` is True (default), rebinding while unused will be
          reported.
        """
        redefinedWhileUnused = False
        if not isinstance(self.scope, ClassScope):
            # Rebinding an unused import in any enclosing scope is reported,
            # unless the two bindings live on different if/try forks.
            for scope in self.scopeStack[::-1]:
                existing = scope.get(value.name)
                if (isinstance(existing, Importation)
                        and not existing.used
                        and (not isinstance(value, Importation) or value.fullName == existing.fullName)
                        and reportRedef
                        and not self.differentForks(node, existing.source)):
                    redefinedWhileUnused = True
                    self.report(messages.RedefinedWhileUnused,
                                node.lineno, value.name, existing.source.lineno)

        existing = self.scope.get(value.name)
        if not redefinedWhileUnused and self.hasParent(value.source, ast.ListComp):
            if (existing and reportRedef
                    and not self.hasParent(existing.source, (ast.For, ast.ListComp))):
                self.report(messages.RedefinedInListComp,
                            node.lineno, value.name, existing.source.lineno)

        if isinstance(value, UnBinding):
            try:
                del self.scope[value.name]
            except KeyError:
                # Deleting a name that was never bound here.
                self.report(messages.UndefinedName, node.lineno, value.name)
        elif (isinstance(existing, Definition)
                and not existing.used
                and not self.differentForks(node, existing.source)):
            self.report(messages.RedefinedWhileUnused,
                        node.lineno, value.name, existing.source.lineno)
        else:
            self.scope[value.name] = value

    def handleNodeLoad(self, node):
        # Resolve a name read: local scope first, then enclosing function
        # scopes, then the module scope, then builtins.
        name = getNodeName(node)
        if not name:
            return
        # try local scope
        importStarred = self.scope.importStarred
        try:
            self.scope[name].used = (self.scope, node.lineno)
        except KeyError:
            pass
        else:
            return

        # try enclosing function scopes
        for scope in self.scopeStack[-2:0:-1]:
            importStarred = importStarred or scope.importStarred
            if not isinstance(scope, FunctionScope):
                continue
            try:
                scope[name].used = (self.scope, node.lineno)
            except KeyError:
                pass
            else:
                return

        # try global scope
        importStarred = importStarred or self.scopeStack[0].importStarred
        try:
            self.scopeStack[0][name].used = (self.scope, node.lineno)
        except KeyError:
            # An import * anywhere on the path makes any name potentially
            # defined, so suppress the warning in that case.
            if not importStarred and name not in self.builtIns:
                if (os.path.basename(self.filename) == '__init__.py' and name == '__path__'):
                    # the special name __path__ is valid only in packages
                    pass
                else:
                    self.report(messages.UndefinedName, node.lineno, name)
    def handleNodeStore(self, node):
        # Record a name write, warning when it shadows a name from an
        # enclosing scope that this scope has already read.
        name = getNodeName(node)
        if not name:
            return
        # if the name hasn't already been defined in the current scope
        if isinstance(self.scope, FunctionScope) and name not in self.scope:
            # for each function or module scope above us
            for scope in self.scopeStack[:-1]:
                if not isinstance(scope, (FunctionScope, ModuleScope)):
                    continue
                # if the name was defined in that scope, and the name has
                # been accessed already in the current scope, and hasn't
                # been declared global
                if (name in scope and scope[name].used and scope[name].used[0] is self.scope
                        and name not in self.scope.globals):
                    # then it's probably a mistake
                    self.report(messages.UndefinedLocal,
                                scope[name].used[1], name, scope[name].source.lineno)
                    break

        parent = getattr(node, 'parent', None)
        # Choose the binding type from the assignment context: unpacking
        # targets get a plain Binding, __all__ gets an ExportBinding.
        if isinstance(parent, (ast.For, ast.comprehension, ast.Tuple, ast.List)):
            binding = Binding(name, node)
        elif parent is not None and name == '__all__' and isinstance(self.scope, ModuleScope):
            binding = ExportBinding(name, parent.value)
        else:
            binding = Assignment(name, node)
        if name in self.scope:
            # Preserve the use record across the rebinding.
            binding.used = self.scope[name].used
        self.addBinding(node, binding)

    def handleNodeDelete(self, node):
        name = getNodeName(node)
        if not name:
            return
        if isinstance(self.scope, FunctionScope) and name in self.scope.globals:
            # 'del' of a declared global only removes the declaration here.
            del self.scope.globals[name]
        else:
            self.addBinding(node, UnBinding(name, node))
    def handleChildren(self, tree):
        # Default handler: recurse into every child node.
        for node in iter_child_nodes(tree):
            self.handleNode(node, tree)

    def isDocstring(self, node):
        """
        Determine if the given node is a docstring, as long as it is at the
        correct place in the node tree.
        """
        return isinstance(node, ast.Str) or (isinstance(node, ast.Expr) and
                                             isinstance(node.value, ast.Str))

    def handleNode(self, node, parent):
        # Dispatch a node to the handler named after its (uppercased)
        # class name, maintaining parent links and the depth counter used
        # by getCommonAncestor.
        if node is None:
            return
        node.parent = parent
        if self.traceTree:
            print(' ' * self.nodeDepth + node.__class__.__name__)
        self.nodeDepth += 1
        # Only docstrings and __future__ imports may precede other
        # statements; anything else ends the __future__ window.
        if self.futuresAllowed and not (isinstance(node, ast.ImportFrom) or
                                        self.isDocstring(node)):
            self.futuresAllowed = False
        nodeType = node.__class__.__name__.upper()
        node.level = self.nodeDepth
        try:
            handler = getattr(self, nodeType)
            handler(node)
        finally:
            self.nodeDepth -= 1
        if self.traceTree:
            print(' ' * self.nodeDepth + 'end ' + node.__class__.__name__)

    def ignore(self, node):
        pass

    # "stmt" type nodes
    RETURN = DELETE = PRINT = WHILE = IF = WITH = WITHITEM = RAISE = \
        TRYEXCEPT = TRYFINALLY = TRY = ASSERT = EXEC = EXPR = handleChildren

    CONTINUE = BREAK = PASS = ignore

    # "expr" type nodes
    BOOLOP = BINOP = UNARYOP = IFEXP = DICT = SET = YIELD = YIELDFROM = \
        COMPARE = CALL = REPR = ATTRIBUTE = SUBSCRIPT = LIST = TUPLE = \
        STARRED = handleChildren

    NUM = STR = BYTES = ELLIPSIS = ignore

    # "slice" type nodes
    SLICE = EXTSLICE = INDEX = handleChildren

    # expression contexts are node instances too, though being constants
    LOAD = STORE = DEL = AUGLOAD = AUGSTORE = PARAM = ignore

    # same for operators
    AND = OR = ADD = SUB = MULT = DIV = MOD = POW = LSHIFT = RSHIFT = \
        BITOR = BITXOR = BITAND = FLOORDIV = INVERT = NOT = UADD = USUB = \
        EQ = NOTEQ = LT = LTE = GT = GTE = IS = ISNOT = IN = NOTIN = ignore

    # additional node types
    COMPREHENSION = KEYWORD = handleChildren

    def GLOBAL(self, node):
        """
        Keep track of globals declarations.
        """
        if isinstance(self.scope, FunctionScope):
            self.scope.globals.update(dict.fromkeys(node.names))

    NONLOCAL = GLOBAL

    def LISTCOMP(self, node):
        # handle generators before element
        for gen in node.generators:
            self.handleNode(gen, node)
        self.handleNode(node.elt, node)

    GENERATOREXP = SETCOMP = LISTCOMP

    def DICTCOMP(self, node):
        # Generators first, so comprehension variables are bound before
        # the key/value expressions reference them.
        for gen in node.generators:
            self.handleNode(gen, node)
        self.handleNode(node.key, node)
        self.handleNode(node.value, node)

    def FOR(self, node):
        """
        Process bindings for loop variables.
        """
        vars = []

        def collectLoopVars(n):
            # Gather every plain name in the (possibly nested) loop target.
            if isinstance(n, ast.Name):
                vars.append(n.id)
            elif isinstance(n, ast.expr_context):
                return
            else:
                for c in iter_child_nodes(n):
                    collectLoopVars(c)

        collectLoopVars(node.target)
        for varn in vars:
            if (isinstance(self.scope.get(varn), Importation)
                    # unused ones will get an unused import warning
                    and self.scope[varn].used):
                self.report(messages.ImportShadowedByLoopVar,
                            node.lineno, varn, self.scope[varn].source.lineno)

        self.handleChildren(node)

    def NAME(self, node):
        """
        Handle occurrence of Name (which can be a load/store/delete access.)
        """
        if node.id == 'locals' and isinstance(node.parent, ast.Call):
            # we are doing locals() call in current scope
            self.scope.usesLocals = True
        # Locate the name in locals / function / globals scopes.
        if isinstance(node.ctx, (ast.Load, ast.AugLoad)):
            self.handleNodeLoad(node)
        elif isinstance(node.ctx, (ast.Store, ast.AugStore)):
            self.handleNodeStore(node)
        elif isinstance(node.ctx, ast.Del):
            self.handleNodeDelete(node)
        else:
            # must be a Param context -- this only happens for names in function
            # arguments, but these aren't dispatched through here
            raise RuntimeError("Got impossible expression context: %r" % (node.ctx,))
    def FUNCTIONDEF(self, node):
        if not hasattr(node, 'decorator_list'):  # Python 2.5
            node.decorator_list = node.decorators
        for deco in node.decorator_list:
            self.handleNode(deco, node)
        self.addBinding(node, FunctionDefinition(node.name, node))
        # The argument/body handling is shared with lambdas.
        self.LAMBDA(node)

    def LAMBDA(self, node):
        # Collect argument names (reporting duplicates), handle defaults
        # in the enclosing scope, then defer the body until the whole
        # module has been seen.
        args = []

        if PY2:
            def addArgs(arglist):
                # Python 2 allows nested tuple parameters; flatten them.
                for arg in arglist:
                    if isinstance(arg, ast.Tuple):
                        addArgs(arg.elts)
                    else:
                        if arg.id in args:
                            self.report(messages.DuplicateArgument,
                                        node.lineno, arg.id)
                        args.append(arg.id)
            addArgs(node.args.args)
            defaults = node.args.defaults
        else:
            for arg in node.args.args + node.args.kwonlyargs:
                if arg.arg in args:
                    self.report(messages.DuplicateArgument,
                                node.lineno, arg.arg)
                args.append(arg.arg)
                self.handleNode(arg.annotation, node)
            if hasattr(node, 'returns'):  # Only for FunctionDefs
                for annotation in (node.args.varargannotation,
                                   node.args.kwargannotation, node.returns):
                    self.handleNode(annotation, node)
            defaults = node.args.defaults + node.args.kw_defaults

        # vararg/kwarg identifiers are not Name nodes
        for wildcard in (node.args.vararg, node.args.kwarg):
            if not wildcard:
                continue
            if wildcard in args:
                self.report(messages.DuplicateArgument, node.lineno, wildcard)
            args.append(wildcard)
        for default in defaults:
            self.handleNode(default, node)

        def runFunction():

            self.pushFunctionScope()
            for name in args:
                self.addBinding(node, Argument(name, node), reportRedef=False)
            if isinstance(node.body, list):
                # case for FunctionDefs
                for stmt in node.body:
                    self.handleNode(stmt, node)
            else:
                # case for Lambdas
                self.handleNode(node.body, node)

            def checkUnusedAssignments():
                """
                Check to see if any assignments have not been used.
                """
                for name, binding in self.scope.items():
                    if (not binding.used and name not in self.scope.globals
                            and not self.scope.usesLocals
                            and isinstance(binding, Assignment)):
                        self.report(messages.UnusedVariable,
                                    binding.source.lineno, name)
            self.deferAssignment(checkUnusedAssignments)
            self.popScope()

        self.deferFunction(runFunction)
    def CLASSDEF(self, node):
        """
        Check names used in a class definition, including its decorators, base
        classes, and the body of its definition.  Additionally, add its name to
        the current scope.
        """
        # no class decorator in Python 2.5
        for deco in getattr(node, 'decorator_list', ''):
            self.handleNode(deco, node)
        for baseNode in node.bases:
            self.handleNode(baseNode, node)
        if not PY2:
            # Python 3 metaclass/keyword arguments in the class header.
            for keywordNode in node.keywords:
                self.handleNode(keywordNode, node)
        self.pushClassScope()
        for stmt in node.body:
            self.handleNode(stmt, node)
        self.popScope()
        self.addBinding(node, ClassDefinition(node.name, node))

    def ASSIGN(self, node):
        # Value first so uses on the RHS see the pre-assignment bindings.
        self.handleNode(node.value, node)
        for target in node.targets:
            self.handleNode(target, node)

    def AUGASSIGN(self, node):
        # Augmented assignment both reads and writes the target.
        self.handleNodeLoad(node.target)
        self.handleNode(node.value, node)
        self.handleNode(node.target, node)

    def IMPORT(self, node):
        for alias in node.names:
            name = alias.asname or alias.name
            importation = Importation(name, node)
            self.addBinding(node, importation)

    def IMPORTFROM(self, node):
        if node.module == '__future__':
            if not self.futuresAllowed:
                self.report(messages.LateFutureImport,
                            node.lineno, [n.name for n in node.names])
        else:
            self.futuresAllowed = False

        for alias in node.names:
            if alias.name == '*':
                self.scope.importStarred = True
                self.report(messages.ImportStarUsed, node.lineno, node.module)
                continue
            name = alias.asname or alias.name
            importation = Importation(name, node)
            if node.module == '__future__':
                # __future__ names are implicitly "used".
                importation.used = (self.scope, node.lineno)
            self.addBinding(node, importation)
+ continue + name = alias.asname or alias.name + importation = Importation(name, node) + if node.module == '__future__': + importation.used = (self.scope, node.lineno) + self.addBinding(node, importation) + + def EXCEPTHANDLER(self, node): + # 3.x: in addition to handling children, we must handle the name of + # the exception, which is not a Name node, but a simple string. + if isinstance(node.name, str): + self.handleNodeStore(node) + self.handleChildren(node) diff --git a/vim/ftplugin/python/pyflakes/messages.py b/vim/ftplugin/python/pyflakes/messages.py new file mode 100755 index 0000000..e3abab6 --- /dev/null +++ b/vim/ftplugin/python/pyflakes/messages.py @@ -0,0 +1,113 @@ +# (c) 2005 Divmod, Inc. See LICENSE file for details + + +class Message(object): + message = '' + message_args = () + + def __init__(self, filename, lineno): + self.filename = filename + self.lineno = lineno + + def __str__(self): + return '%s:%s: %s' % (self.filename, self.lineno, self.message % self.message_args) + + +class UnusedImport(Message): + message = '%r imported but unused' + + def __init__(self, filename, lineno, name): + Message.__init__(self, filename, lineno) + self.message_args = (name,) + + +class RedefinedWhileUnused(Message): + message = 'redefinition of unused %r from line %r' + + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class RedefinedInListComp(Message): + message = 'list comprehension redefines %r from line %r' + + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class ImportShadowedByLoopVar(Message): + message = 'import %r from line %r shadowed by loop variable' + + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class ImportStarUsed(Message): + message = "'from %s 
import *' used; unable to detect undefined names" + + def __init__(self, filename, lineno, modname): + Message.__init__(self, filename, lineno) + self.message_args = (modname,) + + +class UndefinedName(Message): + message = 'undefined name %r' + + def __init__(self, filename, lineno, name): + Message.__init__(self, filename, lineno) + self.message_args = (name,) + + +class UndefinedExport(Message): + message = 'undefined name %r in __all__' + + def __init__(self, filename, lineno, name): + Message.__init__(self, filename, lineno) + self.message_args = (name,) + + +class UndefinedLocal(Message): + message = "local variable %r (defined in enclosing scope on line %r) referenced before assignment" + + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class DuplicateArgument(Message): + message = 'duplicate argument %r in function definition' + + def __init__(self, filename, lineno, name): + Message.__init__(self, filename, lineno) + self.message_args = (name,) + + +class Redefined(Message): + message = 'redefinition of %r from line %r' + + def __init__(self, filename, lineno, name, orig_lineno): + Message.__init__(self, filename, lineno) + self.message_args = (name, orig_lineno) + + +class LateFutureImport(Message): + message = 'future import(s) %r after other statements' + + def __init__(self, filename, lineno, names): + Message.__init__(self, filename, lineno) + self.message_args = (names,) + + +class UnusedVariable(Message): + """ + Indicates that a variable has been explicitly assigned to but not actually + used. 
 + """ + message = 'local variable %r is assigned to but never used' + + def __init__(self, filename, lineno, names): + Message.__init__(self, filename, lineno) + self.message_args = (names,) diff --git a/vim/ftplugin/python/pyflakes/reporter.py b/vim/ftplugin/python/pyflakes/reporter.py new file mode 100755 index 0000000..09956f4 --- /dev/null +++ b/vim/ftplugin/python/pyflakes/reporter.py @@ -0,0 +1,79 @@ +# (c) 2005-2012 Divmod, Inc. +# See LICENSE file for details + +import sys +try: + u = unicode +except NameError: + u = str + + +class Reporter(object): + """ + Formats the results of pyflakes checks to users. + """ + + def __init__(self, warningStream, errorStream): + """ + Construct a L{Reporter}. + + @param warningStream: A file-like object where warnings will be + written to. The stream's C{write} method must accept unicode. + C{sys.stdout} is a good value. + @param errorStream: A file-like object where error output will be + written to. The stream's C{write} method must accept unicode. + C{sys.stderr} is a good value. + """ + self._stdout = warningStream + self._stderr = errorStream + + def unexpectedError(self, filename, msg): + """ + An unexpected error occurred trying to process C{filename}. + + @param filename: The path to a file that we could not process. + @ptype filename: C{unicode} + @param msg: A message explaining the problem. + @ptype msg: C{unicode} + """ + self._stderr.write(u("%s: %s\n") % (filename, msg)) + + def syntaxError(self, filename, msg, lineno, offset, text): + """ + There was a syntax error in C{filename}. + + @param filename: The path to the file with the syntax error. + @ptype filename: C{unicode} + @param msg: An explanation of the syntax error. + @ptype msg: C{unicode} + @param lineno: The line number where the syntax error occurred. + @ptype lineno: C{int} + @param offset: The column on which the syntax error occurred. + @ptype offset: C{int} + @param text: The source code containing the syntax error. 
+ @ptype text: C{unicode} + """ + line = text.splitlines()[-1] + if offset is not None: + offset = offset - (len(text) - len(line)) + self._stderr.write(u('%s:%d: %s\n') % (filename, lineno, msg)) + self._stderr.write(u(line)) + self._stderr.write(u('\n')) + if offset is not None: + self._stderr.write(u(" " * (offset + 1) + "^\n")) + + def flake(self, message): + """ + pyflakes found something wrong with the code. + + @param: A L{pyflakes.messages.Message}. + """ + self._stdout.write(u(message)) + self._stdout.write(u('\n')) + + +def _makeDefaultReporter(): + """ + Make a reporter that can be used when no reporter is specified. + """ + return Reporter(sys.stdout, sys.stderr) diff --git a/vim/plugin/vcsbzr.vim b/vim/plugin/vcsbzr.vim index 966f2ee..2e9c53b 100644 --- a/vim/plugin/vcsbzr.vim +++ b/vim/plugin/vcsbzr.vim @@ -65,7 +65,7 @@ let s:bzrFunctions = {} " Returns the executable used to invoke bzr suitable for use in a shell " command. function! s:Executable() - return VCSCommandGetOption('VCSCommandBZRExec', 'bzr') + return shellescape(VCSCommandGetOption('VCSCommandBZRExec', 'bzr')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText) {{{2 diff --git a/vim/plugin/vcscommand.vim b/vim/plugin/vcscommand.vim index 2726436..f95d10c 100644 --- a/vim/plugin/vcscommand.vim +++ b/vim/plugin/vcscommand.vim @@ -378,31 +378,21 @@ endfunction " command line on Windows systems. function! s:VCSCommandUtility.system(...) 
- if (has("win32") || has("win64")) && &sxq !~ '"' - let save_sxq = &sxq - set sxq=\" - endif - try - let output = call('system', a:000) - if exists('*iconv') && has('multi_byte') - if(strlen(&tenc) && &tenc != &enc) - let output = iconv(output, &tenc, &enc) - else - let originalBuffer = VCSCommandGetOriginalBuffer(VCSCommandGetOption('VCSCommandEncodeAsFile', 0)) - if originalBuffer - let fenc = getbufvar(originalBuffer, '&fenc') - if fenc != &enc - let output = iconv(output, fenc, &enc) - endif + let output = call('system', a:000) + if exists('*iconv') && has('multi_byte') + if(strlen(&tenc) && &tenc != &enc) + let output = iconv(output, &tenc, &enc) + else + let originalBuffer = VCSCommandGetOriginalBuffer(VCSCommandGetOption('VCSCommandEncodeAsFile', 0)) + if originalBuffer + let fenc = getbufvar(originalBuffer, '&fenc') + if fenc != &enc + let output = iconv(output, fenc, &enc) endif endif + endif - endif - finally - if exists("save_sxq") - let &sxq = save_sxq - endif - endtry + endif return output endfunction @@ -1033,8 +1023,7 @@ function! s:VCSVimDiff(...) let b:VCSCommandCommand = 'vimdiff' diffthis let t:vcsCommandVimDiffScratchList = [resultBuffer] - " If no split method is defined, cheat, and set it to vertical. - call s:VCSCommandUtility.pushContext({'VCSCommandSplit': orientation}) + call s:VCSCommandUtility.pushContext({'VCSCommandEdit': 'split', 'VCSCommandSplit': orientation}) try let resultBuffer = s:VCSReview(a:2) finally @@ -1048,7 +1037,6 @@ function! s:VCSVimDiff(...) diffthis let t:vcsCommandVimDiffScratchList += [resultBuffer] else - " Add new buffer. Force splitting behavior, otherwise why use vimdiff? 
call s:VCSCommandUtility.pushContext({'VCSCommandEdit': 'split', 'VCSCommandSplit': orientation}) try if(a:0 == 0) diff --git a/vim/plugin/vcscvs.vim b/vim/plugin/vcscvs.vim index 11c7433..fe6db21 100644 --- a/vim/plugin/vcscvs.vim +++ b/vim/plugin/vcscvs.vim @@ -111,7 +111,7 @@ let s:cvsFunctions = {} " Returns the executable used to invoke cvs suitable for use in a shell " command. function! s:Executable() - return VCSCommandGetOption('VCSCommandCVSExec', 'cvs') + return shellescape(VCSCommandGetOption('VCSCommandCVSExec', 'cvs')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText, options) {{{2 diff --git a/vim/plugin/vcsgit.vim b/vim/plugin/vcsgit.vim index 2667982..a893d15 100644 --- a/vim/plugin/vcsgit.vim +++ b/vim/plugin/vcsgit.vim @@ -70,7 +70,7 @@ let s:gitFunctions = {} " Returns the executable used to invoke git suitable for use in a shell " command. function! s:Executable() - return VCSCommandGetOption('VCSCommandGitExec', 'git') + return shellescape(VCSCommandGetOption('VCSCommandGitExec', 'git')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText, options) {{{2 diff --git a/vim/plugin/vcshg.vim b/vim/plugin/vcshg.vim index 775ede0..9dbbde8 100644 --- a/vim/plugin/vcshg.vim +++ b/vim/plugin/vcshg.vim @@ -72,7 +72,7 @@ let s:hgFunctions = {} " Returns the executable used to invoke hg suitable for use in a shell " command. function! s:Executable() - return VCSCommandGetOption('VCSCommandHGExec', 'hg') + return shellescape(VCSCommandGetOption('VCSCommandHGExec', 'hg')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText, options) {{{2 diff --git a/vim/plugin/vcssvk.vim b/vim/plugin/vcssvk.vim index bee84c3..f04c9cc 100644 --- a/vim/plugin/vcssvk.vim +++ b/vim/plugin/vcssvk.vim @@ -65,7 +65,7 @@ let s:svkFunctions = {} " Returns the executable used to invoke SVK suitable for use in a shell " command. function! 
s:Executable() - return VCSCommandGetOption('VCSCommandSVKExec', 'svk') + return shellescape(VCSCommandGetOption('VCSCommandSVKExec', 'svk')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText, options) {{{2 diff --git a/vim/plugin/vcssvn.vim b/vim/plugin/vcssvn.vim index 8ad6388..eabe92e 100644 --- a/vim/plugin/vcssvn.vim +++ b/vim/plugin/vcssvn.vim @@ -72,7 +72,7 @@ let s:svnFunctions = {} " Returns the executable used to invoke git suitable for use in a shell " command. function! s:Executable() - return VCSCommandGetOption('VCSCommandSVNExec', 'svn') + return shellescape(VCSCommandGetOption('VCSCommandSVNExec', 'svn')) endfunction " Function: s:DoCommand(cmd, cmdName, statusText, options) {{{2 @@ -193,6 +193,10 @@ function! s:svnFunctions.GetBufferInfo() if statusText =~ '^?' return ['Unknown'] endif + " File explicitly ignored by SVN. + if statusText =~ '^I' + return ['Ignored'] + endif let [flags, revision, repository] = matchlist(statusText, '^\(.\{9}\)\s*\(\d\+\)\s\+\(\d\+\)')[1:3] if revision == ''