diff -Nru numpydoc-0.4/comment_eater.py numpydoc-0.5/comment_eater.py --- numpydoc-0.4/comment_eater.py 2010-09-15 19:18:51.000000000 +0000 +++ numpydoc-0.5/comment_eater.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,158 +0,0 @@ -from cStringIO import StringIO -import compiler -import inspect -import textwrap -import tokenize - -from compiler_unparse import unparse - - -class Comment(object): - """ A comment block. - """ - is_comment = True - def __init__(self, start_lineno, end_lineno, text): - # int : The first line number in the block. 1-indexed. - self.start_lineno = start_lineno - # int : The last line number. Inclusive! - self.end_lineno = end_lineno - # str : The text block including '#' character but not any leading spaces. - self.text = text - - def add(self, string, start, end, line): - """ Add a new comment line. - """ - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - self.text += string - - def __repr__(self): - return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno, self.text) - - -class NonComment(object): - """ A non-comment block of code. - """ - is_comment = False - def __init__(self, start_lineno, end_lineno): - self.start_lineno = start_lineno - self.end_lineno = end_lineno - - def add(self, string, start, end, line): - """ Add lines to the block. - """ - if string.strip(): - # Only add if not entirely whitespace. - self.start_lineno = min(self.start_lineno, start[0]) - self.end_lineno = max(self.end_lineno, end[0]) - - def __repr__(self): - return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, - self.end_lineno) - - -class CommentBlocker(object): - """ Pull out contiguous comment blocks. - """ - def __init__(self): - # Start with a dummy. - self.current_block = NonComment(0, 0) - - # All of the blocks seen so far. - self.blocks = [] - - # The index mapping lines of code to their associated comment blocks. - self.index = {} - - def process_file(self, file): - """ Process a file object. - """ - for token in tokenize.generate_tokens(file.next): - self.process_token(*token) - self.make_index() - - def process_token(self, kind, string, start, end, line): - """ Process a single token. - """ - if self.current_block.is_comment: - if kind == tokenize.COMMENT: - self.current_block.add(string, start, end, line) - else: - self.new_noncomment(start[0], end[0]) - else: - if kind == tokenize.COMMENT: - self.new_comment(string, start, end, line) - else: - self.current_block.add(string, start, end, line) - - def new_noncomment(self, start_lineno, end_lineno): - """ We are transitioning from a noncomment to a comment. - """ - block = NonComment(start_lineno, end_lineno) - self.blocks.append(block) - self.current_block = block - - def new_comment(self, string, start, end, line): - """ Possibly add a new comment. - - Only adds a new comment if this comment is the only thing on the line. - Otherwise, it extends the noncomment block. - """ - prefix = line[:start[1]] - if prefix.strip(): - # Oops! Trailing comment, not a comment block. - self.current_block.add(string, start, end, line) - else: - # A comment block. - block = Comment(start[0], end[0], string) - self.blocks.append(block) - self.current_block = block - - def make_index(self): - """ Make the index mapping lines of actual code to their associated - prefix comments. 
- """ - for prev, block in zip(self.blocks[:-1], self.blocks[1:]): - if not block.is_comment: - self.index[block.start_lineno] = prev - - def search_for_comment(self, lineno, default=None): - """ Find the comment block just before the given line number. - - Returns None (or the specified default) if there is no such block. - """ - if not self.index: - self.make_index() - block = self.index.get(lineno, None) - text = getattr(block, 'text', default) - return text - - -def strip_comment_marker(text): - """ Strip # markers at the front of a block of comment text. - """ - lines = [] - for line in text.splitlines(): - lines.append(line.lstrip('#')) - text = textwrap.dedent('\n'.join(lines)) - return text - - -def get_class_traits(klass): - """ Yield all of the documentation for trait definitions on a class object. - """ - # FIXME: gracefully handle errors here or in the caller? - source = inspect.getsource(klass) - cb = CommentBlocker() - cb.process_file(StringIO(source)) - mod_ast = compiler.parse(source) - class_ast = mod_ast.node.nodes[0] - for node in class_ast.code.nodes: - # FIXME: handle other kinds of assignments? - if isinstance(node, compiler.ast.Assign): - name = node.nodes[0].name - rhs = unparse(node.expr).strip() - doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) - yield name, rhs, doc - diff -Nru numpydoc-0.4/compiler_unparse.py numpydoc-0.5/compiler_unparse.py --- numpydoc-0.4/compiler_unparse.py 2010-09-15 19:18:51.000000000 +0000 +++ numpydoc-0.5/compiler_unparse.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,860 +0,0 @@ -""" Turn compiler.ast structures back into executable python code. - - The unparse method takes a compiler.ast tree and transforms it back into - valid python code. It is incomplete and currently only works for - import statements, function calls, function definitions, assignments, and - basic expressions. - - Inspired by python-2.5-svn/Demo/parser/unparse.py - - fixme: We may want to move to using _ast trees because the compiler for - them is about 6 times faster than compiler.compile. -""" - -import sys -import cStringIO -from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add - -def unparse(ast, single_line_functions=False): - s = cStringIO.StringIO() - UnparseCompilerAst(ast, s, single_line_functions) - return s.getvalue().lstrip() - -op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, - 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } - -class UnparseCompilerAst: - """ Methods in this class recursively traverse an AST and - output source code for the abstract syntax; original formatting - is disregarged. - """ - - ######################################################################### - # object interface. - ######################################################################### - - def __init__(self, tree, file = sys.stdout, single_line_functions=False): - """ Unparser(tree, file=sys.stdout) -> None. - - Print the source for tree to file. - """ - self.f = file - self._single_func = single_line_functions - self._do_indent = True - self._indent = 0 - self._dispatch(tree) - self._write("\n") - self.f.flush() - - ######################################################################### - # Unparser private interface. 
- ######################################################################### - - ### format, output, and dispatch methods ################################ - - def _fill(self, text = ""): - "Indent a piece of text, according to the current indentation level" - if self._do_indent: - self._write("\n"+" "*self._indent + text) - else: - self._write(text) - - def _write(self, text): - "Append a piece of text to the current line." - self.f.write(text) - - def _enter(self): - "Print ':', and increase the indentation." - self._write(": ") - self._indent += 1 - - def _leave(self): - "Decrease the indentation level." - self._indent -= 1 - - def _dispatch(self, tree): - "_dispatcher function, _dispatching tree type T to method _T." - if isinstance(tree, list): - for t in tree: - self._dispatch(t) - return - meth = getattr(self, "_"+tree.__class__.__name__) - if tree.__class__.__name__ == 'NoneType' and not self._do_indent: - return - meth(tree) - - - ######################################################################### - # compiler.ast unparsing methods. - # - # There should be one method per concrete grammar type. They are - # organized in alphabetical order. - ######################################################################### - - def _Add(self, t): - self.__binary_op(t, '+') - - def _And(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") and (") - self._write(")") - - def _AssAttr(self, t): - """ Handle assigning an attribute of an object - """ - self._dispatch(t.expr) - self._write('.'+t.attrname) - - def _Assign(self, t): - """ Expression Assignment such as "a = 1". - - This only handles assignment in expressions. Keyword assignment - is handled separately. - """ - self._fill() - for target in t.nodes: - self._dispatch(target) - self._write(" = ") - self._dispatch(t.expr) - if not self._do_indent: - self._write('; ') - - def _AssName(self, t): - """ Name on left hand side of expression. - - Treat just like a name on the right side of an expression. - """ - self._Name(t) - - def _AssTuple(self, t): - """ Tuple on left hand side of an expression. - """ - - # _write each elements, separated by a comma. - for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - def _AugAssign(self, t): - """ +=,-=,*=,/=,**=, etc. operations - """ - - self._fill() - self._dispatch(t.node) - self._write(' '+t.op+' ') - self._dispatch(t.expr) - if not self._do_indent: - self._write(';') - - def _Bitand(self, t): - """ Bit and operation. - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" & ") - - def _Bitor(self, t): - """ Bit or operation - """ - - for i, node in enumerate(t.nodes): - self._write("(") - self._dispatch(node) - self._write(")") - if i != len(t.nodes)-1: - self._write(" | ") - - def _CallFunc(self, t): - """ Function call. 
- """ - self._dispatch(t.node) - self._write("(") - comma = False - for e in t.args: - if comma: self._write(", ") - else: comma = True - self._dispatch(e) - if t.star_args: - if comma: self._write(", ") - else: comma = True - self._write("*") - self._dispatch(t.star_args) - if t.dstar_args: - if comma: self._write(", ") - else: comma = True - self._write("**") - self._dispatch(t.dstar_args) - self._write(")") - - def _Compare(self, t): - self._dispatch(t.expr) - for op, expr in t.ops: - self._write(" " + op + " ") - self._dispatch(expr) - - def _Const(self, t): - """ A constant value such as an integer value, 3, or a string, "hello". - """ - self._dispatch(t.value) - - def _Decorators(self, t): - """ Handle function decorators (eg. @has_units) - """ - for node in t.nodes: - self._dispatch(node) - - def _Dict(self, t): - self._write("{") - for i, (k, v) in enumerate(t.items): - self._dispatch(k) - self._write(": ") - self._dispatch(v) - if i < len(t.items)-1: - self._write(", ") - self._write("}") - - def _Discard(self, t): - """ Node for when return value is ignored such as in "foo(a)". - """ - self._fill() - self._dispatch(t.expr) - - def _Div(self, t): - self.__binary_op(t, '/') - - def _Ellipsis(self, t): - self._write("...") - - def _From(self, t): - """ Handle "from xyz import foo, bar as baz". - """ - # fixme: Are From and ImportFrom handled differently? - self._fill("from ") - self._write(t.modname) - self._write(" import ") - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Function(self, t): - """ Handle function definitions - """ - if t.decorators is not None: - self._fill("@") - self._dispatch(t.decorators) - self._fill("def "+t.name + "(") - defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) - for i, arg in enumerate(zip(t.argnames, defaults)): - self._write(arg[0]) - if arg[1] is not None: - self._write('=') - self._dispatch(arg[1]) - if i < len(t.argnames)-1: - self._write(', ') - self._write(")") - if self._single_func: - self._do_indent = False - self._enter() - self._dispatch(t.code) - self._leave() - self._do_indent = True - - def _Getattr(self, t): - """ Handle getting an attribute of an object - """ - if isinstance(t.expr, (Div, Mul, Sub, Add)): - self._write('(') - self._dispatch(t.expr) - self._write(')') - else: - self._dispatch(t.expr) - - self._write('.'+t.attrname) - - def _If(self, t): - self._fill() - - for i, (compare,code) in enumerate(t.tests): - if i == 0: - self._write("if ") - else: - self._write("elif ") - self._dispatch(compare) - self._enter() - self._fill() - self._dispatch(code) - self._leave() - self._write("\n") - - if t.else_ is not None: - self._write("else") - self._enter() - self._fill() - self._dispatch(t.else_) - self._leave() - self._write("\n") - - def _IfExp(self, t): - self._dispatch(t.then) - self._write(" if ") - self._dispatch(t.test) - - if t.else_ is not None: - self._write(" else (") - self._dispatch(t.else_) - self._write(")") - - def _Import(self, t): - """ Handle "import xyz.foo". - """ - self._fill("import ") - - for i, (name,asname) in enumerate(t.names): - if i != 0: - self._write(", ") - self._write(name) - if asname is not None: - self._write(" as "+asname) - - def _Keyword(self, t): - """ Keyword value assignment within function calls and definitions. 
- """ - self._write(t.name) - self._write("=") - self._dispatch(t.expr) - - def _List(self, t): - self._write("[") - for i,node in enumerate(t.nodes): - self._dispatch(node) - if i < len(t.nodes)-1: - self._write(", ") - self._write("]") - - def _Module(self, t): - if t.doc is not None: - self._dispatch(t.doc) - self._dispatch(t.node) - - def _Mul(self, t): - self.__binary_op(t, '*') - - def _Name(self, t): - self._write(t.name) - - def _NoneType(self, t): - self._write("None") - - def _Not(self, t): - self._write('not (') - self._dispatch(t.expr) - self._write(')') - - def _Or(self, t): - self._write(" (") - for i, node in enumerate(t.nodes): - self._dispatch(node) - if i != len(t.nodes)-1: - self._write(") or (") - self._write(")") - - def _Pass(self, t): - self._write("pass\n") - - def _Printnl(self, t): - self._fill("print ") - if t.dest: - self._write(">> ") - self._dispatch(t.dest) - self._write(", ") - comma = False - for node in t.nodes: - if comma: self._write(', ') - else: comma = True - self._dispatch(node) - - def _Power(self, t): - self.__binary_op(t, '**') - - def _Return(self, t): - self._fill("return ") - if t.value: - if isinstance(t.value, Tuple): - text = ', '.join([ name.name for name in t.value.asList() ]) - self._write(text) - else: - self._dispatch(t.value) - if not self._do_indent: - self._write('; ') - - def _Slice(self, t): - self._dispatch(t.expr) - self._write("[") - if t.lower: - self._dispatch(t.lower) - self._write(":") - if t.upper: - self._dispatch(t.upper) - #if t.step: - # self._write(":") - # self._dispatch(t.step) - self._write("]") - - def _Sliceobj(self, t): - for i, node in enumerate(t.nodes): - if i != 0: - self._write(":") - if not (isinstance(node, Const) and node.value is None): - self._dispatch(node) - - def _Stmt(self, tree): - for node in tree.nodes: - self._dispatch(node) - - def _Sub(self, t): - self.__binary_op(t, '-') - - def _Subscript(self, t): - self._dispatch(t.expr) - self._write("[") - for i, value in enumerate(t.subs): - if i != 0: - self._write(",") - self._dispatch(value) - self._write("]") - - def _TryExcept(self, t): - self._fill("try") - self._enter() - self._dispatch(t.body) - self._leave() - - for handler in t.handlers: - self._fill('except ') - self._dispatch(handler[0]) - if handler[1] is not None: - self._write(', ') - self._dispatch(handler[1]) - self._enter() - self._dispatch(handler[2]) - self._leave() - - if t.else_: - self._fill("else") - self._enter() - self._dispatch(t.else_) - self._leave() - - def _Tuple(self, t): - - if not t.nodes: - # Empty tuple. - self._write("()") - else: - self._write("(") - - # _write each elements, separated by a comma. 
- for element in t.nodes[:-1]: - self._dispatch(element) - self._write(", ") - - # Handle the last one without writing comma - last_element = t.nodes[-1] - self._dispatch(last_element) - - self._write(")") - - def _UnaryAdd(self, t): - self._write("+") - self._dispatch(t.expr) - - def _UnarySub(self, t): - self._write("-") - self._dispatch(t.expr) - - def _With(self, t): - self._fill('with ') - self._dispatch(t.expr) - if t.vars: - self._write(' as ') - self._dispatch(t.vars.name) - self._enter() - self._dispatch(t.body) - self._leave() - self._write('\n') - - def _int(self, t): - self._write(repr(t)) - - def __binary_op(self, t, symbol): - # Check if parenthesis are needed on left side and then dispatch - has_paren = False - left_class = str(t.left.__class__) - if (left_class in op_precedence.keys() and - op_precedence[left_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.left) - if has_paren: - self._write(')') - # Write the appropriate symbol for operator - self._write(symbol) - # Check if parenthesis are needed on the right side and then dispatch - has_paren = False - right_class = str(t.right.__class__) - if (right_class in op_precedence.keys() and - op_precedence[right_class] < op_precedence[str(t.__class__)]): - has_paren = True - if has_paren: - self._write('(') - self._dispatch(t.right) - if has_paren: - self._write(')') - - def _float(self, t): - # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' - # We prefer str here. - self._write(str(t)) - - def _str(self, t): - self._write(repr(t)) - - def _tuple(self, t): - self._write(str(t)) - - ######################################################################### - # These are the methods from the _ast modules unparse. - # - # As our needs to handle more advanced code increase, we may want to - # modify some of the methods below so that they work for compiler.ast. - ######################################################################### - -# # stmt -# def _Expr(self, tree): -# self._fill() -# self._dispatch(tree.value) -# -# def _Import(self, t): -# self._fill("import ") -# first = True -# for a in t.names: -# if first: -# first = False -# else: -# self._write(", ") -# self._write(a.name) -# if a.asname: -# self._write(" as "+a.asname) -# -## def _ImportFrom(self, t): -## self._fill("from ") -## self._write(t.module) -## self._write(" import ") -## for i, a in enumerate(t.names): -## if i == 0: -## self._write(", ") -## self._write(a.name) -## if a.asname: -## self._write(" as "+a.asname) -## # XXX(jpe) what is level for? 
-## -# -# def _Break(self, t): -# self._fill("break") -# -# def _Continue(self, t): -# self._fill("continue") -# -# def _Delete(self, t): -# self._fill("del ") -# self._dispatch(t.targets) -# -# def _Assert(self, t): -# self._fill("assert ") -# self._dispatch(t.test) -# if t.msg: -# self._write(", ") -# self._dispatch(t.msg) -# -# def _Exec(self, t): -# self._fill("exec ") -# self._dispatch(t.body) -# if t.globals: -# self._write(" in ") -# self._dispatch(t.globals) -# if t.locals: -# self._write(", ") -# self._dispatch(t.locals) -# -# def _Print(self, t): -# self._fill("print ") -# do_comma = False -# if t.dest: -# self._write(">>") -# self._dispatch(t.dest) -# do_comma = True -# for e in t.values: -# if do_comma:self._write(", ") -# else:do_comma=True -# self._dispatch(e) -# if not t.nl: -# self._write(",") -# -# def _Global(self, t): -# self._fill("global") -# for i, n in enumerate(t.names): -# if i != 0: -# self._write(",") -# self._write(" " + n) -# -# def _Yield(self, t): -# self._fill("yield") -# if t.value: -# self._write(" (") -# self._dispatch(t.value) -# self._write(")") -# -# def _Raise(self, t): -# self._fill('raise ') -# if t.type: -# self._dispatch(t.type) -# if t.inst: -# self._write(", ") -# self._dispatch(t.inst) -# if t.tback: -# self._write(", ") -# self._dispatch(t.tback) -# -# -# def _TryFinally(self, t): -# self._fill("try") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# self._fill("finally") -# self._enter() -# self._dispatch(t.finalbody) -# self._leave() -# -# def _excepthandler(self, t): -# self._fill("except ") -# if t.type: -# self._dispatch(t.type) -# if t.name: -# self._write(", ") -# self._dispatch(t.name) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _ClassDef(self, t): -# self._write("\n") -# self._fill("class "+t.name) -# if t.bases: -# self._write("(") -# for a in t.bases: -# self._dispatch(a) -# self._write(", ") -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _FunctionDef(self, t): -# self._write("\n") -# for deco in t.decorators: -# self._fill("@") -# self._dispatch(deco) -# self._fill("def "+t.name + "(") -# self._dispatch(t.args) -# self._write(")") -# self._enter() -# self._dispatch(t.body) -# self._leave() -# -# def _For(self, t): -# self._fill("for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# def _While(self, t): -# self._fill("while ") -# self._dispatch(t.test) -# self._enter() -# self._dispatch(t.body) -# self._leave() -# if t.orelse: -# self._fill("else") -# self._enter() -# self._dispatch(t.orelse) -# self._leave -# -# # expr -# def _Str(self, tree): -# self._write(repr(tree.s)) -## -# def _Repr(self, t): -# self._write("`") -# self._dispatch(t.value) -# self._write("`") -# -# def _Num(self, t): -# self._write(repr(t.n)) -# -# def _ListComp(self, t): -# self._write("[") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write("]") -# -# def _GeneratorExp(self, t): -# self._write("(") -# self._dispatch(t.elt) -# for gen in t.generators: -# self._dispatch(gen) -# self._write(")") -# -# def _comprehension(self, t): -# self._write(" for ") -# self._dispatch(t.target) -# self._write(" in ") -# self._dispatch(t.iter) -# for if_clause in t.ifs: -# self._write(" if ") -# self._dispatch(if_clause) -# -# def _IfExp(self, t): -# 
self._dispatch(t.body) -# self._write(" if ") -# self._dispatch(t.test) -# if t.orelse: -# self._write(" else ") -# self._dispatch(t.orelse) -# -# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} -# def _UnaryOp(self, t): -# self._write(self.unop[t.op.__class__.__name__]) -# self._write("(") -# self._dispatch(t.operand) -# self._write(")") -# -# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", -# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", -# "FloorDiv":"//", "Pow": "**"} -# def _BinOp(self, t): -# self._write("(") -# self._dispatch(t.left) -# self._write(")" + self.binop[t.op.__class__.__name__] + "(") -# self._dispatch(t.right) -# self._write(")") -# -# boolops = {_ast.And: 'and', _ast.Or: 'or'} -# def _BoolOp(self, t): -# self._write("(") -# self._dispatch(t.values[0]) -# for v in t.values[1:]: -# self._write(" %s " % self.boolops[t.op.__class__]) -# self._dispatch(v) -# self._write(")") -# -# def _Attribute(self,t): -# self._dispatch(t.value) -# self._write(".") -# self._write(t.attr) -# -## def _Call(self, t): -## self._dispatch(t.func) -## self._write("(") -## comma = False -## for e in t.args: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## for e in t.keywords: -## if comma: self._write(", ") -## else: comma = True -## self._dispatch(e) -## if t.starargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("*") -## self._dispatch(t.starargs) -## if t.kwargs: -## if comma: self._write(", ") -## else: comma = True -## self._write("**") -## self._dispatch(t.kwargs) -## self._write(")") -# -# # slice -# def _Index(self, t): -# self._dispatch(t.value) -# -# def _ExtSlice(self, t): -# for i, d in enumerate(t.dims): -# if i != 0: -# self._write(': ') -# self._dispatch(d) -# -# # others -# def _arguments(self, t): -# first = True -# nonDef = len(t.args)-len(t.defaults) -# for a in t.args[0:nonDef]: -# if first:first = False -# else: self._write(", ") -# self._dispatch(a) -# for a,d in zip(t.args[nonDef:], t.defaults): -# if first:first = False -# else: self._write(", ") -# self._dispatch(a), -# self._write("=") -# self._dispatch(d) -# if t.vararg: -# if first:first = False -# else: self._write(", ") -# self._write("*"+t.vararg) -# if t.kwarg: -# if first:first = False -# else: self._write(", ") -# self._write("**"+t.kwarg) -# -## def _keyword(self, t): -## self._write(t.arg) -## self._write("=") -## self._dispatch(t.value) -# -# def _Lambda(self, t): -# self._write("lambda ") -# self._dispatch(t.args) -# self._write(": ") -# self._dispatch(t.body) - - - diff -Nru numpydoc-0.4/debian/changelog numpydoc-0.5/debian/changelog --- numpydoc-0.4/debian/changelog 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/changelog 2015-10-28 14:33:38.000000000 +0000 @@ -1,3 +1,33 @@ +numpydoc (0.5-2) unstable; urgency=medium + + * Update debian/copyright to mention all source files and license short + names; switch to BSD license for the Debian packaging to be consistent + with upstream (Closes: #803188). + + -- Denis Laxalde Wed, 28 Oct 2015 10:33:01 -0400 + +numpydoc (0.5-1) unstable; urgency=medium + + * Team upload. + + [ Jakub Wilk ] + * Use canonical URIs for Vcs-* fields. + + [ Denis Laxalde ] + * New upstream release (Closes: #801947). 
+ * Build python3 package as it's now supported upstream + * debian/control: + + Build-Depends on python-setuptools + + Update Homepage (numpydoc is not anymore within numpy's source tree) + + Update Standards-Version (no change needed) + * debian/rules: use pybuild. + * Drop clean-setup patch (included upstream) + + [ Barry Warsaw ] + * debian/watch: Use the pypi.debian.net redirector. + + -- Barry Warsaw Tue, 27 Oct 2015 15:53:53 -0400 + numpydoc (0.4-1) unstable; urgency=low * Initial release (Closes: #559916) diff -Nru numpydoc-0.4/debian/compat numpydoc-0.5/debian/compat --- numpydoc-0.4/debian/compat 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/compat 2015-10-28 14:33:38.000000000 +0000 @@ -1 +1 @@ -8 +9 diff -Nru numpydoc-0.4/debian/control numpydoc-0.5/debian/control --- numpydoc-0.4/debian/control 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/control 2015-10-28 14:33:38.000000000 +0000 @@ -3,12 +3,23 @@ Priority: optional Maintainer: Debian Python Modules Team Uploaders: Denis Laxalde -Build-Depends: python-all (>= 2.6.6-3), debhelper (>= 8), python-nose, python-sphinx (>= 1.0.1) -Standards-Version: 3.9.3 -Homepage: https://github.com/numpy/numpy/tree/master/doc/sphinxext -X-Python-Version: >= 2.4 -Vcs-Svn: svn://svn.debian.org/python-modules/packages/numpydoc/trunk/ -Vcs-Browser: http://svn.debian.org/viewsvn/python-modules/packages/numpydoc/trunk/ +Build-Depends: python-all (>= 2.6.6-3), + python3-all (>= 3.3), + debhelper (>= 9), + python-setuptools, + python3-setuptools, + python-nose, + python3-nose, + python-sphinx (>= 1.0.1), + python3-sphinx (>= 1.0.1), + python-matplotlib, + python3-matplotlib, +Standards-Version: 3.9.6 +Homepage: https://github.com/numpy/numpydoc +X-Python-Version: >= 2.6 +X-Python3-Version: >= 3.3 +Vcs-Git: git://anonscm.debian.org/python-modules/packages/numpydoc.git +Vcs-Browser: https://anonscm.debian.org/cgit/python-modules/packages/numpydoc.git Package: python-numpydoc Architecture: all @@ -18,3 +29,14 @@ This package defines several extensions for the Sphinx documentation system, shipped in the numpydoc Python package. In particular, these provide support for the Numpy docstring format in Sphinx. + +Package: python3-numpydoc +Architecture: all +Depends: ${misc:Depends}, ${python3:Depends}, python3-sphinx (>= 1.0.1) +Suggests: python3-matplotlib +Description: Sphinx extension to support docstrings in Numpy format -- Python3 + This package defines several extensions for the Sphinx documentation + system, shipped in the numpydoc Python package. In particular, these provide + support for the Numpy docstring format in Sphinx. + . + This package provides the Python 3 version. diff -Nru numpydoc-0.4/debian/copyright numpydoc-0.5/debian/copyright --- numpydoc-0.4/debian/copyright 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/copyright 2015-10-28 14:33:38.000000000 +0000 @@ -2,10 +2,11 @@ Upstream-Name: numpydoc Source: http://pypi.python.org/pypi/numpydoc -Files: numpydoc.py docscrape.py docscrape_sphinx.py phantom_import.py +Files: numpydoc/numpydoc.py numpydoc/docscrape.py + numpydoc/docscrape_sphinx.py numpydoc/phantom_import.py Copyright: 2008, Stefan van der Walt 2008, Pauli Virtanen -License: +License: BSD-2-clause Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: @@ -29,35 +30,14 @@ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-Files: compiler_unparse.py comment_eater.py traitsdoc.py +Files: numpydoc/compiler_unparse.py numpydoc/comment_eater.py + numpydoc/traitsdoc.py Copyright: 2006 Enthought, Inc. -License: - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - . - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Enthought, Inc. nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - . - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +License: BSD-3-clause -Files: plot_directive.py +Files: numpydoc/plot_directive.py Copyright: 2002-2008 John D. Hunter -License: +License: Python-2.0 1. This LICENSE AGREEMENT is between John D. Hunter (“JDH”), and the Individual or Organization (“Licensee”) accessing and otherwise using matplotlib software in source or binary form and its associated @@ -102,21 +82,35 @@ 8. By copying, installing or otherwise using matplotlib 0.98.3, Licensee agrees to be bound by the terms and conditions of this License Agreement. +Files: * +Copyright: Pauli Virtanen and others +License: BSD-3-clause + Files: debian/* Copyright: 2012 Denis Laxalde -License: GPL-3+ - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - . - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - . - You should have received a copy of the GNU General Public License - along with this program. If not, see . - . - On Debian systems, the complete text of the GNU General Public License - can be found in the `/usr/share/common-licenses/GPL' file. +License: BSD-3-clause + +License: BSD-3-clause + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + 1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + 2. 
Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + 3. Neither the names of the project authors nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + . + THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND + ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE + FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + SUCH DAMAGE. diff -Nru numpydoc-0.4/debian/.git-dpm numpydoc-0.5/debian/.git-dpm --- numpydoc-0.4/debian/.git-dpm 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/debian/.git-dpm 2015-10-28 14:33:38.000000000 +0000 @@ -0,0 +1,11 @@ +# see git-dpm(1) from git-dpm package +bde59512f89b264e6bb00cc9f353bbdf177bf18b +bde59512f89b264e6bb00cc9f353bbdf177bf18b +bde59512f89b264e6bb00cc9f353bbdf177bf18b +bde59512f89b264e6bb00cc9f353bbdf177bf18b +numpydoc_0.5.orig.tar.gz +5ac7ae0122669dc457d5abba1f9b81f03098bd38 +32051 +debianTag="debian/%e%v" +patchedTag="patched/%e%v" +upstreamTag="upstream/%e%u" diff -Nru numpydoc-0.4/debian/patches/clean-setup.patch numpydoc-0.5/debian/patches/clean-setup.patch --- numpydoc-0.4/debian/patches/clean-setup.patch 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/patches/clean-setup.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,37 +0,0 @@ -Description: Clean setup.py - Remove obsolete arguments from setup() and use plain distutils not - setuptools. 
-Forwarded: https://github.com/numpy/numpy/pull/298 -Applied-Upstream: https://github.com/numpy/numpy/commit/adddbc72a7aa9faeb61c708f1c2c0568d0398303 ---- a/setup.py -+++ b/setup.py -@@ -1,13 +1,11 @@ - from distutils.core import setup --import setuptools --import sys, os - - version = "0.4" - - setup( - name="numpydoc", - packages=["numpydoc"], -- package_dir={"numpydoc": ""}, -+ package_dir={"numpydoc": "."}, - version=version, - description="Sphinx extension to support docstrings in Numpy format", - # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers -@@ -20,12 +18,6 @@ - author_email="pav@iki.fi", - url="http://github.com/numpy/numpy/tree/master/doc/sphinxext", - license="BSD", -- zip_safe=False, -- install_requires=["Sphinx >= 1.0.1"], -- package_data={'numpydoc': 'tests', '': ''}, -- entry_points={ -- "console_scripts": [ -- "autosummary_generate = numpydoc.autosummary_generate:main", -- ], -- }, -+ requires=["sphinx (>= 1.0.1)"], -+ package_data={'numpydoc': ['tests/test_*.py']}, - ) diff -Nru numpydoc-0.4/debian/patches/series numpydoc-0.5/debian/patches/series --- numpydoc-0.4/debian/patches/series 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -clean-setup.patch diff -Nru numpydoc-0.4/debian/rules numpydoc-0.5/debian/rules --- numpydoc-0.4/debian/rules 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/rules 2015-10-28 14:33:38.000000000 +0000 @@ -4,14 +4,7 @@ # Uncomment this to turn on verbose mode. #export DH_VERBOSE=1 -PYTHON2=$(shell pyversions -vr) +export PYBUILD_NAME=numpydoc %: - dh $@ --with python2 - -ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) -test-python%: - nosetests-$* tests/test_docscrape.py - -override_dh_auto_test: $(PYTHON2:%=test-python%) -endif + dh $@ --with python2,python3 --buildsystem=pybuild diff -Nru numpydoc-0.4/debian/watch numpydoc-0.5/debian/watch --- numpydoc-0.4/debian/watch 2012-12-09 21:09:05.000000000 +0000 +++ numpydoc-0.5/debian/watch 2015-10-28 14:33:38.000000000 +0000 @@ -1,2 +1,3 @@ version=3 -http://pypi.python.org/packages/source/n/numpydoc/numpydoc-([0-9a-z.]*)\.tar\.gz +opts=uversionmangle=s/(rc|a|b|c)/~$1/ \ +https://pypi.debian.net/numpydoc/numpydoc-(.+)\.(?:zip|tgz|tbz|txz|(?:tar\.(?:gz|bz2|xz))) diff -Nru numpydoc-0.4/docscrape.py numpydoc-0.5/docscrape.py --- numpydoc-0.4/docscrape.py 2010-10-01 10:34:32.000000000 +0000 +++ numpydoc-0.5/docscrape.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,500 +0,0 @@ -"""Extract reference documentation from the NumPy source tree. - -""" - -import inspect -import textwrap -import re -import pydoc -from StringIO import StringIO -from warnings import warn - -class Reader(object): - """A line-based string reader. - - """ - def __init__(self, data): - """ - Parameters - ---------- - data : str - String with lines separated by '\n'. 
- - """ - if isinstance(data,list): - self._str = data - else: - self._str = data.split('\n') # store string as list of lines - - self.reset() - - def __getitem__(self, n): - return self._str[n] - - def reset(self): - self._l = 0 # current line nr - - def read(self): - if not self.eof(): - out = self[self._l] - self._l += 1 - return out - else: - return '' - - def seek_next_non_empty_line(self): - for l in self[self._l:]: - if l.strip(): - break - else: - self._l += 1 - - def eof(self): - return self._l >= len(self._str) - - def read_to_condition(self, condition_func): - start = self._l - for line in self[start:]: - if condition_func(line): - return self[start:self._l] - self._l += 1 - if self.eof(): - return self[start:self._l+1] - return [] - - def read_to_next_empty_line(self): - self.seek_next_non_empty_line() - def is_empty(line): - return not line.strip() - return self.read_to_condition(is_empty) - - def read_to_next_unindented_line(self): - def is_unindented(line): - return (line.strip() and (len(line.lstrip()) == len(line))) - return self.read_to_condition(is_unindented) - - def peek(self,n=0): - if self._l + n < len(self._str): - return self[self._l + n] - else: - return '' - - def is_empty(self): - return not ''.join(self._str).strip() - - -class NumpyDocString(object): - def __init__(self, docstring, config={}): - docstring = textwrap.dedent(docstring).split('\n') - - self._doc = Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': [''], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Attributes': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'Warnings': [], - 'References': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def __getitem__(self,key): - return self._parsed_data[key] - - def __setitem__(self,key,val): - if not self._parsed_data.has_key(key): - warn("Unknown section %s" % key) - else: - self._parsed_data[key] = val - - def _is_at_section(self): - self._doc.seek_next_non_empty_line() - - if self._doc.eof(): - return False - - l1 = self._doc.peek().strip() # e.g. Parameters - - if l1.startswith('.. 
index::'): - return True - - l2 = self._doc.peek(1).strip() # ---------- or ========== - return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) - - def _strip(self,doc): - i = 0 - j = 0 - for i,line in enumerate(doc): - if line.strip(): break - - for j,line in enumerate(doc[::-1]): - if line.strip(): break - - return doc[i:len(doc)-j] - - def _read_to_next_section(self): - section = self._doc.read_to_next_empty_line() - - while not self._is_at_section() and not self._doc.eof(): - if not self._doc.peek(-1).strip(): # previous line was empty - section += [''] - - section += self._doc.read_to_next_empty_line() - - return section - - def _read_sections(self): - while not self._doc.eof(): - data = self._read_to_next_section() - name = data[0].strip() - - if name.startswith('..'): # index section - yield name, data[1:] - elif len(data) < 2: - yield StopIteration - else: - yield name, self._strip(data[2:]) - - def _parse_param_list(self,content): - r = Reader(content) - params = [] - while not r.eof(): - header = r.read().strip() - if ' : ' in header: - arg_name, arg_type = header.split(' : ')[:2] - else: - arg_name, arg_type = header, '' - - desc = r.read_to_next_unindented_line() - desc = dedent_lines(desc) - - params.append((arg_name,arg_type,desc)) - - return params - - - _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" - r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) - def _parse_see_also(self, content): - """ - func_name : Descriptive text - continued text - another_func_name : Descriptive text - func_name1, func_name2, :meth:`func_name`, func_name3 - - """ - items = [] - - def parse_item_name(text): - """Match ':role:`name`' or 'name'""" - m = self._name_rgx.match(text) - if m: - g = m.groups() - if g[1] is None: - return g[3], None - else: - return g[2], g[1] - raise ValueError("%s is not a item name" % text) - - def push_item(name, rest): - if not name: - return - name, role = parse_item_name(name) - items.append((name, list(rest), role)) - del rest[:] - - current_func = None - rest = [] - - for line in content: - if not line.strip(): continue - - m = self._name_rgx.match(line) - if m and line[m.end():].strip().startswith(':'): - push_item(current_func, rest) - current_func, line = line[:m.end()], line[m.end():] - rest = [line.split(':', 1)[1].strip()] - if not rest[0]: - rest = [] - elif not line.startswith(' '): - push_item(current_func, rest) - current_func = None - if ',' in line: - for func in line.split(','): - if func.strip(): - push_item(func, []) - elif line.strip(): - current_func = line - elif current_func is not None: - rest.append(line.strip()) - push_item(current_func, rest) - return items - - def _parse_index(self, section, content): - """ - .. 
index: default - :refguide: something, else, and more - - """ - def strip_each_in(lst): - return [s.strip() for s in lst] - - out = {} - section = section.split('::') - if len(section) > 1: - out['default'] = strip_each_in(section[1].split(','))[0] - for line in content: - line = line.split(':') - if len(line) > 2: - out[line[1]] = strip_each_in(line[2].split(',')) - return out - - def _parse_summary(self): - """Grab signature (if given) and summary""" - if self._is_at_section(): - return - - summary = self._doc.read_to_next_empty_line() - summary_str = " ".join([s.strip() for s in summary]).strip() - if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): - self['Signature'] = summary_str - if not self._is_at_section(): - self['Summary'] = self._doc.read_to_next_empty_line() - else: - self['Summary'] = summary - - if not self._is_at_section(): - self['Extended Summary'] = self._read_to_next_section() - - def _parse(self): - self._doc.reset() - self._parse_summary() - - for (section,content) in self._read_sections(): - if not section.startswith('..'): - section = ' '.join([s.capitalize() for s in section.split(' ')]) - if section in ('Parameters', 'Returns', 'Raises', 'Warns', - 'Other Parameters', 'Attributes', 'Methods'): - self[section] = self._parse_param_list(content) - elif section.startswith('.. index::'): - self['index'] = self._parse_index(section, content) - elif section == 'See Also': - self['See Also'] = self._parse_see_also(content) - else: - self[section] = content - - # string conversion routines - - def _str_header(self, name, symbol='-'): - return [name, len(name)*symbol] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - if self['Signature']: - return [self['Signature'].replace('*','\*')] + [''] - else: - return [''] - - def _str_summary(self): - if self['Summary']: - return self['Summary'] + [''] - else: - return [] - - def _str_extended_summary(self): - if self['Extended Summary']: - return self['Extended Summary'] + [''] - else: - return [] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_header(name) - for param,param_type,desc in self[name]: - out += ['%s : %s' % (param, param_type)] - out += self._str_indent(desc) - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += self[name] - out += [''] - return out - - def _str_see_also(self, func_role): - if not self['See Also']: return [] - out = [] - out += self._str_header("See Also") - last_had_desc = True - for func, desc, role in self['See Also']: - if role: - link = ':%s:`%s`' % (role, func) - elif func_role: - link = ':%s:`%s`' % (func_role, func) - else: - link = "`%s`_" % func - if desc or last_had_desc: - out += [''] - out += [link] - else: - out[-1] += ", %s" % link - if desc: - out += self._str_indent([' '.join(desc)]) - last_had_desc = True - else: - last_had_desc = False - out += [''] - return out - - def _str_index(self): - idx = self['index'] - out = [] - out += ['.. 
index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - out += [' :%s: %s' % (section, ', '.join(references))] - return out - - def __str__(self, func_role=''): - out = [] - out += self._str_signature() - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_section('Warnings') - out += self._str_see_also(func_role) - for s in ('Notes','References','Examples'): - out += self._str_section(s) - for param_list in ('Attributes', 'Methods'): - out += self._str_param_list(param_list) - out += self._str_index() - return '\n'.join(out) - - -def indent(str,indent=4): - indent_str = ' '*indent - if str is None: - return indent_str - lines = str.split('\n') - return '\n'.join(indent_str + l for l in lines) - -def dedent_lines(lines): - """Deindent a list of lines maximally""" - return textwrap.dedent("\n".join(lines)).split("\n") - -def header(text, style='-'): - return text + '\n' + style*len(text) + '\n' - - -class FunctionDoc(NumpyDocString): - def __init__(self, func, role='func', doc=None, config={}): - self._f = func - self._role = role # e.g. "func" or "meth" - - if doc is None: - if func is None: - raise ValueError("No function or docstring given") - doc = inspect.getdoc(func) or '' - NumpyDocString.__init__(self, doc) - - if not self['Signature'] and func is not None: - func, func_name = self.get_func() - try: - # try to read signature - argspec = inspect.getargspec(func) - argspec = inspect.formatargspec(*argspec) - argspec = argspec.replace('*','\*') - signature = '%s%s' % (func_name, argspec) - except TypeError, e: - signature = '%s()' % func_name - self['Signature'] = signature - - def get_func(self): - func_name = getattr(self._f, '__name__', self.__class__.__name__) - if inspect.isclass(self._f): - func = getattr(self._f, '__call__', self._f.__init__) - else: - func = self._f - return func, func_name - - def __str__(self): - out = '' - - func, func_name = self.get_func() - signature = self['Signature'].replace('*', '\*') - - roles = {'func': 'function', - 'meth': 'method'} - - if self._role: - if not roles.has_key(self._role): - print "Warning: invalid role %s" % self._role - out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), - func_name) - - out += super(FunctionDoc, self).__str__(func_role=self._role) - return out - - -class ClassDoc(NumpyDocString): - def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, - config={}): - if not inspect.isclass(cls) and cls is not None: - raise ValueError("Expected a class or None, but got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' 
- self._mod = modulename - - if doc is None: - if cls is None: - raise ValueError("No class or documentation string given") - doc = pydoc.getdoc(cls) - - NumpyDocString.__init__(self, doc) - - if config.get('show_class_members', True): - if not self['Methods']: - self['Methods'] = [(name, '', '') - for name in sorted(self.methods)] - if not self['Attributes']: - self['Attributes'] = [(name, '', '') - for name in sorted(self.properties)] - - @property - def methods(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and callable(func)] - - @property - def properties(self): - if self._cls is None: - return [] - return [name for name,func in inspect.getmembers(self._cls) - if not name.startswith('_') and func is None] diff -Nru numpydoc-0.4/docscrape_sphinx.py numpydoc-0.5/docscrape_sphinx.py --- numpydoc-0.4/docscrape_sphinx.py 2010-10-01 10:34:32.000000000 +0000 +++ numpydoc-0.5/docscrape_sphinx.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,227 +0,0 @@ -import re, inspect, textwrap, pydoc -import sphinx -from docscrape import NumpyDocString, FunctionDoc, ClassDoc - -class SphinxDocString(NumpyDocString): - def __init__(self, docstring, config={}): - self.use_plots = config.get('use_plots', False) - NumpyDocString.__init__(self, docstring, config=config) - - # string conversion routines - def _str_header(self, name, symbol='`'): - return ['.. rubric:: ' + name, ''] - - def _str_field_list(self, name): - return [':' + name + ':'] - - def _str_indent(self, doc, indent=4): - out = [] - for line in doc: - out += [' '*indent + line] - return out - - def _str_signature(self): - return [''] - if self['Signature']: - return ['``%s``' % self['Signature']] + [''] - else: - return [''] - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Extended Summary'] + [''] - - def _str_param_list(self, name): - out = [] - if self[name]: - out += self._str_field_list(name) - out += [''] - for param,param_type,desc in self[name]: - out += self._str_indent(['**%s** : %s' % (param.strip(), - param_type)]) - out += [''] - out += self._str_indent(desc,8) - out += [''] - return out - - @property - def _obj(self): - if hasattr(self, '_cls'): - return self._cls - elif hasattr(self, '_f'): - return self._f - return None - - def _str_member_list(self, name): - """ - Generate a member listing, autosummary:: table where possible, - and a table where not. - - """ - out = [] - if self[name]: - out += ['.. rubric:: %s' % name, ''] - prefix = getattr(self, '_name', '') - - if prefix: - prefix = '~%s.' % prefix - - autosum = [] - others = [] - for param, param_type, desc in self[name]: - param = param.strip() - if not self._obj or hasattr(self._obj, param): - autosum += [" %s%s" % (prefix, param)] - else: - others.append((param, param_type, desc)) - - if autosum: - out += ['.. 
autosummary::', ' :toctree:', ''] - out += autosum - - if others: - maxlen_0 = max([len(x[0]) for x in others]) - maxlen_1 = max([len(x[1]) for x in others]) - hdr = "="*maxlen_0 + " " + "="*maxlen_1 + " " + "="*10 - fmt = '%%%ds %%%ds ' % (maxlen_0, maxlen_1) - n_indent = maxlen_0 + maxlen_1 + 4 - out += [hdr] - for param, param_type, desc in others: - out += [fmt % (param.strip(), param_type)] - out += self._str_indent(desc, n_indent) - out += [hdr] - out += [''] - return out - - def _str_section(self, name): - out = [] - if self[name]: - out += self._str_header(name) - out += [''] - content = textwrap.dedent("\n".join(self[name])).split("\n") - out += content - out += [''] - return out - - def _str_see_also(self, func_role): - out = [] - if self['See Also']: - see_also = super(SphinxDocString, self)._str_see_also(func_role) - out = ['.. seealso::', ''] - out += self._str_indent(see_also[2:]) - return out - - def _str_warnings(self): - out = [] - if self['Warnings']: - out = ['.. warning::', ''] - out += self._str_indent(self['Warnings']) - return out - - def _str_index(self): - idx = self['index'] - out = [] - if len(idx) == 0: - return out - - out += ['.. index:: %s' % idx.get('default','')] - for section, references in idx.iteritems(): - if section == 'default': - continue - elif section == 'refguide': - out += [' single: %s' % (', '.join(references))] - else: - out += [' %s: %s' % (section, ','.join(references))] - return out - - def _str_references(self): - out = [] - if self['References']: - out += self._str_header('References') - if isinstance(self['References'], str): - self['References'] = [self['References']] - out.extend(self['References']) - out += [''] - # Latex collects all references to a separate bibliography, - # so we need to insert links to it - if sphinx.__version__ >= "0.6": - out += ['.. only:: latex',''] - else: - out += ['.. latexonly::',''] - items = [] - for line in self['References']: - m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) - if m: - items.append(m.group(1)) - out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] - return out - - def _str_examples(self): - examples_str = "\n".join(self['Examples']) - - if (self.use_plots and 'import matplotlib' in examples_str - and 'plot::' not in examples_str): - out = [] - out += self._str_header('Examples') - out += ['.. 
plot::', ''] - out += self._str_indent(self['Examples']) - out += [''] - return out - else: - return self._str_section('Examples') - - def __str__(self, indent=0, func_role="obj"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Returns', 'Other Parameters', - 'Raises', 'Warns'): - out += self._str_param_list(param_list) - out += self._str_warnings() - out += self._str_see_also(func_role) - out += self._str_section('Notes') - out += self._str_references() - out += self._str_examples() - for param_list in ('Attributes', 'Methods'): - out += self._str_member_list(param_list) - out = self._str_indent(out,indent) - return '\n'.join(out) - -class SphinxFunctionDoc(SphinxDocString, FunctionDoc): - def __init__(self, obj, doc=None, config={}): - self.use_plots = config.get('use_plots', False) - FunctionDoc.__init__(self, obj, doc=doc, config=config) - -class SphinxClassDoc(SphinxDocString, ClassDoc): - def __init__(self, obj, doc=None, func_doc=None, config={}): - self.use_plots = config.get('use_plots', False) - ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) - -class SphinxObjDoc(SphinxDocString): - def __init__(self, obj, doc=None, config={}): - self._f = obj - SphinxDocString.__init__(self, doc, config=config) - -def get_doc_object(obj, what=None, doc=None, config={}): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 'object' - if what == 'class': - return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, - config=config) - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, doc=doc, config=config) - else: - if doc is None: - doc = pydoc.getdoc(obj) - return SphinxObjDoc(obj, doc, config=config) diff -Nru numpydoc-0.4/__init__.py numpydoc-0.5/__init__.py --- numpydoc-0.4/__init__.py 2010-09-15 19:34:24.000000000 +0000 +++ numpydoc-0.5/__init__.py 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -from numpydoc import setup diff -Nru numpydoc-0.4/LICENSE.txt numpydoc-0.5/LICENSE.txt --- numpydoc-0.4/LICENSE.txt 2010-09-15 19:18:51.000000000 +0000 +++ numpydoc-0.5/LICENSE.txt 2013-07-24 16:15:41.000000000 +0000 @@ -1,8 +1,6 @@ ------------------------------------------------------------------------------- The files - numpydoc.py - - autosummary.py - - autosummary_generate.py - docscrape.py - docscrape_sphinx.py - phantom_import.py @@ -71,10 +69,9 @@ ------------------------------------------------------------------------------- - The files - - only_directives.py + The file - plot_directive.py - originate from Matplotlib (http://matplotlib.sf.net/) which has + originates from Matplotlib (http://matplotlib.sf.net/) which has the following license: Copyright (c) 2002-2008 John D. Hunter; All Rights Reserved. 
diff -Nru numpydoc-0.4/MANIFEST.in numpydoc-0.5/MANIFEST.in --- numpydoc-0.4/MANIFEST.in 2010-09-15 19:34:24.000000000 +0000 +++ numpydoc-0.5/MANIFEST.in 2013-07-24 16:15:41.000000000 +0000 @@ -1,2 +1,2 @@ -recursive-include tests *.py +recursive-include numpydoc/tests *.py include *.txt diff -Nru numpydoc-0.4/numpydoc/comment_eater.py numpydoc-0.5/numpydoc/comment_eater.py --- numpydoc-0.4/numpydoc/comment_eater.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/comment_eater.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,169 @@ +from __future__ import division, absolute_import, print_function + +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import compiler +import inspect +import textwrap +import tokenize + +from .compiler_unparse import unparse + + +class Comment(object): + """ A comment block. + """ + is_comment = True + def __init__(self, start_lineno, end_lineno, text): + # int : The first line number in the block. 1-indexed. + self.start_lineno = start_lineno + # int : The last line number. Inclusive! + self.end_lineno = end_lineno + # str : The text block including '#' character but not any leading spaces. + self.text = text + + def add(self, string, start, end, line): + """ Add a new comment line. + """ + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + self.text += string + + def __repr__(self): + return '%s(%r, %r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno, self.text) + + +class NonComment(object): + """ A non-comment block of code. + """ + is_comment = False + def __init__(self, start_lineno, end_lineno): + self.start_lineno = start_lineno + self.end_lineno = end_lineno + + def add(self, string, start, end, line): + """ Add lines to the block. + """ + if string.strip(): + # Only add if not entirely whitespace. + self.start_lineno = min(self.start_lineno, start[0]) + self.end_lineno = max(self.end_lineno, end[0]) + + def __repr__(self): + return '%s(%r, %r)' % (self.__class__.__name__, self.start_lineno, + self.end_lineno) + + +class CommentBlocker(object): + """ Pull out contiguous comment blocks. + """ + def __init__(self): + # Start with a dummy. + self.current_block = NonComment(0, 0) + + # All of the blocks seen so far. + self.blocks = [] + + # The index mapping lines of code to their associated comment blocks. + self.index = {} + + def process_file(self, file): + """ Process a file object. + """ + if sys.version_info[0] >= 3: + nxt = file.__next__ + else: + nxt = file.next + for token in tokenize.generate_tokens(nxt): + self.process_token(*token) + self.make_index() + + def process_token(self, kind, string, start, end, line): + """ Process a single token. + """ + if self.current_block.is_comment: + if kind == tokenize.COMMENT: + self.current_block.add(string, start, end, line) + else: + self.new_noncomment(start[0], end[0]) + else: + if kind == tokenize.COMMENT: + self.new_comment(string, start, end, line) + else: + self.current_block.add(string, start, end, line) + + def new_noncomment(self, start_lineno, end_lineno): + """ We are transitioning from a noncomment to a comment. + """ + block = NonComment(start_lineno, end_lineno) + self.blocks.append(block) + self.current_block = block + + def new_comment(self, string, start, end, line): + """ Possibly add a new comment. + + Only adds a new comment if this comment is the only thing on the line. + Otherwise, it extends the noncomment block. 
+ """ + prefix = line[:start[1]] + if prefix.strip(): + # Oops! Trailing comment, not a comment block. + self.current_block.add(string, start, end, line) + else: + # A comment block. + block = Comment(start[0], end[0], string) + self.blocks.append(block) + self.current_block = block + + def make_index(self): + """ Make the index mapping lines of actual code to their associated + prefix comments. + """ + for prev, block in zip(self.blocks[:-1], self.blocks[1:]): + if not block.is_comment: + self.index[block.start_lineno] = prev + + def search_for_comment(self, lineno, default=None): + """ Find the comment block just before the given line number. + + Returns None (or the specified default) if there is no such block. + """ + if not self.index: + self.make_index() + block = self.index.get(lineno, None) + text = getattr(block, 'text', default) + return text + + +def strip_comment_marker(text): + """ Strip # markers at the front of a block of comment text. + """ + lines = [] + for line in text.splitlines(): + lines.append(line.lstrip('#')) + text = textwrap.dedent('\n'.join(lines)) + return text + + +def get_class_traits(klass): + """ Yield all of the documentation for trait definitions on a class object. + """ + # FIXME: gracefully handle errors here or in the caller? + source = inspect.getsource(klass) + cb = CommentBlocker() + cb.process_file(StringIO(source)) + mod_ast = compiler.parse(source) + class_ast = mod_ast.node.nodes[0] + for node in class_ast.code.nodes: + # FIXME: handle other kinds of assignments? + if isinstance(node, compiler.ast.Assign): + name = node.nodes[0].name + rhs = unparse(node.expr).strip() + doc = strip_comment_marker(cb.search_for_comment(node.lineno, default='')) + yield name, rhs, doc + diff -Nru numpydoc-0.4/numpydoc/compiler_unparse.py numpydoc-0.5/numpydoc/compiler_unparse.py --- numpydoc-0.4/numpydoc/compiler_unparse.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/compiler_unparse.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,865 @@ +""" Turn compiler.ast structures back into executable python code. + + The unparse method takes a compiler.ast tree and transforms it back into + valid python code. It is incomplete and currently only works for + import statements, function calls, function definitions, assignments, and + basic expressions. + + Inspired by python-2.5-svn/Demo/parser/unparse.py + + fixme: We may want to move to using _ast trees because the compiler for + them is about 6 times faster than compiler.compile. +""" +from __future__ import division, absolute_import, print_function + +import sys +from compiler.ast import Const, Name, Tuple, Div, Mul, Sub, Add + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO + +def unparse(ast, single_line_functions=False): + s = StringIO() + UnparseCompilerAst(ast, s, single_line_functions) + return s.getvalue().lstrip() + +op_precedence = { 'compiler.ast.Power':3, 'compiler.ast.Mul':2, 'compiler.ast.Div':2, + 'compiler.ast.Add':1, 'compiler.ast.Sub':1 } + +class UnparseCompilerAst: + """ Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarged. + """ + + ######################################################################### + # object interface. + ######################################################################### + + def __init__(self, tree, file = sys.stdout, single_line_functions=False): + """ Unparser(tree, file=sys.stdout) -> None. 
+ + Print the source for tree to file. + """ + self.f = file + self._single_func = single_line_functions + self._do_indent = True + self._indent = 0 + self._dispatch(tree) + self._write("\n") + self.f.flush() + + ######################################################################### + # Unparser private interface. + ######################################################################### + + ### format, output, and dispatch methods ################################ + + def _fill(self, text = ""): + "Indent a piece of text, according to the current indentation level" + if self._do_indent: + self._write("\n"+" "*self._indent + text) + else: + self._write(text) + + def _write(self, text): + "Append a piece of text to the current line." + self.f.write(text) + + def _enter(self): + "Print ':', and increase the indentation." + self._write(": ") + self._indent += 1 + + def _leave(self): + "Decrease the indentation level." + self._indent -= 1 + + def _dispatch(self, tree): + "_dispatcher function, _dispatching tree type T to method _T." + if isinstance(tree, list): + for t in tree: + self._dispatch(t) + return + meth = getattr(self, "_"+tree.__class__.__name__) + if tree.__class__.__name__ == 'NoneType' and not self._do_indent: + return + meth(tree) + + + ######################################################################### + # compiler.ast unparsing methods. + # + # There should be one method per concrete grammar type. They are + # organized in alphabetical order. + ######################################################################### + + def _Add(self, t): + self.__binary_op(t, '+') + + def _And(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") and (") + self._write(")") + + def _AssAttr(self, t): + """ Handle assigning an attribute of an object + """ + self._dispatch(t.expr) + self._write('.'+t.attrname) + + def _Assign(self, t): + """ Expression Assignment such as "a = 1". + + This only handles assignment in expressions. Keyword assignment + is handled separately. + """ + self._fill() + for target in t.nodes: + self._dispatch(target) + self._write(" = ") + self._dispatch(t.expr) + if not self._do_indent: + self._write('; ') + + def _AssName(self, t): + """ Name on left hand side of expression. + + Treat just like a name on the right side of an expression. + """ + self._Name(t) + + def _AssTuple(self, t): + """ Tuple on left hand side of an expression. + """ + + # _write each elements, separated by a comma. + for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + def _AugAssign(self, t): + """ +=,-=,*=,/=,**=, etc. operations + """ + + self._fill() + self._dispatch(t.node) + self._write(' '+t.op+' ') + self._dispatch(t.expr) + if not self._do_indent: + self._write(';') + + def _Bitand(self, t): + """ Bit and operation. + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" & ") + + def _Bitor(self, t): + """ Bit or operation + """ + + for i, node in enumerate(t.nodes): + self._write("(") + self._dispatch(node) + self._write(")") + if i != len(t.nodes)-1: + self._write(" | ") + + def _CallFunc(self, t): + """ Function call. 
+ """ + self._dispatch(t.node) + self._write("(") + comma = False + for e in t.args: + if comma: self._write(", ") + else: comma = True + self._dispatch(e) + if t.star_args: + if comma: self._write(", ") + else: comma = True + self._write("*") + self._dispatch(t.star_args) + if t.dstar_args: + if comma: self._write(", ") + else: comma = True + self._write("**") + self._dispatch(t.dstar_args) + self._write(")") + + def _Compare(self, t): + self._dispatch(t.expr) + for op, expr in t.ops: + self._write(" " + op + " ") + self._dispatch(expr) + + def _Const(self, t): + """ A constant value such as an integer value, 3, or a string, "hello". + """ + self._dispatch(t.value) + + def _Decorators(self, t): + """ Handle function decorators (eg. @has_units) + """ + for node in t.nodes: + self._dispatch(node) + + def _Dict(self, t): + self._write("{") + for i, (k, v) in enumerate(t.items): + self._dispatch(k) + self._write(": ") + self._dispatch(v) + if i < len(t.items)-1: + self._write(", ") + self._write("}") + + def _Discard(self, t): + """ Node for when return value is ignored such as in "foo(a)". + """ + self._fill() + self._dispatch(t.expr) + + def _Div(self, t): + self.__binary_op(t, '/') + + def _Ellipsis(self, t): + self._write("...") + + def _From(self, t): + """ Handle "from xyz import foo, bar as baz". + """ + # fixme: Are From and ImportFrom handled differently? + self._fill("from ") + self._write(t.modname) + self._write(" import ") + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Function(self, t): + """ Handle function definitions + """ + if t.decorators is not None: + self._fill("@") + self._dispatch(t.decorators) + self._fill("def "+t.name + "(") + defaults = [None] * (len(t.argnames) - len(t.defaults)) + list(t.defaults) + for i, arg in enumerate(zip(t.argnames, defaults)): + self._write(arg[0]) + if arg[1] is not None: + self._write('=') + self._dispatch(arg[1]) + if i < len(t.argnames)-1: + self._write(', ') + self._write(")") + if self._single_func: + self._do_indent = False + self._enter() + self._dispatch(t.code) + self._leave() + self._do_indent = True + + def _Getattr(self, t): + """ Handle getting an attribute of an object + """ + if isinstance(t.expr, (Div, Mul, Sub, Add)): + self._write('(') + self._dispatch(t.expr) + self._write(')') + else: + self._dispatch(t.expr) + + self._write('.'+t.attrname) + + def _If(self, t): + self._fill() + + for i, (compare,code) in enumerate(t.tests): + if i == 0: + self._write("if ") + else: + self._write("elif ") + self._dispatch(compare) + self._enter() + self._fill() + self._dispatch(code) + self._leave() + self._write("\n") + + if t.else_ is not None: + self._write("else") + self._enter() + self._fill() + self._dispatch(t.else_) + self._leave() + self._write("\n") + + def _IfExp(self, t): + self._dispatch(t.then) + self._write(" if ") + self._dispatch(t.test) + + if t.else_ is not None: + self._write(" else (") + self._dispatch(t.else_) + self._write(")") + + def _Import(self, t): + """ Handle "import xyz.foo". + """ + self._fill("import ") + + for i, (name,asname) in enumerate(t.names): + if i != 0: + self._write(", ") + self._write(name) + if asname is not None: + self._write(" as "+asname) + + def _Keyword(self, t): + """ Keyword value assignment within function calls and definitions. 
+ """ + self._write(t.name) + self._write("=") + self._dispatch(t.expr) + + def _List(self, t): + self._write("[") + for i,node in enumerate(t.nodes): + self._dispatch(node) + if i < len(t.nodes)-1: + self._write(", ") + self._write("]") + + def _Module(self, t): + if t.doc is not None: + self._dispatch(t.doc) + self._dispatch(t.node) + + def _Mul(self, t): + self.__binary_op(t, '*') + + def _Name(self, t): + self._write(t.name) + + def _NoneType(self, t): + self._write("None") + + def _Not(self, t): + self._write('not (') + self._dispatch(t.expr) + self._write(')') + + def _Or(self, t): + self._write(" (") + for i, node in enumerate(t.nodes): + self._dispatch(node) + if i != len(t.nodes)-1: + self._write(") or (") + self._write(")") + + def _Pass(self, t): + self._write("pass\n") + + def _Printnl(self, t): + self._fill("print ") + if t.dest: + self._write(">> ") + self._dispatch(t.dest) + self._write(", ") + comma = False + for node in t.nodes: + if comma: self._write(', ') + else: comma = True + self._dispatch(node) + + def _Power(self, t): + self.__binary_op(t, '**') + + def _Return(self, t): + self._fill("return ") + if t.value: + if isinstance(t.value, Tuple): + text = ', '.join([ name.name for name in t.value.asList() ]) + self._write(text) + else: + self._dispatch(t.value) + if not self._do_indent: + self._write('; ') + + def _Slice(self, t): + self._dispatch(t.expr) + self._write("[") + if t.lower: + self._dispatch(t.lower) + self._write(":") + if t.upper: + self._dispatch(t.upper) + #if t.step: + # self._write(":") + # self._dispatch(t.step) + self._write("]") + + def _Sliceobj(self, t): + for i, node in enumerate(t.nodes): + if i != 0: + self._write(":") + if not (isinstance(node, Const) and node.value is None): + self._dispatch(node) + + def _Stmt(self, tree): + for node in tree.nodes: + self._dispatch(node) + + def _Sub(self, t): + self.__binary_op(t, '-') + + def _Subscript(self, t): + self._dispatch(t.expr) + self._write("[") + for i, value in enumerate(t.subs): + if i != 0: + self._write(",") + self._dispatch(value) + self._write("]") + + def _TryExcept(self, t): + self._fill("try") + self._enter() + self._dispatch(t.body) + self._leave() + + for handler in t.handlers: + self._fill('except ') + self._dispatch(handler[0]) + if handler[1] is not None: + self._write(', ') + self._dispatch(handler[1]) + self._enter() + self._dispatch(handler[2]) + self._leave() + + if t.else_: + self._fill("else") + self._enter() + self._dispatch(t.else_) + self._leave() + + def _Tuple(self, t): + + if not t.nodes: + # Empty tuple. + self._write("()") + else: + self._write("(") + + # _write each elements, separated by a comma. 
+ for element in t.nodes[:-1]: + self._dispatch(element) + self._write(", ") + + # Handle the last one without writing comma + last_element = t.nodes[-1] + self._dispatch(last_element) + + self._write(")") + + def _UnaryAdd(self, t): + self._write("+") + self._dispatch(t.expr) + + def _UnarySub(self, t): + self._write("-") + self._dispatch(t.expr) + + def _With(self, t): + self._fill('with ') + self._dispatch(t.expr) + if t.vars: + self._write(' as ') + self._dispatch(t.vars.name) + self._enter() + self._dispatch(t.body) + self._leave() + self._write('\n') + + def _int(self, t): + self._write(repr(t)) + + def __binary_op(self, t, symbol): + # Check if parenthesis are needed on left side and then dispatch + has_paren = False + left_class = str(t.left.__class__) + if (left_class in op_precedence.keys() and + op_precedence[left_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.left) + if has_paren: + self._write(')') + # Write the appropriate symbol for operator + self._write(symbol) + # Check if parenthesis are needed on the right side and then dispatch + has_paren = False + right_class = str(t.right.__class__) + if (right_class in op_precedence.keys() and + op_precedence[right_class] < op_precedence[str(t.__class__)]): + has_paren = True + if has_paren: + self._write('(') + self._dispatch(t.right) + if has_paren: + self._write(')') + + def _float(self, t): + # if t is 0.1, str(t)->'0.1' while repr(t)->'0.1000000000001' + # We prefer str here. + self._write(str(t)) + + def _str(self, t): + self._write(repr(t)) + + def _tuple(self, t): + self._write(str(t)) + + ######################################################################### + # These are the methods from the _ast modules unparse. + # + # As our needs to handle more advanced code increase, we may want to + # modify some of the methods below so that they work for compiler.ast. + ######################################################################### + +# # stmt +# def _Expr(self, tree): +# self._fill() +# self._dispatch(tree.value) +# +# def _Import(self, t): +# self._fill("import ") +# first = True +# for a in t.names: +# if first: +# first = False +# else: +# self._write(", ") +# self._write(a.name) +# if a.asname: +# self._write(" as "+a.asname) +# +## def _ImportFrom(self, t): +## self._fill("from ") +## self._write(t.module) +## self._write(" import ") +## for i, a in enumerate(t.names): +## if i == 0: +## self._write(", ") +## self._write(a.name) +## if a.asname: +## self._write(" as "+a.asname) +## # XXX(jpe) what is level for? 
+## +# +# def _Break(self, t): +# self._fill("break") +# +# def _Continue(self, t): +# self._fill("continue") +# +# def _Delete(self, t): +# self._fill("del ") +# self._dispatch(t.targets) +# +# def _Assert(self, t): +# self._fill("assert ") +# self._dispatch(t.test) +# if t.msg: +# self._write(", ") +# self._dispatch(t.msg) +# +# def _Exec(self, t): +# self._fill("exec ") +# self._dispatch(t.body) +# if t.globals: +# self._write(" in ") +# self._dispatch(t.globals) +# if t.locals: +# self._write(", ") +# self._dispatch(t.locals) +# +# def _Print(self, t): +# self._fill("print ") +# do_comma = False +# if t.dest: +# self._write(">>") +# self._dispatch(t.dest) +# do_comma = True +# for e in t.values: +# if do_comma:self._write(", ") +# else:do_comma=True +# self._dispatch(e) +# if not t.nl: +# self._write(",") +# +# def _Global(self, t): +# self._fill("global") +# for i, n in enumerate(t.names): +# if i != 0: +# self._write(",") +# self._write(" " + n) +# +# def _Yield(self, t): +# self._fill("yield") +# if t.value: +# self._write(" (") +# self._dispatch(t.value) +# self._write(")") +# +# def _Raise(self, t): +# self._fill('raise ') +# if t.type: +# self._dispatch(t.type) +# if t.inst: +# self._write(", ") +# self._dispatch(t.inst) +# if t.tback: +# self._write(", ") +# self._dispatch(t.tback) +# +# +# def _TryFinally(self, t): +# self._fill("try") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# self._fill("finally") +# self._enter() +# self._dispatch(t.finalbody) +# self._leave() +# +# def _excepthandler(self, t): +# self._fill("except ") +# if t.type: +# self._dispatch(t.type) +# if t.name: +# self._write(", ") +# self._dispatch(t.name) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _ClassDef(self, t): +# self._write("\n") +# self._fill("class "+t.name) +# if t.bases: +# self._write("(") +# for a in t.bases: +# self._dispatch(a) +# self._write(", ") +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _FunctionDef(self, t): +# self._write("\n") +# for deco in t.decorators: +# self._fill("@") +# self._dispatch(deco) +# self._fill("def "+t.name + "(") +# self._dispatch(t.args) +# self._write(")") +# self._enter() +# self._dispatch(t.body) +# self._leave() +# +# def _For(self, t): +# self._fill("for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# def _While(self, t): +# self._fill("while ") +# self._dispatch(t.test) +# self._enter() +# self._dispatch(t.body) +# self._leave() +# if t.orelse: +# self._fill("else") +# self._enter() +# self._dispatch(t.orelse) +# self._leave +# +# # expr +# def _Str(self, tree): +# self._write(repr(tree.s)) +## +# def _Repr(self, t): +# self._write("`") +# self._dispatch(t.value) +# self._write("`") +# +# def _Num(self, t): +# self._write(repr(t.n)) +# +# def _ListComp(self, t): +# self._write("[") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write("]") +# +# def _GeneratorExp(self, t): +# self._write("(") +# self._dispatch(t.elt) +# for gen in t.generators: +# self._dispatch(gen) +# self._write(")") +# +# def _comprehension(self, t): +# self._write(" for ") +# self._dispatch(t.target) +# self._write(" in ") +# self._dispatch(t.iter) +# for if_clause in t.ifs: +# self._write(" if ") +# self._dispatch(if_clause) +# +# def _IfExp(self, t): +# 
self._dispatch(t.body) +# self._write(" if ") +# self._dispatch(t.test) +# if t.orelse: +# self._write(" else ") +# self._dispatch(t.orelse) +# +# unop = {"Invert":"~", "Not": "not", "UAdd":"+", "USub":"-"} +# def _UnaryOp(self, t): +# self._write(self.unop[t.op.__class__.__name__]) +# self._write("(") +# self._dispatch(t.operand) +# self._write(")") +# +# binop = { "Add":"+", "Sub":"-", "Mult":"*", "Div":"/", "Mod":"%", +# "LShift":">>", "RShift":"<<", "BitOr":"|", "BitXor":"^", "BitAnd":"&", +# "FloorDiv":"//", "Pow": "**"} +# def _BinOp(self, t): +# self._write("(") +# self._dispatch(t.left) +# self._write(")" + self.binop[t.op.__class__.__name__] + "(") +# self._dispatch(t.right) +# self._write(")") +# +# boolops = {_ast.And: 'and', _ast.Or: 'or'} +# def _BoolOp(self, t): +# self._write("(") +# self._dispatch(t.values[0]) +# for v in t.values[1:]: +# self._write(" %s " % self.boolops[t.op.__class__]) +# self._dispatch(v) +# self._write(")") +# +# def _Attribute(self,t): +# self._dispatch(t.value) +# self._write(".") +# self._write(t.attr) +# +## def _Call(self, t): +## self._dispatch(t.func) +## self._write("(") +## comma = False +## for e in t.args: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## for e in t.keywords: +## if comma: self._write(", ") +## else: comma = True +## self._dispatch(e) +## if t.starargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("*") +## self._dispatch(t.starargs) +## if t.kwargs: +## if comma: self._write(", ") +## else: comma = True +## self._write("**") +## self._dispatch(t.kwargs) +## self._write(")") +# +# # slice +# def _Index(self, t): +# self._dispatch(t.value) +# +# def _ExtSlice(self, t): +# for i, d in enumerate(t.dims): +# if i != 0: +# self._write(': ') +# self._dispatch(d) +# +# # others +# def _arguments(self, t): +# first = True +# nonDef = len(t.args)-len(t.defaults) +# for a in t.args[0:nonDef]: +# if first:first = False +# else: self._write(", ") +# self._dispatch(a) +# for a,d in zip(t.args[nonDef:], t.defaults): +# if first:first = False +# else: self._write(", ") +# self._dispatch(a), +# self._write("=") +# self._dispatch(d) +# if t.vararg: +# if first:first = False +# else: self._write(", ") +# self._write("*"+t.vararg) +# if t.kwarg: +# if first:first = False +# else: self._write(", ") +# self._write("**"+t.kwarg) +# +## def _keyword(self, t): +## self._write(t.arg) +## self._write("=") +## self._dispatch(t.value) +# +# def _Lambda(self, t): +# self._write("lambda ") +# self._dispatch(t.args) +# self._write(": ") +# self._dispatch(t.body) + + + diff -Nru numpydoc-0.4/numpydoc/docscrape.py numpydoc-0.5/numpydoc/docscrape.py --- numpydoc-0.4/numpydoc/docscrape.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/docscrape.py 2014-06-08 20:03:45.000000000 +0000 @@ -0,0 +1,531 @@ +"""Extract reference documentation from the NumPy source tree. + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import textwrap +import re +import pydoc +from warnings import warn +import collections +import sys + + +class Reader(object): + """A line-based string reader. + + """ + def __init__(self, data): + """ + Parameters + ---------- + data : str + String with lines separated by '\n'. 
+ + """ + if isinstance(data,list): + self._str = data + else: + self._str = data.split('\n') # store string as list of lines + + self.reset() + + def __getitem__(self, n): + return self._str[n] + + def reset(self): + self._l = 0 # current line nr + + def read(self): + if not self.eof(): + out = self[self._l] + self._l += 1 + return out + else: + return '' + + def seek_next_non_empty_line(self): + for l in self[self._l:]: + if l.strip(): + break + else: + self._l += 1 + + def eof(self): + return self._l >= len(self._str) + + def read_to_condition(self, condition_func): + start = self._l + for line in self[start:]: + if condition_func(line): + return self[start:self._l] + self._l += 1 + if self.eof(): + return self[start:self._l+1] + return [] + + def read_to_next_empty_line(self): + self.seek_next_non_empty_line() + def is_empty(line): + return not line.strip() + return self.read_to_condition(is_empty) + + def read_to_next_unindented_line(self): + def is_unindented(line): + return (line.strip() and (len(line.lstrip()) == len(line))) + return self.read_to_condition(is_unindented) + + def peek(self,n=0): + if self._l + n < len(self._str): + return self[self._l + n] + else: + return '' + + def is_empty(self): + return not ''.join(self._str).strip() + + +class NumpyDocString(object): + def __init__(self, docstring, config={}): + docstring = textwrap.dedent(docstring).split('\n') + + self._doc = Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': [''], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Attributes': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'Warnings': [], + 'References': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def __getitem__(self,key): + return self._parsed_data[key] + + def __setitem__(self,key,val): + if key not in self._parsed_data: + warn("Unknown section %s" % key) + else: + self._parsed_data[key] = val + + def _is_at_section(self): + self._doc.seek_next_non_empty_line() + + if self._doc.eof(): + return False + + l1 = self._doc.peek().strip() # e.g. Parameters + + if l1.startswith('.. 
index::'): + return True + + l2 = self._doc.peek(1).strip() # ---------- or ========== + return l2.startswith('-'*len(l1)) or l2.startswith('='*len(l1)) + + def _strip(self,doc): + i = 0 + j = 0 + for i,line in enumerate(doc): + if line.strip(): break + + for j,line in enumerate(doc[::-1]): + if line.strip(): break + + return doc[i:len(doc)-j] + + def _read_to_next_section(self): + section = self._doc.read_to_next_empty_line() + + while not self._is_at_section() and not self._doc.eof(): + if not self._doc.peek(-1).strip(): # previous line was empty + section += [''] + + section += self._doc.read_to_next_empty_line() + + return section + + def _read_sections(self): + while not self._doc.eof(): + data = self._read_to_next_section() + name = data[0].strip() + + if name.startswith('..'): # index section + yield name, data[1:] + elif len(data) < 2: + yield StopIteration + else: + yield name, self._strip(data[2:]) + + def _parse_param_list(self,content): + r = Reader(content) + params = [] + while not r.eof(): + header = r.read().strip() + if ' : ' in header: + arg_name, arg_type = header.split(' : ')[:2] + else: + arg_name, arg_type = header, '' + + desc = r.read_to_next_unindented_line() + desc = dedent_lines(desc) + + params.append((arg_name,arg_type,desc)) + + return params + + + _name_rgx = re.compile(r"^\s*(:(?P\w+):`(?P[a-zA-Z0-9_.-]+)`|" + r" (?P[a-zA-Z0-9_.-]+))\s*", re.X) + def _parse_see_also(self, content): + """ + func_name : Descriptive text + continued text + another_func_name : Descriptive text + func_name1, func_name2, :meth:`func_name`, func_name3 + + """ + items = [] + + def parse_item_name(text): + """Match ':role:`name`' or 'name'""" + m = self._name_rgx.match(text) + if m: + g = m.groups() + if g[1] is None: + return g[3], None + else: + return g[2], g[1] + raise ValueError("%s is not a item name" % text) + + def push_item(name, rest): + if not name: + return + name, role = parse_item_name(name) + items.append((name, list(rest), role)) + del rest[:] + + current_func = None + rest = [] + + for line in content: + if not line.strip(): continue + + m = self._name_rgx.match(line) + if m and line[m.end():].strip().startswith(':'): + push_item(current_func, rest) + current_func, line = line[:m.end()], line[m.end():] + rest = [line.split(':', 1)[1].strip()] + if not rest[0]: + rest = [] + elif not line.startswith(' '): + push_item(current_func, rest) + current_func = None + if ',' in line: + for func in line.split(','): + if func.strip(): + push_item(func, []) + elif line.strip(): + current_func = line + elif current_func is not None: + rest.append(line.strip()) + push_item(current_func, rest) + return items + + def _parse_index(self, section, content): + """ + .. 
index: default + :refguide: something, else, and more + + """ + def strip_each_in(lst): + return [s.strip() for s in lst] + + out = {} + section = section.split('::') + if len(section) > 1: + out['default'] = strip_each_in(section[1].split(','))[0] + for line in content: + line = line.split(':') + if len(line) > 2: + out[line[1]] = strip_each_in(line[2].split(',')) + return out + + def _parse_summary(self): + """Grab signature (if given) and summary""" + if self._is_at_section(): + return + + # If several signatures present, take the last one + while True: + summary = self._doc.read_to_next_empty_line() + summary_str = " ".join([s.strip() for s in summary]).strip() + if re.compile('^([\w., ]+=)?\s*[\w\.]+\(.*\)$').match(summary_str): + self['Signature'] = summary_str + if not self._is_at_section(): + continue + break + + if summary is not None: + self['Summary'] = summary + + if not self._is_at_section(): + self['Extended Summary'] = self._read_to_next_section() + + def _parse(self): + self._doc.reset() + self._parse_summary() + + for (section,content) in self._read_sections(): + if not section.startswith('..'): + section = ' '.join([s.capitalize() for s in section.split(' ')]) + if section in ('Parameters', 'Returns', 'Raises', 'Warns', + 'Other Parameters', 'Attributes', 'Methods'): + self[section] = self._parse_param_list(content) + elif section.startswith('.. index::'): + self['index'] = self._parse_index(section, content) + elif section == 'See Also': + self['See Also'] = self._parse_see_also(content) + else: + self[section] = content + + # string conversion routines + + def _str_header(self, name, symbol='-'): + return [name, len(name)*symbol] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + if self['Signature']: + return [self['Signature'].replace('*','\*')] + [''] + else: + return [''] + + def _str_summary(self): + if self['Summary']: + return self['Summary'] + [''] + else: + return [] + + def _str_extended_summary(self): + if self['Extended Summary']: + return self['Extended Summary'] + [''] + else: + return [] + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_header(name) + for param,param_type,desc in self[name]: + if param_type: + out += ['%s : %s' % (param, param_type)] + else: + out += [param] + out += self._str_indent(desc) + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += self[name] + out += [''] + return out + + def _str_see_also(self, func_role): + if not self['See Also']: return [] + out = [] + out += self._str_header("See Also") + last_had_desc = True + for func, desc, role in self['See Also']: + if role: + link = ':%s:`%s`' % (role, func) + elif func_role: + link = ':%s:`%s`' % (func_role, func) + else: + link = "`%s`_" % func + if desc or last_had_desc: + out += [''] + out += [link] + else: + out[-1] += ", %s" % link + if desc: + out += self._str_indent([' '.join(desc)]) + last_had_desc = True + else: + last_had_desc = False + out += [''] + return out + + def _str_index(self): + idx = self['index'] + out = [] + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.items(): + if section == 'default': + continue + out += [' :%s: %s' % (section, ', '.join(references))] + return out + + def __str__(self, func_role=''): + out = [] + out += self._str_signature() + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Returns', 'Other Parameters', + 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_section('Warnings') + out += self._str_see_also(func_role) + for s in ('Notes','References','Examples'): + out += self._str_section(s) + for param_list in ('Attributes', 'Methods'): + out += self._str_param_list(param_list) + out += self._str_index() + return '\n'.join(out) + + +def indent(str,indent=4): + indent_str = ' '*indent + if str is None: + return indent_str + lines = str.split('\n') + return '\n'.join(indent_str + l for l in lines) + +def dedent_lines(lines): + """Deindent a list of lines maximally""" + return textwrap.dedent("\n".join(lines)).split("\n") + +def header(text, style='-'): + return text + '\n' + style*len(text) + '\n' + + +class FunctionDoc(NumpyDocString): + def __init__(self, func, role='func', doc=None, config={}): + self._f = func + self._role = role # e.g. "func" or "meth" + + if doc is None: + if func is None: + raise ValueError("No function or docstring given") + doc = inspect.getdoc(func) or '' + NumpyDocString.__init__(self, doc) + + if not self['Signature'] and func is not None: + func, func_name = self.get_func() + try: + # try to read signature + if sys.version_info[0] >= 3: + argspec = inspect.getfullargspec(func) + else: + argspec = inspect.getargspec(func) + argspec = inspect.formatargspec(*argspec) + argspec = argspec.replace('*','\*') + signature = '%s%s' % (func_name, argspec) + except TypeError as e: + signature = '%s()' % func_name + self['Signature'] = signature + + def get_func(self): + func_name = getattr(self._f, '__name__', self.__class__.__name__) + if inspect.isclass(self._f): + func = getattr(self._f, '__call__', self._f.__init__) + else: + func = self._f + return func, func_name + + def __str__(self): + out = '' + + func, func_name = self.get_func() + signature = self['Signature'].replace('*', '\*') + + roles = {'func': 'function', + 'meth': 'method'} + + if self._role: + if self._role not in roles: + print("Warning: invalid role %s" % self._role) + out += '.. %s:: %s\n \n\n' % (roles.get(self._role,''), + func_name) + + out += super(FunctionDoc, self).__str__(func_role=self._role) + return out + + +class ClassDoc(NumpyDocString): + + extra_public_methods = ['__call__'] + + def __init__(self, cls, doc=None, modulename='', func_doc=FunctionDoc, + config={}): + if not inspect.isclass(cls) and cls is not None: + raise ValueError("Expected a class or None, but got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + + if doc is None: + if cls is None: + raise ValueError("No class or documentation string given") + doc = pydoc.getdoc(cls) + + NumpyDocString.__init__(self, doc) + + if config.get('show_class_members', True): + def splitlines_x(s): + if not s: + return [] + else: + return s.splitlines() + + for field, items in [('Methods', self.methods), + ('Attributes', self.properties)]: + if not self[field]: + doc_list = [] + for name in sorted(items): + try: + doc_item = pydoc.getdoc(getattr(self._cls, name)) + doc_list.append((name, '', splitlines_x(doc_item))) + except AttributeError: + pass # method doesn't exist + self[field] = doc_list + + @property + def methods(self): + if self._cls is None: + return [] + return [name for name,func in inspect.getmembers(self._cls) + if ((not name.startswith('_') + or name in self.extra_public_methods) + and isinstance(func, collections.Callable))] + + @property + def properties(self): + if self._cls is None: + return [] + return [name for name,func in inspect.getmembers(self._cls) + if not name.startswith('_') and + (func is None or isinstance(func, property) or + inspect.isgetsetdescriptor(func))] diff -Nru numpydoc-0.4/numpydoc/docscrape_sphinx.py numpydoc-0.5/numpydoc/docscrape_sphinx.py --- numpydoc-0.4/numpydoc/docscrape_sphinx.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/docscrape_sphinx.py 2014-01-25 12:49:54.000000000 +0000 @@ -0,0 +1,274 @@ +from __future__ import division, absolute_import, print_function + +import sys, re, inspect, textwrap, pydoc +import sphinx +import collections +from .docscrape import NumpyDocString, FunctionDoc, ClassDoc + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') + + +class SphinxDocString(NumpyDocString): + def __init__(self, docstring, config={}): + NumpyDocString.__init__(self, docstring, config=config) + self.load_config(config) + + def load_config(self, config): + self.use_plots = config.get('use_plots', False) + self.class_members_toctree = config.get('class_members_toctree', True) + + # string conversion routines + def _str_header(self, name, symbol='`'): + return ['.. 
rubric:: ' + name, ''] + + def _str_field_list(self, name): + return [':' + name + ':'] + + def _str_indent(self, doc, indent=4): + out = [] + for line in doc: + out += [' '*indent + line] + return out + + def _str_signature(self): + return [''] + if self['Signature']: + return ['``%s``' % self['Signature']] + [''] + else: + return [''] + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Extended Summary'] + [''] + + def _str_returns(self): + out = [] + if self['Returns']: + out += self._str_field_list('Returns') + out += [''] + for param, param_type, desc in self['Returns']: + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent([param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) + out += [''] + return out + + def _str_param_list(self, name): + out = [] + if self[name]: + out += self._str_field_list(name) + out += [''] + for param, param_type, desc in self[name]: + if param_type: + out += self._str_indent(['**%s** : %s' % (param.strip(), + param_type)]) + else: + out += self._str_indent(['**%s**' % param.strip()]) + if desc: + out += [''] + out += self._str_indent(desc, 8) + out += [''] + return out + + @property + def _obj(self): + if hasattr(self, '_cls'): + return self._cls + elif hasattr(self, '_f'): + return self._f + return None + + def _str_member_list(self, name): + """ + Generate a member listing, autosummary:: table where possible, + and a table where not. + + """ + out = [] + if self[name]: + out += ['.. rubric:: %s' % name, ''] + prefix = getattr(self, '_name', '') + + if prefix: + prefix = '~%s.' % prefix + + autosum = [] + others = [] + for param, param_type, desc in self[name]: + param = param.strip() + + # Check if the referenced member can have a docstring or not + param_obj = getattr(self._obj, param, None) + if not (callable(param_obj) + or isinstance(param_obj, property) + or inspect.isgetsetdescriptor(param_obj)): + param_obj = None + + if param_obj and (pydoc.getdoc(param_obj) or not desc): + # Referenced object has a docstring + autosum += [" %s%s" % (prefix, param)] + else: + others.append((param, param_type, desc)) + + if autosum: + out += ['.. autosummary::'] + if self.class_members_toctree: + out += [' :toctree:'] + out += [''] + autosum + + if others: + maxlen_0 = max(3, max([len(x[0]) for x in others])) + hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10 + fmt = sixu('%%%ds %%s ') % (maxlen_0,) + out += ['', hdr] + for param, param_type, desc in others: + desc = sixu(" ").join(x.strip() for x in desc).strip() + if param_type: + desc = "(%s) %s" % (param_type, desc) + out += [fmt % (param.strip(), desc)] + out += [hdr] + out += [''] + return out + + def _str_section(self, name): + out = [] + if self[name]: + out += self._str_header(name) + out += [''] + content = textwrap.dedent("\n".join(self[name])).split("\n") + out += content + out += [''] + return out + + def _str_see_also(self, func_role): + out = [] + if self['See Also']: + see_also = super(SphinxDocString, self)._str_see_also(func_role) + out = ['.. seealso::', ''] + out += self._str_indent(see_also[2:]) + return out + + def _str_warnings(self): + out = [] + if self['Warnings']: + out = ['.. warning::', ''] + out += self._str_indent(self['Warnings']) + return out + + def _str_index(self): + idx = self['index'] + out = [] + if len(idx) == 0: + return out + + out += ['.. 
index:: %s' % idx.get('default','')] + for section, references in idx.items(): + if section == 'default': + continue + elif section == 'refguide': + out += [' single: %s' % (', '.join(references))] + else: + out += [' %s: %s' % (section, ','.join(references))] + return out + + def _str_references(self): + out = [] + if self['References']: + out += self._str_header('References') + if isinstance(self['References'], str): + self['References'] = [self['References']] + out.extend(self['References']) + out += [''] + # Latex collects all references to a separate bibliography, + # so we need to insert links to it + if sphinx.__version__ >= "0.6": + out += ['.. only:: latex',''] + else: + out += ['.. latexonly::',''] + items = [] + for line in self['References']: + m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I) + if m: + items.append(m.group(1)) + out += [' ' + ", ".join(["[%s]_" % item for item in items]), ''] + return out + + def _str_examples(self): + examples_str = "\n".join(self['Examples']) + + if (self.use_plots and 'import matplotlib' in examples_str + and 'plot::' not in examples_str): + out = [] + out += self._str_header('Examples') + out += ['.. plot::', ''] + out += self._str_indent(self['Examples']) + out += [''] + return out + else: + return self._str_section('Examples') + + def __str__(self, indent=0, func_role="obj"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + out += self._str_param_list('Parameters') + out += self._str_returns() + for param_list in ('Other Parameters', 'Raises', 'Warns'): + out += self._str_param_list(param_list) + out += self._str_warnings() + out += self._str_see_also(func_role) + out += self._str_section('Notes') + out += self._str_references() + out += self._str_examples() + for param_list in ('Attributes', 'Methods'): + out += self._str_member_list(param_list) + out = self._str_indent(out,indent) + return '\n'.join(out) + +class SphinxFunctionDoc(SphinxDocString, FunctionDoc): + def __init__(self, obj, doc=None, config={}): + self.load_config(config) + FunctionDoc.__init__(self, obj, doc=doc, config=config) + +class SphinxClassDoc(SphinxDocString, ClassDoc): + def __init__(self, obj, doc=None, func_doc=None, config={}): + self.load_config(config) + ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config) + +class SphinxObjDoc(SphinxDocString): + def __init__(self, obj, doc=None, config={}): + self._f = obj + self.load_config(config) + SphinxDocString.__init__(self, doc, config=config) + +def get_doc_object(obj, what=None, doc=None, config={}): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, collections.Callable): + what = 'function' + else: + what = 'object' + if what == 'class': + return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc, + config=config) + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, doc=doc, config=config) + else: + if doc is None: + doc = pydoc.getdoc(obj) + return SphinxObjDoc(obj, doc, config=config) diff -Nru numpydoc-0.4/numpydoc/__init__.py numpydoc-0.5/numpydoc/__init__.py --- numpydoc-0.4/numpydoc/__init__.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/__init__.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,3 @@ +from __future__ import division, absolute_import, print_function + +from .numpydoc import setup diff -Nru numpydoc-0.4/numpydoc/linkcode.py numpydoc-0.5/numpydoc/linkcode.py 
--- numpydoc-0.4/numpydoc/linkcode.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/linkcode.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" + linkcode + ~~~~~~~~ + + Add external links to module code in Python object descriptions. + + :copyright: Copyright 2007-2011 by the Sphinx team, see AUTHORS. + :license: BSD, see LICENSE for details. + +""" +from __future__ import division, absolute_import, print_function + +import warnings +import collections + +warnings.warn("This extension has been accepted to Sphinx upstream. " + "Use the version from there (Sphinx >= 1.2) " + "https://bitbucket.org/birkenfeld/sphinx/pull-request/47/sphinxextlinkcode", + FutureWarning, stacklevel=1) + + +from docutils import nodes + +from sphinx import addnodes +from sphinx.locale import _ +from sphinx.errors import SphinxError + +class LinkcodeError(SphinxError): + category = "linkcode error" + +def doctree_read(app, doctree): + env = app.builder.env + + resolve_target = getattr(env.config, 'linkcode_resolve', None) + if not isinstance(env.config.linkcode_resolve, collections.Callable): + raise LinkcodeError( + "Function `linkcode_resolve` is not given in conf.py") + + domain_keys = dict( + py=['module', 'fullname'], + c=['names'], + cpp=['names'], + js=['object', 'fullname'], + ) + + for objnode in doctree.traverse(addnodes.desc): + domain = objnode.get('domain') + uris = set() + for signode in objnode: + if not isinstance(signode, addnodes.desc_signature): + continue + + # Convert signode to a specified format + info = {} + for key in domain_keys.get(domain, []): + value = signode.get(key) + if not value: + value = '' + info[key] = value + if not info: + continue + + # Call user code to resolve the link + uri = resolve_target(domain, info) + if not uri: + # no source + continue + + if uri in uris or not uri: + # only one link per name, please + continue + uris.add(uri) + + onlynode = addnodes.only(expr='html') + onlynode += nodes.reference('', '', internal=False, refuri=uri) + onlynode[0] += nodes.inline('', _('[source]'), + classes=['viewcode-link']) + signode += onlynode + +def setup(app): + app.connect('doctree-read', doctree_read) + app.add_config_value('linkcode_resolve', None, '') diff -Nru numpydoc-0.4/numpydoc/numpydoc.py numpydoc-0.5/numpydoc/numpydoc.py --- numpydoc-0.4/numpydoc/numpydoc.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/numpydoc.py 2014-01-25 12:49:54.000000000 +0000 @@ -0,0 +1,187 @@ +""" +======== +numpydoc +======== + +Sphinx extension that handles docstrings in the Numpy standard format. [1] + +It will: + +- Convert Parameters etc. sections to field lists. +- Convert See Also section to a See also entry. +- Renumber references. +- Extract the signature from the docstring, if it can't be determined otherwise. + +.. 
[1] https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt + +""" +from __future__ import division, absolute_import, print_function + +import os, sys, re, pydoc +import sphinx +import inspect +import collections + +if sphinx.__version__ < '1.0.1': + raise RuntimeError("Sphinx 1.0.1 or newer is required") + +from .docscrape_sphinx import get_doc_object, SphinxDocString +from sphinx.util.compat import Directive + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') + + +def mangle_docstrings(app, what, name, obj, options, lines, + reference_offset=[0]): + + cfg = dict(use_plots=app.config.numpydoc_use_plots, + show_class_members=app.config.numpydoc_show_class_members, + class_members_toctree=app.config.numpydoc_class_members_toctree, + ) + + if what == 'module': + # Strip top title + title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'), + re.I|re.S) + lines[:] = title_re.sub(sixu(''), sixu("\n").join(lines)).split(sixu("\n")) + else: + doc = get_doc_object(obj, what, sixu("\n").join(lines), config=cfg) + if sys.version_info[0] >= 3: + doc = str(doc) + else: + doc = unicode(doc) + lines[:] = doc.split(sixu("\n")) + + if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ + obj.__name__: + if hasattr(obj, '__module__'): + v = dict(full_name=sixu("%s.%s") % (obj.__module__, obj.__name__)) + else: + v = dict(full_name=obj.__name__) + lines += [sixu(''), sixu('.. htmlonly::'), sixu('')] + lines += [sixu(' %s') % x for x in + (app.config.numpydoc_edit_link % v).split("\n")] + + # replace reference numbers so that there are no duplicates + references = [] + for line in lines: + line = line.strip() + m = re.match(sixu('^.. \\[([a-z0-9_.-])\\]'), line, re.I) + if m: + references.append(m.group(1)) + + # start renaming from the longest string, to avoid overwriting parts + references.sort(key=lambda x: -len(x)) + if references: + for i, line in enumerate(lines): + for r in references: + if re.match(sixu('^\\d+$'), r): + new_r = sixu("R%d") % (reference_offset[0] + int(r)) + else: + new_r = sixu("%s%d") % (r, reference_offset[0]) + lines[i] = lines[i].replace(sixu('[%s]_') % r, + sixu('[%s]_') % new_r) + lines[i] = lines[i].replace(sixu('.. [%s]') % r, + sixu('.. 
[%s]') % new_r) + + reference_offset[0] += len(references) + +def mangle_signature(app, what, name, obj, options, sig, retann): + # Do not try to inspect classes that don't define `__init__` + if (inspect.isclass(obj) and + (not hasattr(obj, '__init__') or + 'initializes x; see ' in pydoc.getdoc(obj.__init__))): + return '', '' + + if not (isinstance(obj, collections.Callable) or hasattr(obj, '__argspec_is_invalid_')): return + if not hasattr(obj, '__doc__'): return + + doc = SphinxDocString(pydoc.getdoc(obj)) + if doc['Signature']: + sig = re.sub(sixu("^[^(]*"), sixu(""), doc['Signature']) + return sig, sixu('') + +def setup(app, get_doc_object_=get_doc_object): + if not hasattr(app, 'add_config_value'): + return # probably called by nose, better bail out + + global get_doc_object + get_doc_object = get_doc_object_ + + app.connect('autodoc-process-docstring', mangle_docstrings) + app.connect('autodoc-process-signature', mangle_signature) + app.add_config_value('numpydoc_edit_link', None, False) + app.add_config_value('numpydoc_use_plots', None, False) + app.add_config_value('numpydoc_show_class_members', True, True) + app.add_config_value('numpydoc_class_members_toctree', True, True) + + # Extra mangling domains + app.add_domain(NumpyPythonDomain) + app.add_domain(NumpyCDomain) + +#------------------------------------------------------------------------------ +# Docstring-mangling domains +#------------------------------------------------------------------------------ + +from docutils.statemachine import ViewList +from sphinx.domains.c import CDomain +from sphinx.domains.python import PythonDomain + +class ManglingDomainBase(object): + directive_mangling_map = {} + + def __init__(self, *a, **kw): + super(ManglingDomainBase, self).__init__(*a, **kw) + self.wrap_mangling_directives() + + def wrap_mangling_directives(self): + for name, objtype in list(self.directive_mangling_map.items()): + self.directives[name] = wrap_mangling_directive( + self.directives[name], objtype) + +class NumpyPythonDomain(ManglingDomainBase, PythonDomain): + name = 'np' + directive_mangling_map = { + 'function': 'function', + 'class': 'class', + 'exception': 'class', + 'method': 'function', + 'classmethod': 'function', + 'staticmethod': 'function', + 'attribute': 'attribute', + } + indices = [] + +class NumpyCDomain(ManglingDomainBase, CDomain): + name = 'np-c' + directive_mangling_map = { + 'function': 'function', + 'member': 'attribute', + 'macro': 'function', + 'type': 'class', + 'var': 'object', + } + +def wrap_mangling_directive(base_directive, objtype): + class directive(base_directive): + def run(self): + env = self.state.document.settings.env + + name = None + if self.arguments: + m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) + name = m.group(2).strip() + + if not name: + name = self.arguments[0] + + lines = list(self.content) + mangle_docstrings(env.app, objtype, name, None, None, lines) + self.content = ViewList(lines, self.content.parent) + + return base_directive.run(self) + + return directive diff -Nru numpydoc-0.4/numpydoc/phantom_import.py numpydoc-0.5/numpydoc/phantom_import.py --- numpydoc-0.4/numpydoc/phantom_import.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/phantom_import.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,167 @@ +""" +============== +phantom_import +============== + +Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar +extensions to use docstrings loaded from an XML file. 
+ +This extension loads an XML file in the Pydocweb format [1] and +creates a dummy module that contains the specified docstrings. This +can be used to get the current docstrings from a Pydocweb instance +without needing to rebuild the documented module. + +.. [1] http://code.google.com/p/pydocweb + +""" +from __future__ import division, absolute_import, print_function + +import imp, sys, compiler, types, os, inspect, re + +def setup(app): + app.connect('builder-inited', initialize) + app.add_config_value('phantom_import_file', None, True) + +def initialize(app): + fn = app.config.phantom_import_file + if (fn and os.path.isfile(fn)): + print("[numpydoc] Phantom importing modules from", fn, "...") + import_phantom_module(fn) + +#------------------------------------------------------------------------------ +# Creating 'phantom' modules from an XML description +#------------------------------------------------------------------------------ +def import_phantom_module(xml_file): + """ + Insert a fake Python module to sys.modules, based on a XML file. + + The XML file is expected to conform to Pydocweb DTD. The fake + module will contain dummy objects, which guarantee the following: + + - Docstrings are correct. + - Class inheritance relationships are correct (if present in XML). + - Function argspec is *NOT* correct (even if present in XML). + Instead, the function signature is prepended to the function docstring. + - Class attributes are *NOT* correct; instead, they are dummy objects. + + Parameters + ---------- + xml_file : str + Name of an XML file to read + + """ + import lxml.etree as etree + + object_cache = {} + + tree = etree.parse(xml_file) + root = tree.getroot() + + # Sort items so that + # - Base classes come before classes inherited from them + # - Modules come before their contents + all_nodes = dict([(n.attrib['id'], n) for n in root]) + + def _get_bases(node, recurse=False): + bases = [x.attrib['ref'] for x in node.findall('base')] + if recurse: + j = 0 + while True: + try: + b = bases[j] + except IndexError: break + if b in all_nodes: + bases.extend(_get_bases(all_nodes[b])) + j += 1 + return bases + + type_index = ['module', 'class', 'callable', 'object'] + + def base_cmp(a, b): + x = cmp(type_index.index(a.tag), type_index.index(b.tag)) + if x != 0: return x + + if a.tag == 'class' and b.tag == 'class': + a_bases = _get_bases(a, recurse=True) + b_bases = _get_bases(b, recurse=True) + x = cmp(len(a_bases), len(b_bases)) + if x != 0: return x + if a.attrib['id'] in b_bases: return -1 + if b.attrib['id'] in a_bases: return 1 + + return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) + + nodes = root.getchildren() + nodes.sort(base_cmp) + + # Create phantom items + for node in nodes: + name = node.attrib['id'] + doc = (node.text or '').decode('string-escape') + "\n" + if doc == "\n": doc = "" + + # create parent, if missing + parent = name + while True: + parent = '.'.join(parent.split('.')[:-1]) + if not parent: break + if parent in object_cache: break + obj = imp.new_module(parent) + object_cache[parent] = obj + sys.modules[parent] = obj + + # create object + if node.tag == 'module': + obj = imp.new_module(name) + obj.__doc__ = doc + sys.modules[name] = obj + elif node.tag == 'class': + bases = [object_cache[b] for b in _get_bases(node) + if b in object_cache] + bases.append(object) + init = lambda self: None + init.__doc__ = doc + obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) + obj.__name__ = name.split('.')[-1] + elif node.tag == 'callable': + 
funcname = node.attrib['id'].split('.')[-1] + argspec = node.attrib.get('argspec') + if argspec: + argspec = re.sub('^[^(]*', '', argspec) + doc = "%s%s\n\n%s" % (funcname, argspec, doc) + obj = lambda: 0 + obj.__argspec_is_invalid_ = True + if sys.version_info[0] >= 3: + obj.__name__ = funcname + else: + obj.func_name = funcname + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__objclass__ = object_cache[parent] + else: + class Dummy(object): pass + obj = Dummy() + obj.__name__ = name + obj.__doc__ = doc + if inspect.isclass(object_cache[parent]): + obj.__get__ = lambda: None + object_cache[name] = obj + + if parent: + if inspect.ismodule(object_cache[parent]): + obj.__module__ = parent + setattr(object_cache[parent], name.split('.')[-1], obj) + + # Populate items + for node in root: + obj = object_cache.get(node.attrib['id']) + if obj is None: continue + for ref in node.findall('ref'): + if node.tag == 'class': + if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) + else: + setattr(obj, ref.attrib['name'], + object_cache.get(ref.attrib['ref'])) diff -Nru numpydoc-0.4/numpydoc/plot_directive.py numpydoc-0.5/numpydoc/plot_directive.py --- numpydoc-0.4/numpydoc/plot_directive.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/plot_directive.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,642 @@ +""" +A special directive for generating a matplotlib plot. + +.. warning:: + + This is a hacked version of plot_directive.py from Matplotlib. + It's very much subject to change! + + +Usage +----- + +Can be used like this:: + + .. plot:: examples/example.py + + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3], [4,5,6]) + + .. plot:: + + A plotting example: + + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3], [4,5,6]) + +The content is interpreted as doctest formatted if it has a line starting +with ``>>>``. + +The ``plot`` directive supports the options + + format : {'python', 'doctest'} + Specify the format of the input + + include-source : bool + Whether to display the source code. Default can be changed in conf.py + +and the ``image`` directive options ``alt``, ``height``, ``width``, +``scale``, ``align``, ``class``. + +Configuration options +--------------------- + +The plot directive has the following configuration options: + + plot_include_source + Default value for the include-source option + + plot_pre_code + Code that should be executed before each plot. + + plot_basedir + Base directory, to which plot:: file names are relative to. + (If None or empty, file names are relative to the directoly where + the file containing the directive is.) + + plot_formats + File formats to generate. List of tuples or strings:: + + [(suffix, dpi), suffix, ...] + + that determine the file format and the DPI. For entries whose + DPI was omitted, sensible defaults are chosen. + + plot_html_show_formats + Whether to show links to the files in HTML. + +TODO +---- + +* Refactor Latex output; now it's plain images, but it would be nice + to make them appear side-by-side, or in floats. 
+ +""" +from __future__ import division, absolute_import, print_function + +import sys, os, glob, shutil, imp, warnings, re, textwrap, traceback +import sphinx + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from io import StringIO + +import warnings +warnings.warn("A plot_directive module is also available under " + "matplotlib.sphinxext; expect this numpydoc.plot_directive " + "module to be deprecated after relevant features have been " + "integrated there.", + FutureWarning, stacklevel=2) + + +#------------------------------------------------------------------------------ +# Registration hook +#------------------------------------------------------------------------------ + +def setup(app): + setup.app = app + setup.config = app.config + setup.confdir = app.confdir + + app.add_config_value('plot_pre_code', '', True) + app.add_config_value('plot_include_source', False, True) + app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) + app.add_config_value('plot_basedir', None, True) + app.add_config_value('plot_html_show_formats', True, True) + + app.add_directive('plot', plot_directive, True, (0, 1, False), + **plot_directive_options) + +#------------------------------------------------------------------------------ +# plot:: directive +#------------------------------------------------------------------------------ +from docutils.parsers.rst import directives +from docutils import nodes + +def plot_directive(name, arguments, options, content, lineno, + content_offset, block_text, state, state_machine): + return run(arguments, content, options, state_machine, state, lineno) +plot_directive.__doc__ = __doc__ + +def _option_boolean(arg): + if not arg or not arg.strip(): + # no argument given, assume used as a flag + return True + elif arg.strip().lower() in ('no', '0', 'false'): + return False + elif arg.strip().lower() in ('yes', '1', 'true'): + return True + else: + raise ValueError('"%s" unknown boolean' % arg) + +def _option_format(arg): + return directives.choice(arg, ('python', 'lisp')) + +def _option_align(arg): + return directives.choice(arg, ("top", "middle", "bottom", "left", "center", + "right")) + +plot_directive_options = {'alt': directives.unchanged, + 'height': directives.length_or_unitless, + 'width': directives.length_or_percentage_or_unitless, + 'scale': directives.nonnegative_int, + 'align': _option_align, + 'class': directives.class_option, + 'include-source': _option_boolean, + 'format': _option_format, + } + +#------------------------------------------------------------------------------ +# Generating output +#------------------------------------------------------------------------------ + +from docutils import nodes, utils + +try: + # Sphinx depends on either Jinja or Jinja2 + import jinja2 + def format_template(template, **kw): + return jinja2.Template(template).render(**kw) +except ImportError: + import jinja + def format_template(template, **kw): + return jinja.from_string(template, **kw) + +TEMPLATE = """ +{{ source_code }} + +{{ only_html }} + + {% if source_link or (html_show_formats and not multi_image) %} + ( + {%- if source_link -%} + `Source code <{{ source_link }}>`__ + {%- endif -%} + {%- if html_show_formats and not multi_image -%} + {%- for img in images -%} + {%- for fmt in img.formats -%} + {%- if source_link or not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + {%- endfor -%} + {%- endif -%} + ) + {% endif %} + + {% for img in images %} + .. 
figure:: {{ build_dir }}/{{ img.basename }}.png + {%- for option in options %} + {{ option }} + {% endfor %} + + {% if html_show_formats and multi_image -%} + ( + {%- for fmt in img.formats -%} + {%- if not loop.first -%}, {% endif -%} + `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ + {%- endfor -%} + ) + {%- endif -%} + {% endfor %} + +{{ only_latex }} + + {% for img in images %} + .. image:: {{ build_dir }}/{{ img.basename }}.pdf + {% endfor %} + +""" + +class ImageFile(object): + def __init__(self, basename, dirname): + self.basename = basename + self.dirname = dirname + self.formats = [] + + def filename(self, format): + return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) + + def filenames(self): + return [self.filename(fmt) for fmt in self.formats] + +def run(arguments, content, options, state_machine, state, lineno): + if arguments and content: + raise RuntimeError("plot:: directive can't have both args and content") + + document = state_machine.document + config = document.settings.env.config + + options.setdefault('include-source', config.plot_include_source) + + # determine input + rst_file = document.attributes['source'] + rst_dir = os.path.dirname(rst_file) + + if arguments: + if not config.plot_basedir: + source_file_name = os.path.join(rst_dir, + directives.uri(arguments[0])) + else: + source_file_name = os.path.join(setup.confdir, config.plot_basedir, + directives.uri(arguments[0])) + code = open(source_file_name, 'r').read() + output_base = os.path.basename(source_file_name) + else: + source_file_name = rst_file + code = textwrap.dedent("\n".join(map(str, content))) + counter = document.attributes.get('_plot_counter', 0) + 1 + document.attributes['_plot_counter'] = counter + base, ext = os.path.splitext(os.path.basename(source_file_name)) + output_base = '%s-%d.py' % (base, counter) + + base, source_ext = os.path.splitext(output_base) + if source_ext in ('.py', '.rst', '.txt'): + output_base = base + else: + source_ext = '' + + # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames + output_base = output_base.replace('.', '-') + + # is it in doctest format? 
+ is_doctest = contains_doctest(code) + if 'format' in options: + if options['format'] == 'python': + is_doctest = False + else: + is_doctest = True + + # determine output directory name fragment + source_rel_name = relpath(source_file_name, setup.confdir) + source_rel_dir = os.path.dirname(source_rel_name) + while source_rel_dir.startswith(os.path.sep): + source_rel_dir = source_rel_dir[1:] + + # build_dir: where to place output files (temporarily) + build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), + 'plot_directive', + source_rel_dir) + if not os.path.exists(build_dir): + os.makedirs(build_dir) + + # output_dir: final location in the builder's directory + dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, + source_rel_dir)) + + # how to link to files from the RST file + dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), + source_rel_dir).replace(os.path.sep, '/') + build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') + source_link = dest_dir_link + '/' + output_base + source_ext + + # make figures + try: + results = makefig(code, source_file_name, build_dir, output_base, + config) + errors = [] + except PlotError as err: + reporter = state.memo.reporter + sm = reporter.system_message( + 2, "Exception occurred in plotting %s: %s" % (output_base, err), + line=lineno) + results = [(code, [])] + errors = [sm] + + # generate output restructuredtext + total_lines = [] + for j, (code_piece, images) in enumerate(results): + if options['include-source']: + if is_doctest: + lines = [''] + lines += [row.rstrip() for row in code_piece.split('\n')] + else: + lines = ['.. code-block:: python', ''] + lines += [' %s' % row.rstrip() + for row in code_piece.split('\n')] + source_code = "\n".join(lines) + else: + source_code = "" + + opts = [':%s: %s' % (key, val) for key, val in list(options.items()) + if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] + + only_html = ".. only:: html" + only_latex = ".. 
only:: latex" + + if j == 0: + src_link = source_link + else: + src_link = None + + result = format_template( + TEMPLATE, + dest_dir=dest_dir_link, + build_dir=build_dir_link, + source_link=src_link, + multi_image=len(images) > 1, + only_html=only_html, + only_latex=only_latex, + options=opts, + images=images, + source_code=source_code, + html_show_formats=config.plot_html_show_formats) + + total_lines.extend(result.split("\n")) + total_lines.extend("\n") + + if total_lines: + state_machine.insert_input(total_lines, source=source_file_name) + + # copy image files to builder's output directory + if not os.path.exists(dest_dir): + os.makedirs(dest_dir) + + for code_piece, images in results: + for img in images: + for fn in img.filenames(): + shutil.copyfile(fn, os.path.join(dest_dir, + os.path.basename(fn))) + + # copy script (if necessary) + if source_file_name == rst_file: + target_name = os.path.join(dest_dir, output_base + source_ext) + f = open(target_name, 'w') + f.write(unescape_doctest(code)) + f.close() + + return errors + + +#------------------------------------------------------------------------------ +# Run code and capture figures +#------------------------------------------------------------------------------ + +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import matplotlib.image as image +from matplotlib import _pylab_helpers + +import exceptions + +def contains_doctest(text): + try: + # check if it's valid Python as-is + compile(text, '', 'exec') + return False + except SyntaxError: + pass + r = re.compile(r'^\s*>>>', re.M) + m = r.search(text) + return bool(m) + +def unescape_doctest(text): + """ + Extract code from a piece of text, which contains either Python code + or doctests. + + """ + if not contains_doctest(text): + return text + + code = "" + for line in text.split("\n"): + m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) + if m: + code += m.group(2) + "\n" + elif line.strip(): + code += "# " + line.strip() + "\n" + else: + code += "\n" + return code + +def split_code_at_show(text): + """ + Split code at plt.show() + + """ + + parts = [] + is_doctest = contains_doctest(text) + + part = [] + for line in text.split("\n"): + if (not is_doctest and line.strip() == 'plt.show()') or \ + (is_doctest and line.strip() == '>>> plt.show()'): + part.append(line) + parts.append("\n".join(part)) + part = [] + else: + part.append(line) + if "\n".join(part).strip(): + parts.append("\n".join(part)) + return parts + +class PlotError(RuntimeError): + pass + +def run_code(code, code_path, ns=None): + # Change the working directory to the directory of the example, so + # it can get at its data files, if any. 
+ pwd = os.getcwd() + old_sys_path = list(sys.path) + if code_path is not None: + dirname = os.path.abspath(os.path.dirname(code_path)) + os.chdir(dirname) + sys.path.insert(0, dirname) + + # Redirect stdout + stdout = sys.stdout + sys.stdout = StringIO() + + # Reset sys.argv + old_sys_argv = sys.argv + sys.argv = [code_path] + + try: + try: + code = unescape_doctest(code) + if ns is None: + ns = {} + if not ns: + exec(setup.config.plot_pre_code, ns) + exec(code, ns) + except (Exception, SystemExit) as err: + raise PlotError(traceback.format_exc()) + finally: + os.chdir(pwd) + sys.argv = old_sys_argv + sys.path[:] = old_sys_path + sys.stdout = stdout + return ns + + +#------------------------------------------------------------------------------ +# Generating figures +#------------------------------------------------------------------------------ + +def out_of_date(original, derived): + """ + Returns True if derivative is out-of-date wrt original, + both of which are full file paths. + """ + return (not os.path.exists(derived) + or os.stat(derived).st_mtime < os.stat(original).st_mtime) + + +def makefig(code, code_path, output_dir, output_base, config): + """ + Run a pyplot script *code* and save the images under *output_dir* + with file names derived from *output_base* + + """ + + # -- Parse format list + default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} + formats = [] + for fmt in config.plot_formats: + if isinstance(fmt, str): + formats.append((fmt, default_dpi.get(fmt, 80))) + elif type(fmt) in (tuple, list) and len(fmt)==2: + formats.append((str(fmt[0]), int(fmt[1]))) + else: + raise PlotError('invalid image format "%r" in plot_formats' % fmt) + + # -- Try to determine if all images already exist + + code_pieces = split_code_at_show(code) + + # Look for single-figure output files first + all_exists = True + img = ImageFile(output_base, output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + if all_exists: + return [(code, [img])] + + # Then look for multi-figure output files + results = [] + all_exists = True + for i, code_piece in enumerate(code_pieces): + images = [] + for j in range(1000): + img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) + for format, dpi in formats: + if out_of_date(code_path, img.filename(format)): + all_exists = False + break + img.formats.append(format) + + # assume that if we have one, we have them all + if not all_exists: + all_exists = (j > 0) + break + images.append(img) + if not all_exists: + break + results.append((code_piece, images)) + + if all_exists: + return results + + # -- We didn't find the files, so build them + + results = [] + ns = {} + + for i, code_piece in enumerate(code_pieces): + # Clear between runs + plt.close('all') + + # Run code + run_code(code_piece, code_path, ns) + + # Collect images + images = [] + fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() + for j, figman in enumerate(fig_managers): + if len(fig_managers) == 1 and len(code_pieces) == 1: + img = ImageFile(output_base, output_dir) + else: + img = ImageFile("%s_%02d_%02d" % (output_base, i, j), + output_dir) + images.append(img) + for format, dpi in formats: + try: + figman.canvas.figure.savefig(img.filename(format), dpi=dpi) + except exceptions.BaseException as err: + raise PlotError(traceback.format_exc()) + img.formats.append(format) + + # Results + results.append((code_piece, images)) + + return results + + 
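The helpers above (contains_doctest, unescape_doctest, split_code_at_show) decide whether directive content is doctest-formatted and how it is split into code pieces; a minimal sketch of their behaviour, assuming the module is importable as numpydoc.plot_directive (Python 2 only; see the test suite further down):

    # Sketch under the assumption that numpydoc.plot_directive imports cleanly
    # (it requires matplotlib and, per the tests below, is not ported to Python 3).
    from numpydoc import plot_directive as pd

    text = ">>> import matplotlib.pyplot as plt\n>>> plt.plot([1, 2, 3])\n>>> plt.show()"

    print(pd.contains_doctest(text))         # True: has '>>>' prompts, not plain Python
    print(pd.unescape_doctest(text))         # prompts stripped, leaving runnable code
    print(len(pd.split_code_at_show(text)))  # 1: chunks are split at plt.show() calls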
+#------------------------------------------------------------------------------ +# Relative pathnames +#------------------------------------------------------------------------------ + +try: + from os.path import relpath +except ImportError: + # Copied from Python 2.7 + if 'posix' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir + + if not path: + raise ValueError("no path specified") + + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + + # Work out how much of the filepath is shared by start and path. + i = len(commonprefix([start_list, path_list])) + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + elif 'nt' in sys.builtin_module_names: + def relpath(path, start=os.path.curdir): + """Return a relative version of a path""" + from os.path import sep, curdir, join, abspath, commonprefix, \ + pardir, splitunc + + if not path: + raise ValueError("no path specified") + start_list = abspath(start).split(sep) + path_list = abspath(path).split(sep) + if start_list[0].lower() != path_list[0].lower(): + unc_path, rest = splitunc(path) + unc_start, rest = splitunc(start) + if bool(unc_path) ^ bool(unc_start): + raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" + % (path, start)) + else: + raise ValueError("path is on drive %s, start on drive %s" + % (path_list[0], start_list[0])) + # Work out how much of the filepath is shared by start and path. + for i in range(min(len(start_list), len(path_list))): + if start_list[i].lower() != path_list[i].lower(): + break + else: + i += 1 + + rel_list = [pardir] * (len(start_list)-i) + path_list[i:] + if not rel_list: + return curdir + return join(*rel_list) + else: + raise RuntimeError("Unsupported platform (no relpath available!)") diff -Nru numpydoc-0.4/numpydoc/tests/test_docscrape.py numpydoc-0.5/numpydoc/tests/test_docscrape.py --- numpydoc-0.4/numpydoc/tests/test_docscrape.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/tests/test_docscrape.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,767 @@ +# -*- encoding:utf-8 -*- +from __future__ import division, absolute_import, print_function + +import sys, textwrap + +from numpydoc.docscrape import NumpyDocString, FunctionDoc, ClassDoc +from numpydoc.docscrape_sphinx import SphinxDocString, SphinxClassDoc +from nose.tools import * + +if sys.version_info[0] >= 3: + sixu = lambda s: s +else: + sixu = lambda s: unicode(s, 'unicode_escape') + + +doc_txt = '''\ + numpy.multivariate_normal(mean, cov, shape=None, spam=None) + + Draw values from a multivariate normal distribution with specified + mean and covariance. + + The multivariate normal or Gaussian distribution is a generalisation + of the one-dimensional normal distribution to higher dimensions. + + Parameters + ---------- + mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + cov : (N, N) ndarray + Covariance matrix of the distribution. + shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + + Returns + ------- + out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). 
+ + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + list of str + This is not a real return value. It exists to test + anonymous return values. + + Other Parameters + ---------------- + spam : parrot + A parrot off its mortal coil. + + Raises + ------ + RuntimeError + Some error + + Warns + ----- + RuntimeWarning + Some warning + + Warnings + -------- + Certain warnings apply. + + Notes + ----- + Instead of specifying the full covariance matrix, popular + approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + + This geometrical property can be seen in two dimensions by plotting + generated data-points: + + >>> mean = [0,0] + >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + + >>> x,y = multivariate_normal(mean,cov,5000).T + >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + + Note that the covariance matrix must be symmetric and non-negative + definite. + + References + ---------- + .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 + .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + + See Also + -------- + some, other, funcs + otherfunc : relationship + + Examples + -------- + >>> mean = (1,2) + >>> cov = [[1,0],[1,0]] + >>> x = multivariate_normal(mean,cov,(3,3)) + >>> print x.shape + (3, 3, 2) + + The following is probably true, given that 0.6 is roughly twice the + standard deviation: + + >>> print list( (x[0,0,:] - mean) < 0.6 ) + [True, True] + + .. index:: random + :refguide: random;distributions, random;gauss + + ''' +doc = NumpyDocString(doc_txt) + + +def test_signature(): + assert doc['Signature'].startswith('numpy.multivariate_normal(') + assert doc['Signature'].endswith('spam=None)') + +def test_summary(): + assert doc['Summary'][0].startswith('Draw values') + assert doc['Summary'][-1].endswith('covariance.') + +def test_extended_summary(): + assert doc['Extended Summary'][0].startswith('The multivariate normal') + +def test_parameters(): + assert_equal(len(doc['Parameters']), 3) + assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) + + arg, arg_type, desc = doc['Parameters'][1] + assert_equal(arg_type, '(N, N) ndarray') + assert desc[0].startswith('Covariance matrix') + assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' + +def test_other_parameters(): + assert_equal(len(doc['Other Parameters']), 1) + assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) + arg, arg_type, desc = doc['Other Parameters'][0] + assert_equal(arg_type, 'parrot') + assert desc[0].startswith('A parrot off its mortal coil') + +def test_returns(): + assert_equal(len(doc['Returns']), 2) + arg, arg_type, desc = doc['Returns'][0] + assert_equal(arg, 'out') + assert_equal(arg_type, 'ndarray') + assert desc[0].startswith('The drawn samples') + assert desc[-1].endswith('distribution.') + + arg, arg_type, desc = doc['Returns'][1] + assert_equal(arg, 'list of str') + assert_equal(arg_type, '') + assert desc[0].startswith('This is not a real') + assert desc[-1].endswith('anonymous return values.') + +def test_notes(): + assert doc['Notes'][0].startswith('Instead') + assert doc['Notes'][-1].endswith('definite.') + assert_equal(len(doc['Notes']), 17) + +def test_references(): + assert doc['References'][0].startswith('..') + assert 
doc['References'][-1].endswith('2001.') + +def test_examples(): + assert doc['Examples'][0].startswith('>>>') + assert doc['Examples'][-1].endswith('True]') + +def test_index(): + assert_equal(doc['index']['default'], 'random') + assert_equal(len(doc['index']), 2) + assert_equal(len(doc['index']['refguide']), 2) + +def non_blank_line_by_line_compare(a,b): + a = textwrap.dedent(a) + b = textwrap.dedent(b) + a = [l.rstrip() for l in a.split('\n') if l.strip()] + b = [l.rstrip() for l in b.split('\n') if l.strip()] + for n,line in enumerate(a): + if not line == b[n]: + raise AssertionError("Lines %s of a and b differ: " + "\n>>> %s\n<<< %s\n" % + (n,line,b[n])) +def test_str(): + non_blank_line_by_line_compare(str(doc), +"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) + +Draw values from a multivariate normal distribution with specified +mean and covariance. + +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +Parameters +---------- +mean : (N,) ndarray + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + +cov : (N, N) ndarray + Covariance matrix of the distribution. +shape : tuple of ints + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + +Returns +------- +out : ndarray + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. +list of str + This is not a real return value. It exists to test + anonymous return values. + +Other Parameters +---------------- +spam : parrot + A parrot off its mortal coil. + +Raises +------ +RuntimeError + Some error + +Warns +----- +RuntimeWarning + Some warning + +Warnings +-------- +Certain warnings apply. + +See Also +-------- +`some`_, `other`_, `funcs`_ + +`otherfunc`_ + relationship + +Notes +----- +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +References +---------- +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +Examples +-------- +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] + +.. index:: random + :refguide: random;distributions, random;gauss""") + + +def test_sphinx_str(): + sphinx_doc = SphinxDocString(doc_txt) + non_blank_line_by_line_compare(str(sphinx_doc), +""" +.. 
index:: random + single: random;distributions, random;gauss + +Draw values from a multivariate normal distribution with specified +mean and covariance. + +The multivariate normal or Gaussian distribution is a generalisation +of the one-dimensional normal distribution to higher dimensions. + +:Parameters: + + **mean** : (N,) ndarray + + Mean of the N-dimensional distribution. + + .. math:: + + (1+2+3)/3 + + **cov** : (N, N) ndarray + + Covariance matrix of the distribution. + + **shape** : tuple of ints + + Given a shape of, for example, (m,n,k), m*n*k samples are + generated, and packed in an m-by-n-by-k arrangement. Because + each sample is N-dimensional, the output shape is (m,n,k,N). + +:Returns: + + **out** : ndarray + + The drawn samples, arranged according to `shape`. If the + shape given is (m,n,...), then the shape of `out` is is + (m,n,...,N). + + In other words, each entry ``out[i,j,...,:]`` is an N-dimensional + value drawn from the distribution. + + list of str + + This is not a real return value. It exists to test + anonymous return values. + +:Other Parameters: + + **spam** : parrot + + A parrot off its mortal coil. + +:Raises: + + **RuntimeError** + + Some error + +:Warns: + + **RuntimeWarning** + + Some warning + +.. warning:: + + Certain warnings apply. + +.. seealso:: + + :obj:`some`, :obj:`other`, :obj:`funcs` + + :obj:`otherfunc` + relationship + +.. rubric:: Notes + +Instead of specifying the full covariance matrix, popular +approximations include: + + - Spherical covariance (`cov` is a multiple of the identity matrix) + - Diagonal covariance (`cov` has non-negative elements only on the diagonal) + +This geometrical property can be seen in two dimensions by plotting +generated data-points: + +>>> mean = [0,0] +>>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis + +>>> x,y = multivariate_normal(mean,cov,5000).T +>>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() + +Note that the covariance matrix must be symmetric and non-negative +definite. + +.. rubric:: References + +.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic + Processes," 3rd ed., McGraw-Hill Companies, 1991 +.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," + 2nd ed., Wiley, 2001. + +.. only:: latex + + [1]_, [2]_ + +.. rubric:: Examples + +>>> mean = (1,2) +>>> cov = [[1,0],[1,0]] +>>> x = multivariate_normal(mean,cov,(3,3)) +>>> print x.shape +(3, 3, 2) + +The following is probably true, given that 0.6 is roughly twice the +standard deviation: + +>>> print list( (x[0,0,:] - mean) < 0.6 ) +[True, True] +""") + + +doc2 = NumpyDocString(""" + Returns array of indices of the maximum values of along the given axis. + + Parameters + ---------- + a : {array_like} + Array to look in. + axis : {None, integer} + If None, the index is into the flattened array, otherwise along + the specified axis""") + +def test_parameters_without_extended_description(): + assert_equal(len(doc2['Parameters']), 2) + +doc3 = NumpyDocString(""" + my_signature(*params, **kwds) + + Return this and that. + """) + +def test_escape_stars(): + signature = str(doc3).split('\n')[0] + assert_equal(signature, 'my_signature(\*params, \*\*kwds)') + +doc4 = NumpyDocString( + """a.conj() + + Return an array with all complex-valued elements conjugated.""") + +def test_empty_extended_summary(): + assert_equal(doc4['Extended Summary'], []) + +doc5 = NumpyDocString( + """ + a.something() + + Raises + ------ + LinAlgException + If array is singular. 
+ + Warns + ----- + SomeWarning + If needed + """) + +def test_raises(): + assert_equal(len(doc5['Raises']), 1) + name,_,desc = doc5['Raises'][0] + assert_equal(name,'LinAlgException') + assert_equal(desc,['If array is singular.']) + +def test_warns(): + assert_equal(len(doc5['Warns']), 1) + name,_,desc = doc5['Warns'][0] + assert_equal(name,'SomeWarning') + assert_equal(desc,['If needed']) + +def test_see_also(): + doc6 = NumpyDocString( + """ + z(x,theta) + + See Also + -------- + func_a, func_b, func_c + func_d : some equivalent func + foo.func_e : some other func over + multiple lines + func_f, func_g, :meth:`func_h`, func_j, + func_k + :obj:`baz.obj_q` + :class:`class_j`: fubar + foobar + """) + + assert len(doc6['See Also']) == 12 + for func, desc, role in doc6['See Also']: + if func in ('func_a', 'func_b', 'func_c', 'func_f', + 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): + assert(not desc) + else: + assert(desc) + + if func == 'func_h': + assert role == 'meth' + elif func == 'baz.obj_q': + assert role == 'obj' + elif func == 'class_j': + assert role == 'class' + else: + assert role is None + + if func == 'func_d': + assert desc == ['some equivalent func'] + elif func == 'foo.func_e': + assert desc == ['some other func over', 'multiple lines'] + elif func == 'class_j': + assert desc == ['fubar', 'foobar'] + +def test_see_also_print(): + class Dummy(object): + """ + See Also + -------- + func_a, func_b + func_c : some relationship + goes here + func_d + """ + pass + + obj = Dummy() + s = str(FunctionDoc(obj, role='func')) + assert(':func:`func_a`, :func:`func_b`' in s) + assert(' some relationship' in s) + assert(':func:`func_d`' in s) + +doc7 = NumpyDocString(""" + + Doc starts on second line. + + """) + +def test_empty_first_line(): + assert doc7['Summary'][0].startswith('Doc starts') + + +def test_no_summary(): + str(SphinxDocString(""" + Parameters + ----------""")) + + +def test_unicode(): + doc = SphinxDocString(""" + öäöäöäöäöåååå + + öäöäöäööäååå + + Parameters + ---------- + ååå : äää + ööö + + Returns + ------- + ååå : ööö + äää + + """) + assert isinstance(doc['Summary'][0], str) + assert doc['Summary'][0] == 'öäöäöäöäöåååå' + +def test_plot_examples(): + cfg = dict(use_plots=True) + + doc = SphinxDocString(""" + Examples + -------- + >>> import matplotlib.pyplot as plt + >>> plt.plot([1,2,3],[4,5,6]) + >>> plt.show() + """, config=cfg) + assert 'plot::' in str(doc), str(doc) + + doc = SphinxDocString(""" + Examples + -------- + .. plot:: + + import matplotlib.pyplot as plt + plt.plot([1,2,3],[4,5,6]) + plt.show() + """, config=cfg) + assert str(doc).count('plot::') == 1, str(doc) + +def test_class_members(): + + class Dummy(object): + """ + Dummy class. 
+ + """ + def spam(self, a, b): + """Spam\n\nSpam spam.""" + pass + def ham(self, c, d): + """Cheese\n\nNo cheese.""" + pass + @property + def spammity(self): + """Spammity index""" + return 0.95 + + class Ignorable(object): + """local class, to be ignored""" + pass + + for cls in (ClassDoc, SphinxClassDoc): + doc = cls(Dummy, config=dict(show_class_members=False)) + assert 'Methods' not in str(doc), (cls, str(doc)) + assert 'spam' not in str(doc), (cls, str(doc)) + assert 'ham' not in str(doc), (cls, str(doc)) + assert 'spammity' not in str(doc), (cls, str(doc)) + assert 'Spammity index' not in str(doc), (cls, str(doc)) + + doc = cls(Dummy, config=dict(show_class_members=True)) + assert 'Methods' in str(doc), (cls, str(doc)) + assert 'spam' in str(doc), (cls, str(doc)) + assert 'ham' in str(doc), (cls, str(doc)) + assert 'spammity' in str(doc), (cls, str(doc)) + + if cls is SphinxClassDoc: + assert '.. autosummary::' in str(doc), str(doc) + else: + assert 'Spammity index' in str(doc), str(doc) + +def test_duplicate_signature(): + # Duplicate function signatures occur e.g. in ufuncs, when the + # automatic mechanism adds one, and a more detailed comes from the + # docstring itself. + + doc = NumpyDocString( + """ + z(x1, x2) + + z(a, theta) + """) + + assert doc['Signature'].strip() == 'z(a, theta)' + + +class_doc_txt = """ + Foo + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Aaa. + jac : callable ``jac(t, y, *jac_args)`` + Bbb. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Methods + ------- + a + b + c + + Examples + -------- + For usage examples, see `ode`. +""" + +def test_class_members_doc(): + doc = ClassDoc(None, class_doc_txt) + non_blank_line_by_line_compare(str(doc), + """ + Foo + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Aaa. + jac : callable ``jac(t, y, *jac_args)`` + Bbb. + + Examples + -------- + For usage examples, see `ode`. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Methods + ------- + a + + b + + c + + .. index:: + + """) + +def test_class_members_doc_sphinx(): + doc = SphinxClassDoc(None, class_doc_txt) + non_blank_line_by_line_compare(str(doc), + """ + Foo + + :Parameters: + + **f** : callable ``f(t, y, *f_args)`` + + Aaa. + + **jac** : callable ``jac(t, y, *jac_args)`` + + Bbb. + + .. rubric:: Examples + + For usage examples, see `ode`. + + .. rubric:: Attributes + + === ========== + t (float) Current time. + y (ndarray) Current variable values. + === ========== + + .. rubric:: Methods + + === ========== + a + b + c + === ========== + + """) + +if __name__ == "__main__": + import nose + nose.run() diff -Nru numpydoc-0.4/numpydoc/tests/test_linkcode.py numpydoc-0.5/numpydoc/tests/test_linkcode.py --- numpydoc-0.4/numpydoc/tests/test_linkcode.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/tests/test_linkcode.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,5 @@ +from __future__ import division, absolute_import, print_function + +import numpydoc.linkcode + +# No tests at the moment... 
diff -Nru numpydoc-0.4/numpydoc/tests/test_phantom_import.py numpydoc-0.5/numpydoc/tests/test_phantom_import.py --- numpydoc-0.4/numpydoc/tests/test_phantom_import.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/tests/test_phantom_import.py 2014-01-25 12:49:54.000000000 +0000 @@ -0,0 +1,12 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("phantom_import not ported to Py3") + + import numpydoc.phantom_import + +# No tests at the moment... diff -Nru numpydoc-0.4/numpydoc/tests/test_plot_directive.py numpydoc-0.5/numpydoc/tests/test_plot_directive.py --- numpydoc-0.4/numpydoc/tests/test_plot_directive.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/tests/test_plot_directive.py 2014-01-25 12:49:54.000000000 +0000 @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("plot_directive not ported to Python 3 (use the one from Matplotlib instead)") + import numpydoc.plot_directive + +# No tests at the moment... diff -Nru numpydoc-0.4/numpydoc/tests/test_traitsdoc.py numpydoc-0.5/numpydoc/tests/test_traitsdoc.py --- numpydoc-0.4/numpydoc/tests/test_traitsdoc.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/tests/test_traitsdoc.py 2014-01-25 12:49:54.000000000 +0000 @@ -0,0 +1,11 @@ +from __future__ import division, absolute_import, print_function + +import sys +from nose import SkipTest + +def test_import(): + if sys.version_info[0] >= 3: + raise SkipTest("traitsdoc not ported to Python3") + import numpydoc.traitsdoc + +# No tests at the moment... diff -Nru numpydoc-0.4/numpydoc/traitsdoc.py numpydoc-0.5/numpydoc/traitsdoc.py --- numpydoc-0.4/numpydoc/traitsdoc.py 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/numpydoc/traitsdoc.py 2013-07-24 16:15:41.000000000 +0000 @@ -0,0 +1,142 @@ +""" +========= +traitsdoc +========= + +Sphinx extension that handles docstrings in the Numpy standard format, [1] +and support Traits [2]. + +This extension can be used as a replacement for ``numpydoc`` when support +for Traits is required. + +.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard +.. [2] http://code.enthought.com/projects/traits/ + +""" +from __future__ import division, absolute_import, print_function + +import inspect +import os +import pydoc +import collections + +from . import docscrape +from . import docscrape_sphinx +from .docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString + +from . import numpydoc + +from . import comment_eater + +class SphinxTraitsDoc(SphinxClassDoc): + def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): + if not inspect.isclass(cls): + raise ValueError("Initialise using a class. Got %r" % cls) + self._cls = cls + + if modulename and not modulename.endswith('.'): + modulename += '.' 
+ self._mod = modulename + self._name = cls.__name__ + self._func_doc = func_doc + + docstring = pydoc.getdoc(cls) + docstring = docstring.split('\n') + + # De-indent paragraph + try: + indent = min(len(s) - len(s.lstrip()) for s in docstring + if s.strip()) + except ValueError: + indent = 0 + + for n,line in enumerate(docstring): + docstring[n] = docstring[n][indent:] + + self._doc = docscrape.Reader(docstring) + self._parsed_data = { + 'Signature': '', + 'Summary': '', + 'Description': [], + 'Extended Summary': [], + 'Parameters': [], + 'Returns': [], + 'Raises': [], + 'Warns': [], + 'Other Parameters': [], + 'Traits': [], + 'Methods': [], + 'See Also': [], + 'Notes': [], + 'References': '', + 'Example': '', + 'Examples': '', + 'index': {} + } + + self._parse() + + def _str_summary(self): + return self['Summary'] + [''] + + def _str_extended_summary(self): + return self['Description'] + self['Extended Summary'] + [''] + + def __str__(self, indent=0, func_role="func"): + out = [] + out += self._str_signature() + out += self._str_index() + [''] + out += self._str_summary() + out += self._str_extended_summary() + for param_list in ('Parameters', 'Traits', 'Methods', + 'Returns','Raises'): + out += self._str_param_list(param_list) + out += self._str_see_also("obj") + out += self._str_section('Notes') + out += self._str_references() + out += self._str_section('Example') + out += self._str_section('Examples') + out = self._str_indent(out,indent) + return '\n'.join(out) + +def looks_like_issubclass(obj, classname): + """ Return True if the object has a class or superclass with the given class + name. + + Ignores old-style classes. + """ + t = obj + if t.__name__ == classname: + return True + for klass in t.__mro__: + if klass.__name__ == classname: + return True + return False + +def get_doc_object(obj, what=None, config=None): + if what is None: + if inspect.isclass(obj): + what = 'class' + elif inspect.ismodule(obj): + what = 'module' + elif isinstance(obj, collections.Callable): + what = 'function' + else: + what = 'object' + if what == 'class': + doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) + if looks_like_issubclass(obj, 'HasTraits'): + for name, trait, comment in comment_eater.get_class_traits(obj): + # Exclude private traits. 
+ if not name.startswith('_'): + doc['Traits'].append((name, trait, comment.splitlines())) + return doc + elif what in ('function', 'method'): + return SphinxFunctionDoc(obj, '', config=config) + else: + return SphinxDocString(pydoc.getdoc(obj), config=config) + +def setup(app): + # init numpydoc + numpydoc.setup(app, get_doc_object) + diff -Nru numpydoc-0.4/numpydoc.egg-info/entry_points.txt numpydoc-0.5/numpydoc.egg-info/entry_points.txt --- numpydoc-0.4/numpydoc.egg-info/entry_points.txt 2010-10-01 19:32:14.000000000 +0000 +++ numpydoc-0.5/numpydoc.egg-info/entry_points.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,3 +0,0 @@ -[console_scripts] -autosummary_generate = numpydoc.autosummary_generate:main - diff -Nru numpydoc-0.4/numpydoc.egg-info/not-zip-safe numpydoc-0.5/numpydoc.egg-info/not-zip-safe --- numpydoc-0.4/numpydoc.egg-info/not-zip-safe 2010-10-01 19:08:08.000000000 +0000 +++ numpydoc-0.5/numpydoc.egg-info/not-zip-safe 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ - diff -Nru numpydoc-0.4/numpydoc.egg-info/PKG-INFO numpydoc-0.5/numpydoc.egg-info/PKG-INFO --- numpydoc-0.4/numpydoc.egg-info/PKG-INFO 2010-10-01 19:32:14.000000000 +0000 +++ numpydoc-0.5/numpydoc.egg-info/PKG-INFO 2014-06-08 20:14:48.000000000 +0000 @@ -1,8 +1,8 @@ -Metadata-Version: 1.0 +Metadata-Version: 1.1 Name: numpydoc -Version: 0.4 +Version: 0.5 Summary: Sphinx extension to support docstrings in Numpy format -Home-page: http://github.com/numpy/numpy/tree/master/doc/sphinxext +Home-page: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt Author: Pauli Virtanen and others Author-email: pav@iki.fi License: BSD @@ -13,3 +13,4 @@ Classifier: Environment :: Plugins Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Documentation +Requires: sphinx (>= 1.0.1) diff -Nru numpydoc-0.4/numpydoc.egg-info/requires.txt numpydoc-0.5/numpydoc.egg-info/requires.txt --- numpydoc-0.4/numpydoc.egg-info/requires.txt 2010-10-01 19:32:14.000000000 +0000 +++ numpydoc-0.5/numpydoc.egg-info/requires.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -Sphinx >= 1.0.1 \ No newline at end of file diff -Nru numpydoc-0.4/numpydoc.egg-info/SOURCES.txt numpydoc-0.5/numpydoc.egg-info/SOURCES.txt --- numpydoc-0.4/numpydoc.egg-info/SOURCES.txt 2010-10-01 19:32:14.000000000 +0000 +++ numpydoc-0.5/numpydoc.egg-info/SOURCES.txt 2014-06-08 20:14:48.000000000 +0000 @@ -1,21 +1,23 @@ LICENSE.txt MANIFEST.in -README.txt -__init__.py -comment_eater.py -compiler_unparse.py -docscrape.py -docscrape_sphinx.py -numpydoc.py -phantom_import.py -plot_directive.py +README.rst setup.py -traitsdoc.py +numpydoc/__init__.py +numpydoc/comment_eater.py +numpydoc/compiler_unparse.py +numpydoc/docscrape.py +numpydoc/docscrape_sphinx.py +numpydoc/linkcode.py +numpydoc/numpydoc.py +numpydoc/phantom_import.py +numpydoc/plot_directive.py +numpydoc/traitsdoc.py numpydoc.egg-info/PKG-INFO numpydoc.egg-info/SOURCES.txt numpydoc.egg-info/dependency_links.txt -numpydoc.egg-info/entry_points.txt -numpydoc.egg-info/not-zip-safe -numpydoc.egg-info/requires.txt numpydoc.egg-info/top_level.txt -tests/test_docscrape.py \ No newline at end of file +numpydoc/tests/test_docscrape.py +numpydoc/tests/test_linkcode.py +numpydoc/tests/test_phantom_import.py +numpydoc/tests/test_plot_directive.py +numpydoc/tests/test_traitsdoc.py \ No newline at end of file diff -Nru numpydoc-0.4/numpydoc.py numpydoc-0.5/numpydoc.py --- numpydoc-0.4/numpydoc.py 2010-10-01 10:10:27.000000000 +0000 +++ numpydoc-0.5/numpydoc.py 1970-01-01 00:00:00.000000000 +0000 
@@ -1,164 +0,0 @@ -""" -======== -numpydoc -======== - -Sphinx extension that handles docstrings in the Numpy standard format. [1] - -It will: - -- Convert Parameters etc. sections to field lists. -- Convert See Also section to a See also entry. -- Renumber references. -- Extract the signature from the docstring, if it can't be determined otherwise. - -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard - -""" - -import os, re, pydoc -from docscrape_sphinx import get_doc_object, SphinxDocString -from sphinx.util.compat import Directive -import inspect - -def mangle_docstrings(app, what, name, obj, options, lines, - reference_offset=[0]): - - cfg = dict(use_plots=app.config.numpydoc_use_plots, - show_class_members=app.config.numpydoc_show_class_members) - - if what == 'module': - # Strip top title - title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*', - re.I|re.S) - lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n") - else: - doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg) - lines[:] = unicode(doc).split(u"\n") - - if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \ - obj.__name__: - if hasattr(obj, '__module__'): - v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__)) - else: - v = dict(full_name=obj.__name__) - lines += [u'', u'.. htmlonly::', ''] - lines += [u' %s' % x for x in - (app.config.numpydoc_edit_link % v).split("\n")] - - # replace reference numbers so that there are no duplicates - references = [] - for line in lines: - line = line.strip() - m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I) - if m: - references.append(m.group(1)) - - # start renaming from the longest string, to avoid overwriting parts - references.sort(key=lambda x: -len(x)) - if references: - for i, line in enumerate(lines): - for r in references: - if re.match(ur'^\d+$', r): - new_r = u"R%d" % (reference_offset[0] + int(r)) - else: - new_r = u"%s%d" % (r, reference_offset[0]) - lines[i] = lines[i].replace(u'[%s]_' % r, - u'[%s]_' % new_r) - lines[i] = lines[i].replace(u'.. [%s]' % r, - u'.. 
[%s]' % new_r) - - reference_offset[0] += len(references) - -def mangle_signature(app, what, name, obj, options, sig, retann): - # Do not try to inspect classes that don't define `__init__` - if (inspect.isclass(obj) and - (not hasattr(obj, '__init__') or - 'initializes x; see ' in pydoc.getdoc(obj.__init__))): - return '', '' - - if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return - if not hasattr(obj, '__doc__'): return - - doc = SphinxDocString(pydoc.getdoc(obj)) - if doc['Signature']: - sig = re.sub(u"^[^(]*", u"", doc['Signature']) - return sig, u'' - -def setup(app, get_doc_object_=get_doc_object): - global get_doc_object - get_doc_object = get_doc_object_ - - app.connect('autodoc-process-docstring', mangle_docstrings) - app.connect('autodoc-process-signature', mangle_signature) - app.add_config_value('numpydoc_edit_link', None, False) - app.add_config_value('numpydoc_use_plots', None, False) - app.add_config_value('numpydoc_show_class_members', True, True) - - # Extra mangling domains - app.add_domain(NumpyPythonDomain) - app.add_domain(NumpyCDomain) - -#------------------------------------------------------------------------------ -# Docstring-mangling domains -#------------------------------------------------------------------------------ - -from docutils.statemachine import ViewList -from sphinx.domains.c import CDomain -from sphinx.domains.python import PythonDomain - -class ManglingDomainBase(object): - directive_mangling_map = {} - - def __init__(self, *a, **kw): - super(ManglingDomainBase, self).__init__(*a, **kw) - self.wrap_mangling_directives() - - def wrap_mangling_directives(self): - for name, objtype in self.directive_mangling_map.items(): - self.directives[name] = wrap_mangling_directive( - self.directives[name], objtype) - -class NumpyPythonDomain(ManglingDomainBase, PythonDomain): - name = 'np' - directive_mangling_map = { - 'function': 'function', - 'class': 'class', - 'exception': 'class', - 'method': 'function', - 'classmethod': 'function', - 'staticmethod': 'function', - 'attribute': 'attribute', - } - -class NumpyCDomain(ManglingDomainBase, CDomain): - name = 'np-c' - directive_mangling_map = { - 'function': 'function', - 'member': 'attribute', - 'macro': 'function', - 'type': 'class', - 'var': 'object', - } - -def wrap_mangling_directive(base_directive, objtype): - class directive(base_directive): - def run(self): - env = self.state.document.settings.env - - name = None - if self.arguments: - m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0]) - name = m.group(2).strip() - - if not name: - name = self.arguments[0] - - lines = list(self.content) - mangle_docstrings(env.app, objtype, name, None, None, lines) - self.content = ViewList(lines, self.content.parent) - - return base_directive.run(self) - - return directive - diff -Nru numpydoc-0.4/phantom_import.py numpydoc-0.5/phantom_import.py --- numpydoc-0.4/phantom_import.py 2010-09-15 19:18:51.000000000 +0000 +++ numpydoc-0.5/phantom_import.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,162 +0,0 @@ -""" -============== -phantom_import -============== - -Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar -extensions to use docstrings loaded from an XML file. - -This extension loads an XML file in the Pydocweb format [1] and -creates a dummy module that contains the specified docstrings. This -can be used to get the current docstrings from a Pydocweb instance -without needing to rebuild the documented module. - -.. 
[1] http://code.google.com/p/pydocweb - -""" -import imp, sys, compiler, types, os, inspect, re - -def setup(app): - app.connect('builder-inited', initialize) - app.add_config_value('phantom_import_file', None, True) - -def initialize(app): - fn = app.config.phantom_import_file - if (fn and os.path.isfile(fn)): - print "[numpydoc] Phantom importing modules from", fn, "..." - import_phantom_module(fn) - -#------------------------------------------------------------------------------ -# Creating 'phantom' modules from an XML description -#------------------------------------------------------------------------------ -def import_phantom_module(xml_file): - """ - Insert a fake Python module to sys.modules, based on a XML file. - - The XML file is expected to conform to Pydocweb DTD. The fake - module will contain dummy objects, which guarantee the following: - - - Docstrings are correct. - - Class inheritance relationships are correct (if present in XML). - - Function argspec is *NOT* correct (even if present in XML). - Instead, the function signature is prepended to the function docstring. - - Class attributes are *NOT* correct; instead, they are dummy objects. - - Parameters - ---------- - xml_file : str - Name of an XML file to read - - """ - import lxml.etree as etree - - object_cache = {} - - tree = etree.parse(xml_file) - root = tree.getroot() - - # Sort items so that - # - Base classes come before classes inherited from them - # - Modules come before their contents - all_nodes = dict([(n.attrib['id'], n) for n in root]) - - def _get_bases(node, recurse=False): - bases = [x.attrib['ref'] for x in node.findall('base')] - if recurse: - j = 0 - while True: - try: - b = bases[j] - except IndexError: break - if b in all_nodes: - bases.extend(_get_bases(all_nodes[b])) - j += 1 - return bases - - type_index = ['module', 'class', 'callable', 'object'] - - def base_cmp(a, b): - x = cmp(type_index.index(a.tag), type_index.index(b.tag)) - if x != 0: return x - - if a.tag == 'class' and b.tag == 'class': - a_bases = _get_bases(a, recurse=True) - b_bases = _get_bases(b, recurse=True) - x = cmp(len(a_bases), len(b_bases)) - if x != 0: return x - if a.attrib['id'] in b_bases: return -1 - if b.attrib['id'] in a_bases: return 1 - - return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.')) - - nodes = root.getchildren() - nodes.sort(base_cmp) - - # Create phantom items - for node in nodes: - name = node.attrib['id'] - doc = (node.text or '').decode('string-escape') + "\n" - if doc == "\n": doc = "" - - # create parent, if missing - parent = name - while True: - parent = '.'.join(parent.split('.')[:-1]) - if not parent: break - if parent in object_cache: break - obj = imp.new_module(parent) - object_cache[parent] = obj - sys.modules[parent] = obj - - # create object - if node.tag == 'module': - obj = imp.new_module(name) - obj.__doc__ = doc - sys.modules[name] = obj - elif node.tag == 'class': - bases = [object_cache[b] for b in _get_bases(node) - if b in object_cache] - bases.append(object) - init = lambda self: None - init.__doc__ = doc - obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init}) - obj.__name__ = name.split('.')[-1] - elif node.tag == 'callable': - funcname = node.attrib['id'].split('.')[-1] - argspec = node.attrib.get('argspec') - if argspec: - argspec = re.sub('^[^(]*', '', argspec) - doc = "%s%s\n\n%s" % (funcname, argspec, doc) - obj = lambda: 0 - obj.__argspec_is_invalid_ = True - obj.func_name = funcname - obj.__name__ = name - obj.__doc__ = doc - if 
inspect.isclass(object_cache[parent]): - obj.__objclass__ = object_cache[parent] - else: - class Dummy(object): pass - obj = Dummy() - obj.__name__ = name - obj.__doc__ = doc - if inspect.isclass(object_cache[parent]): - obj.__get__ = lambda: None - object_cache[name] = obj - - if parent: - if inspect.ismodule(object_cache[parent]): - obj.__module__ = parent - setattr(object_cache[parent], name.split('.')[-1], obj) - - # Populate items - for node in root: - obj = object_cache.get(node.attrib['id']) - if obj is None: continue - for ref in node.findall('ref'): - if node.tag == 'class': - if ref.attrib['ref'].startswith(node.attrib['id'] + '.'): - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) - else: - setattr(obj, ref.attrib['name'], - object_cache.get(ref.attrib['ref'])) diff -Nru numpydoc-0.4/PKG-INFO numpydoc-0.5/PKG-INFO --- numpydoc-0.4/PKG-INFO 2010-10-01 19:32:14.000000000 +0000 +++ numpydoc-0.5/PKG-INFO 2014-06-08 20:14:48.000000000 +0000 @@ -1,8 +1,8 @@ -Metadata-Version: 1.0 +Metadata-Version: 1.1 Name: numpydoc -Version: 0.4 +Version: 0.5 Summary: Sphinx extension to support docstrings in Numpy format -Home-page: http://github.com/numpy/numpy/tree/master/doc/sphinxext +Home-page: https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt Author: Pauli Virtanen and others Author-email: pav@iki.fi License: BSD @@ -13,3 +13,4 @@ Classifier: Environment :: Plugins Classifier: License :: OSI Approved :: BSD License Classifier: Topic :: Documentation +Requires: sphinx (>= 1.0.1) diff -Nru numpydoc-0.4/plot_directive.py numpydoc-0.5/plot_directive.py --- numpydoc-0.4/plot_directive.py 2010-10-01 13:19:50.000000000 +0000 +++ numpydoc-0.5/plot_directive.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,619 +0,0 @@ -""" -A special directive for generating a matplotlib plot. - -.. warning:: - - This is a hacked version of plot_directive.py from Matplotlib. - It's very much subject to change! - - -Usage ------ - -Can be used like this:: - - .. plot:: examples/example.py - - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3], [4,5,6]) - - .. plot:: - - A plotting example: - - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3], [4,5,6]) - -The content is interpreted as doctest formatted if it has a line starting -with ``>>>``. - -The ``plot`` directive supports the options - - format : {'python', 'doctest'} - Specify the format of the input - - include-source : bool - Whether to display the source code. Default can be changed in conf.py - -and the ``image`` directive options ``alt``, ``height``, ``width``, -``scale``, ``align``, ``class``. - -Configuration options ---------------------- - -The plot directive has the following configuration options: - - plot_include_source - Default value for the include-source option - - plot_pre_code - Code that should be executed before each plot. - - plot_basedir - Base directory, to which plot:: file names are relative to. - (If None or empty, file names are relative to the directoly where - the file containing the directive is.) - - plot_formats - File formats to generate. List of tuples or strings:: - - [(suffix, dpi), suffix, ...] - - that determine the file format and the DPI. For entries whose - DPI was omitted, sensible defaults are chosen. - - plot_html_show_formats - Whether to show links to the files in HTML. - -TODO ----- - -* Refactor Latex output; now it's plain images, but it would be nice - to make them appear side-by-side, or in floats. 
- -""" - -import sys, os, glob, shutil, imp, warnings, cStringIO, re, textwrap, traceback -import sphinx - -import warnings -warnings.warn("A plot_directive module is also available under " - "matplotlib.sphinxext; expect this numpydoc.plot_directive " - "module to be deprecated after relevant features have been " - "integrated there.", - FutureWarning, stacklevel=2) - - -#------------------------------------------------------------------------------ -# Registration hook -#------------------------------------------------------------------------------ - -def setup(app): - setup.app = app - setup.config = app.config - setup.confdir = app.confdir - - app.add_config_value('plot_pre_code', '', True) - app.add_config_value('plot_include_source', False, True) - app.add_config_value('plot_formats', ['png', 'hires.png', 'pdf'], True) - app.add_config_value('plot_basedir', None, True) - app.add_config_value('plot_html_show_formats', True, True) - - app.add_directive('plot', plot_directive, True, (0, 1, False), - **plot_directive_options) - -#------------------------------------------------------------------------------ -# plot:: directive -#------------------------------------------------------------------------------ -from docutils.parsers.rst import directives -from docutils import nodes - -def plot_directive(name, arguments, options, content, lineno, - content_offset, block_text, state, state_machine): - return run(arguments, content, options, state_machine, state, lineno) -plot_directive.__doc__ = __doc__ - -def _option_boolean(arg): - if not arg or not arg.strip(): - # no argument given, assume used as a flag - return True - elif arg.strip().lower() in ('no', '0', 'false'): - return False - elif arg.strip().lower() in ('yes', '1', 'true'): - return True - else: - raise ValueError('"%s" unknown boolean' % arg) - -def _option_format(arg): - return directives.choice(arg, ('python', 'lisp')) - -def _option_align(arg): - return directives.choice(arg, ("top", "middle", "bottom", "left", "center", - "right")) - -plot_directive_options = {'alt': directives.unchanged, - 'height': directives.length_or_unitless, - 'width': directives.length_or_percentage_or_unitless, - 'scale': directives.nonnegative_int, - 'align': _option_align, - 'class': directives.class_option, - 'include-source': _option_boolean, - 'format': _option_format, - } - -#------------------------------------------------------------------------------ -# Generating output -#------------------------------------------------------------------------------ - -from docutils import nodes, utils - -try: - # Sphinx depends on either Jinja or Jinja2 - import jinja2 - def format_template(template, **kw): - return jinja2.Template(template).render(**kw) -except ImportError: - import jinja - def format_template(template, **kw): - return jinja.from_string(template, **kw) - -TEMPLATE = """ -{{ source_code }} - -{{ only_html }} - - {% if source_link or (html_show_formats and not multi_image) %} - ( - {%- if source_link -%} - `Source code <{{ source_link }}>`__ - {%- endif -%} - {%- if html_show_formats and not multi_image -%} - {%- for img in images -%} - {%- for fmt in img.formats -%} - {%- if source_link or not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - {%- endfor -%} - {%- endif -%} - ) - {% endif %} - - {% for img in images %} - .. 
figure:: {{ build_dir }}/{{ img.basename }}.png - {%- for option in options %} - {{ option }} - {% endfor %} - - {% if html_show_formats and multi_image -%} - ( - {%- for fmt in img.formats -%} - {%- if not loop.first -%}, {% endif -%} - `{{ fmt }} <{{ dest_dir }}/{{ img.basename }}.{{ fmt }}>`__ - {%- endfor -%} - ) - {%- endif -%} - {% endfor %} - -{{ only_latex }} - - {% for img in images %} - .. image:: {{ build_dir }}/{{ img.basename }}.pdf - {% endfor %} - -""" - -class ImageFile(object): - def __init__(self, basename, dirname): - self.basename = basename - self.dirname = dirname - self.formats = [] - - def filename(self, format): - return os.path.join(self.dirname, "%s.%s" % (self.basename, format)) - - def filenames(self): - return [self.filename(fmt) for fmt in self.formats] - -def run(arguments, content, options, state_machine, state, lineno): - if arguments and content: - raise RuntimeError("plot:: directive can't have both args and content") - - document = state_machine.document - config = document.settings.env.config - - options.setdefault('include-source', config.plot_include_source) - - # determine input - rst_file = document.attributes['source'] - rst_dir = os.path.dirname(rst_file) - - if arguments: - if not config.plot_basedir: - source_file_name = os.path.join(rst_dir, - directives.uri(arguments[0])) - else: - source_file_name = os.path.join(setup.confdir, config.plot_basedir, - directives.uri(arguments[0])) - code = open(source_file_name, 'r').read() - output_base = os.path.basename(source_file_name) - else: - source_file_name = rst_file - code = textwrap.dedent("\n".join(map(str, content))) - counter = document.attributes.get('_plot_counter', 0) + 1 - document.attributes['_plot_counter'] = counter - base, ext = os.path.splitext(os.path.basename(source_file_name)) - output_base = '%s-%d.py' % (base, counter) - - base, source_ext = os.path.splitext(output_base) - if source_ext in ('.py', '.rst', '.txt'): - output_base = base - else: - source_ext = '' - - # ensure that LaTeX includegraphics doesn't choke in foo.bar.pdf filenames - output_base = output_base.replace('.', '-') - - # is it in doctest format? 
- is_doctest = contains_doctest(code) - if options.has_key('format'): - if options['format'] == 'python': - is_doctest = False - else: - is_doctest = True - - # determine output directory name fragment - source_rel_name = relpath(source_file_name, setup.confdir) - source_rel_dir = os.path.dirname(source_rel_name) - while source_rel_dir.startswith(os.path.sep): - source_rel_dir = source_rel_dir[1:] - - # build_dir: where to place output files (temporarily) - build_dir = os.path.join(os.path.dirname(setup.app.doctreedir), - 'plot_directive', - source_rel_dir) - if not os.path.exists(build_dir): - os.makedirs(build_dir) - - # output_dir: final location in the builder's directory - dest_dir = os.path.abspath(os.path.join(setup.app.builder.outdir, - source_rel_dir)) - - # how to link to files from the RST file - dest_dir_link = os.path.join(relpath(setup.confdir, rst_dir), - source_rel_dir).replace(os.path.sep, '/') - build_dir_link = relpath(build_dir, rst_dir).replace(os.path.sep, '/') - source_link = dest_dir_link + '/' + output_base + source_ext - - # make figures - try: - results = makefig(code, source_file_name, build_dir, output_base, - config) - errors = [] - except PlotError, err: - reporter = state.memo.reporter - sm = reporter.system_message( - 2, "Exception occurred in plotting %s: %s" % (output_base, err), - line=lineno) - results = [(code, [])] - errors = [sm] - - # generate output restructuredtext - total_lines = [] - for j, (code_piece, images) in enumerate(results): - if options['include-source']: - if is_doctest: - lines = [''] - lines += [row.rstrip() for row in code_piece.split('\n')] - else: - lines = ['.. code-block:: python', ''] - lines += [' %s' % row.rstrip() - for row in code_piece.split('\n')] - source_code = "\n".join(lines) - else: - source_code = "" - - opts = [':%s: %s' % (key, val) for key, val in options.items() - if key in ('alt', 'height', 'width', 'scale', 'align', 'class')] - - only_html = ".. only:: html" - only_latex = ".. 
only:: latex" - - if j == 0: - src_link = source_link - else: - src_link = None - - result = format_template( - TEMPLATE, - dest_dir=dest_dir_link, - build_dir=build_dir_link, - source_link=src_link, - multi_image=len(images) > 1, - only_html=only_html, - only_latex=only_latex, - options=opts, - images=images, - source_code=source_code, - html_show_formats=config.plot_html_show_formats) - - total_lines.extend(result.split("\n")) - total_lines.extend("\n") - - if total_lines: - state_machine.insert_input(total_lines, source=source_file_name) - - # copy image files to builder's output directory - if not os.path.exists(dest_dir): - os.makedirs(dest_dir) - - for code_piece, images in results: - for img in images: - for fn in img.filenames(): - shutil.copyfile(fn, os.path.join(dest_dir, - os.path.basename(fn))) - - # copy script (if necessary) - if source_file_name == rst_file: - target_name = os.path.join(dest_dir, output_base + source_ext) - f = open(target_name, 'w') - f.write(unescape_doctest(code)) - f.close() - - return errors - - -#------------------------------------------------------------------------------ -# Run code and capture figures -#------------------------------------------------------------------------------ - -import matplotlib -matplotlib.use('Agg') -import matplotlib.pyplot as plt -import matplotlib.image as image -from matplotlib import _pylab_helpers - -import exceptions - -def contains_doctest(text): - try: - # check if it's valid Python as-is - compile(text, '', 'exec') - return False - except SyntaxError: - pass - r = re.compile(r'^\s*>>>', re.M) - m = r.search(text) - return bool(m) - -def unescape_doctest(text): - """ - Extract code from a piece of text, which contains either Python code - or doctests. - - """ - if not contains_doctest(text): - return text - - code = "" - for line in text.split("\n"): - m = re.match(r'^\s*(>>>|\.\.\.) (.*)$', line) - if m: - code += m.group(2) + "\n" - elif line.strip(): - code += "# " + line.strip() + "\n" - else: - code += "\n" - return code - -def split_code_at_show(text): - """ - Split code at plt.show() - - """ - - parts = [] - is_doctest = contains_doctest(text) - - part = [] - for line in text.split("\n"): - if (not is_doctest and line.strip() == 'plt.show()') or \ - (is_doctest and line.strip() == '>>> plt.show()'): - part.append(line) - parts.append("\n".join(part)) - part = [] - else: - part.append(line) - if "\n".join(part).strip(): - parts.append("\n".join(part)) - return parts - -class PlotError(RuntimeError): - pass - -def run_code(code, code_path, ns=None): - # Change the working directory to the directory of the example, so - # it can get at its data files, if any. 
- pwd = os.getcwd() - old_sys_path = list(sys.path) - if code_path is not None: - dirname = os.path.abspath(os.path.dirname(code_path)) - os.chdir(dirname) - sys.path.insert(0, dirname) - - # Redirect stdout - stdout = sys.stdout - sys.stdout = cStringIO.StringIO() - - # Reset sys.argv - old_sys_argv = sys.argv - sys.argv = [code_path] - - try: - try: - code = unescape_doctest(code) - if ns is None: - ns = {} - if not ns: - exec setup.config.plot_pre_code in ns - exec code in ns - except (Exception, SystemExit), err: - raise PlotError(traceback.format_exc()) - finally: - os.chdir(pwd) - sys.argv = old_sys_argv - sys.path[:] = old_sys_path - sys.stdout = stdout - return ns - - -#------------------------------------------------------------------------------ -# Generating figures -#------------------------------------------------------------------------------ - -def out_of_date(original, derived): - """ - Returns True if derivative is out-of-date wrt original, - both of which are full file paths. - """ - return (not os.path.exists(derived) - or os.stat(derived).st_mtime < os.stat(original).st_mtime) - - -def makefig(code, code_path, output_dir, output_base, config): - """ - Run a pyplot script *code* and save the images under *output_dir* - with file names derived from *output_base* - - """ - - # -- Parse format list - default_dpi = {'png': 80, 'hires.png': 200, 'pdf': 50} - formats = [] - for fmt in config.plot_formats: - if isinstance(fmt, str): - formats.append((fmt, default_dpi.get(fmt, 80))) - elif type(fmt) in (tuple, list) and len(fmt)==2: - formats.append((str(fmt[0]), int(fmt[1]))) - else: - raise PlotError('invalid image format "%r" in plot_formats' % fmt) - - # -- Try to determine if all images already exist - - code_pieces = split_code_at_show(code) - - # Look for single-figure output files first - all_exists = True - img = ImageFile(output_base, output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - if all_exists: - return [(code, [img])] - - # Then look for multi-figure output files - results = [] - all_exists = True - for i, code_piece in enumerate(code_pieces): - images = [] - for j in xrange(1000): - img = ImageFile('%s_%02d_%02d' % (output_base, i, j), output_dir) - for format, dpi in formats: - if out_of_date(code_path, img.filename(format)): - all_exists = False - break - img.formats.append(format) - - # assume that if we have one, we have them all - if not all_exists: - all_exists = (j > 0) - break - images.append(img) - if not all_exists: - break - results.append((code_piece, images)) - - if all_exists: - return results - - # -- We didn't find the files, so build them - - results = [] - ns = {} - - for i, code_piece in enumerate(code_pieces): - # Clear between runs - plt.close('all') - - # Run code - run_code(code_piece, code_path, ns) - - # Collect images - images = [] - fig_managers = _pylab_helpers.Gcf.get_all_fig_managers() - for j, figman in enumerate(fig_managers): - if len(fig_managers) == 1 and len(code_pieces) == 1: - img = ImageFile(output_base, output_dir) - else: - img = ImageFile("%s_%02d_%02d" % (output_base, i, j), - output_dir) - images.append(img) - for format, dpi in formats: - try: - figman.canvas.figure.savefig(img.filename(format), dpi=dpi) - except exceptions.BaseException, err: - raise PlotError(traceback.format_exc()) - img.formats.append(format) - - # Results - results.append((code_piece, images)) - - return results - - 
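The setup() hook and makefig() above show which conf.py values the removed plot_directive module consulted: plot_pre_code, plot_include_source, plot_formats (plain suffixes or (suffix, dpi) pairs), plot_basedir and plot_html_show_formats. A minimal sketch of how a project might have set them, with illustrative values rather than documented defaults::

    # conf.py -- hypothetical project still using numpydoc.plot_directive
    extensions = ['numpydoc', 'numpydoc.plot_directive']

    plot_include_source = True          # show the source next to each figure
    plot_pre_code = "import numpy as np\nimport matplotlib.pyplot as plt"
    plot_basedir = 'pyplots'            # plot:: file arguments resolved against this directory
    plot_formats = [('png', 80), 'hires.png', ('pdf', 50)]  # bare strings get a default DPI
    plot_html_show_formats = True       # link the generated files from the HTML output

With settings like these, a ``.. plot:: pyplots/example.py`` directive would run the script after executing plot_pre_code, then emit one figure per saved format.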
-#------------------------------------------------------------------------------ -# Relative pathnames -#------------------------------------------------------------------------------ - -try: - from os.path import relpath -except ImportError: - def relpath(target, base=os.curdir): - """ - Return a relative path to the target from either the current - dir or an optional base dir. Base can be a directory - specified either as absolute or relative to current dir. - """ - - if not os.path.exists(target): - raise OSError, 'Target does not exist: '+target - - if not os.path.isdir(base): - raise OSError, 'Base is not a directory or does not exist: '+base - - base_list = (os.path.abspath(base)).split(os.sep) - target_list = (os.path.abspath(target)).split(os.sep) - - # On the windows platform the target may be on a completely - # different drive from the base. - if os.name in ['nt','dos','os2'] and base_list[0] <> target_list[0]: - raise OSError, 'Target is on a different drive to base. Target: '+target_list[0].upper()+', base: '+base_list[0].upper() - - # Starting from the filepath root, work out how much of the - # filepath is shared by base and target. - for i in range(min(len(base_list), len(target_list))): - if base_list[i] <> target_list[i]: break - else: - # If we broke out of the loop, i is pointing to the first - # differing path elements. If we didn't break out of the - # loop, i is pointing to identical path elements. - # Increment i so that in all cases it points to the first - # differing path elements. - i+=1 - - rel_list = [os.pardir] * (len(base_list)-i) + target_list[i:] - return os.path.join(*rel_list) diff -Nru numpydoc-0.4/README.rst numpydoc-0.5/README.rst --- numpydoc-0.4/README.rst 1970-01-01 00:00:00.000000000 +0000 +++ numpydoc-0.5/README.rst 2014-06-08 20:03:45.000000000 +0000 @@ -0,0 +1,57 @@ +.. image:: https://travis-ci.org/numpy/numpydoc.png?branch=master + :target: https://travis-ci.org/numpy/numpydoc/ + +===================================== +numpydoc -- Numpy's Sphinx extensions +===================================== + +Numpy's documentation uses several custom extensions to Sphinx. These +are shipped in this ``numpydoc`` package, in case you want to make use +of them in third-party projects. + +The following extensions are available: + + - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add + the code description directives ``np:function``, ``np-c:function``, etc. + that support the Numpy docstring syntax. + + - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. + + - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` + directive. Note that this implementation may still undergo severe + changes or eventually be deprecated. + +See `A Guide to NumPy/SciPy Documentation `_ +for how to write docs that use this extension. + + +numpydoc +======== + +Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings +following the Numpy/Scipy format to a form palatable to Sphinx. + +Options +------- + +The following options can be set in conf.py: + +- numpydoc_use_plots: bool + + Whether to produce ``plot::`` directives for Examples sections that + contain ``import matplotlib``. + +- numpydoc_show_class_members: bool + + Whether to show all members of a class in the Methods and Attributes + sections automatically. + +- numpydoc_class_members_toctree: bool + + Whether to create a Sphinx table of contents for the lists of class + methods and attributes. 
If a table of contents is made, Sphinx expects + each entry to have a separate page. + +- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) + + Whether to insert an edit link after docstrings. diff -Nru numpydoc-0.4/README.txt numpydoc-0.5/README.txt --- numpydoc-0.4/README.txt 2010-10-01 13:20:54.000000000 +0000 +++ numpydoc-0.5/README.txt 1970-01-01 00:00:00.000000000 +0000 @@ -1,45 +0,0 @@ -===================================== -numpydoc -- Numpy's Sphinx extensions -===================================== - -Numpy's documentation uses several custom extensions to Sphinx. These -are shipped in this ``numpydoc`` package, in case you want to make use -of them in third-party projects. - -The following extensions are available: - - - ``numpydoc``: support for the Numpy docstring format in Sphinx, and add - the code description directives ``np:function``, ``np-c:function``, etc. - that support the Numpy docstring syntax. - - - ``numpydoc.traitsdoc``: For gathering documentation about Traits attributes. - - - ``numpydoc.plot_directive``: Adaptation of Matplotlib's ``plot::`` - directive. Note that this implementation may still undergo severe - changes or eventually be deprecated. - - -numpydoc -======== - -Numpydoc inserts a hook into Sphinx's autodoc that converts docstrings -following the Numpy/Scipy format to a form palatable to Sphinx. - -Options -------- - -The following options can be set in conf.py: - -- numpydoc_use_plots: bool - - Whether to produce ``plot::`` directives for Examples sections that - contain ``import matplotlib``. - -- numpydoc_show_class_members: bool - - Whether to show all members of a class in the Methods and Attributes - sections automatically. - -- numpydoc_edit_link: bool (DEPRECATED -- edit your HTML template instead) - - Whether to insert an edit link after docstrings. 
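The option lists in the new README.rst and the removed README.txt translate directly into conf.py settings. A minimal sketch, using assumed example values rather than the extension's defaults::

    # conf.py -- hypothetical Sphinx project using numpydoc 0.5
    extensions = ['numpydoc']

    numpydoc_use_plots = True              # emit plot:: directives for matplotlib Examples
    numpydoc_show_class_members = True     # list Methods and Attributes sections automatically
    numpydoc_class_members_toctree = True  # give each listed member its own toctree entry
    # numpydoc_edit_link is deprecated -- customise the HTML template instead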
diff -Nru numpydoc-0.4/setup.py numpydoc-0.5/setup.py --- numpydoc-0.4/setup.py 2010-10-01 13:22:28.000000000 +0000 +++ numpydoc-0.5/setup.py 2014-06-08 20:04:13.000000000 +0000 @@ -1,13 +1,17 @@ -from distutils.core import setup +from __future__ import division, print_function + +import sys import setuptools -import sys, os +from distutils.core import setup + +if sys.version_info[:2] < (2, 6) or (3, 0) <= sys.version_info[0:2] < (3, 3): + raise RuntimeError("Python version 2.6, 2.7 or >= 3.3 required.") -version = "0.4" +version = "0.5" setup( name="numpydoc", packages=["numpydoc"], - package_dir={"numpydoc": ""}, version=version, description="Sphinx extension to support docstrings in Numpy format", # classifiers from http://pypi.python.org/pypi?%3Aaction=list_classifiers @@ -18,14 +22,9 @@ keywords="sphinx numpy", author="Pauli Virtanen and others", author_email="pav@iki.fi", - url="http://github.com/numpy/numpy/tree/master/doc/sphinxext", + url="https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt", license="BSD", - zip_safe=False, - install_requires=["Sphinx >= 1.0.1"], - package_data={'numpydoc': 'tests', '': ''}, - entry_points={ - "console_scripts": [ - "autosummary_generate = numpydoc.autosummary_generate:main", - ], - }, + requires=["sphinx (>= 1.0.1)"], + package_data={'numpydoc': ['tests/test_*.py']}, + test_suite = 'nose.collector', ) diff -Nru numpydoc-0.4/tests/test_docscrape.py numpydoc-0.5/tests/test_docscrape.py --- numpydoc-0.4/tests/test_docscrape.py 2010-10-01 10:34:32.000000000 +0000 +++ numpydoc-0.5/tests/test_docscrape.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,615 +0,0 @@ -# -*- encoding:utf-8 -*- - -import sys, os -sys.path.append(os.path.join(os.path.dirname(__file__), '..')) - -from docscrape import NumpyDocString, FunctionDoc, ClassDoc -from docscrape_sphinx import SphinxDocString, SphinxClassDoc -from nose.tools import * - -doc_txt = '''\ - numpy.multivariate_normal(mean, cov, shape=None, spam=None) - - Draw values from a multivariate normal distribution with specified - mean and covariance. - - The multivariate normal or Gaussian distribution is a generalisation - of the one-dimensional normal distribution to higher dimensions. - - Parameters - ---------- - mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - cov : (N,N) ndarray - Covariance matrix of the distribution. - shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - - Returns - ------- - out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - - Other Parameters - ---------------- - spam : parrot - A parrot off its mortal coil. - - Raises - ------ - RuntimeError - Some error - - Warns - ----- - RuntimeWarning - Some warning - - Warnings - -------- - Certain warnings apply. 
- - Notes - ----- - - Instead of specifying the full covariance matrix, popular - approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - - This geometrical property can be seen in two dimensions by plotting - generated data-points: - - >>> mean = [0,0] - >>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - - >>> x,y = multivariate_normal(mean,cov,5000).T - >>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - - Note that the covariance matrix must be symmetric and non-negative - definite. - - References - ---------- - .. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 - .. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - - See Also - -------- - some, other, funcs - otherfunc : relationship - - Examples - -------- - >>> mean = (1,2) - >>> cov = [[1,0],[1,0]] - >>> x = multivariate_normal(mean,cov,(3,3)) - >>> print x.shape - (3, 3, 2) - - The following is probably true, given that 0.6 is roughly twice the - standard deviation: - - >>> print list( (x[0,0,:] - mean) < 0.6 ) - [True, True] - - .. index:: random - :refguide: random;distributions, random;gauss - - ''' -doc = NumpyDocString(doc_txt) - - -def test_signature(): - assert doc['Signature'].startswith('numpy.multivariate_normal(') - assert doc['Signature'].endswith('spam=None)') - -def test_summary(): - assert doc['Summary'][0].startswith('Draw values') - assert doc['Summary'][-1].endswith('covariance.') - -def test_extended_summary(): - assert doc['Extended Summary'][0].startswith('The multivariate normal') - -def test_parameters(): - assert_equal(len(doc['Parameters']), 3) - assert_equal([n for n,_,_ in doc['Parameters']], ['mean','cov','shape']) - - arg, arg_type, desc = doc['Parameters'][1] - assert_equal(arg_type, '(N,N) ndarray') - assert desc[0].startswith('Covariance matrix') - assert doc['Parameters'][0][-1][-2] == ' (1+2+3)/3' - -def test_other_parameters(): - assert_equal(len(doc['Other Parameters']), 1) - assert_equal([n for n,_,_ in doc['Other Parameters']], ['spam']) - arg, arg_type, desc = doc['Other Parameters'][0] - assert_equal(arg_type, 'parrot') - assert desc[0].startswith('A parrot off its mortal coil') - -def test_returns(): - assert_equal(len(doc['Returns']), 1) - arg, arg_type, desc = doc['Returns'][0] - assert_equal(arg, 'out') - assert_equal(arg_type, 'ndarray') - assert desc[0].startswith('The drawn samples') - assert desc[-1].endswith('distribution.') - -def test_notes(): - assert doc['Notes'][0].startswith('Instead') - assert doc['Notes'][-1].endswith('definite.') - assert_equal(len(doc['Notes']), 17) - -def test_references(): - assert doc['References'][0].startswith('..') - assert doc['References'][-1].endswith('2001.') - -def test_examples(): - assert doc['Examples'][0].startswith('>>>') - assert doc['Examples'][-1].endswith('True]') - -def test_index(): - assert_equal(doc['index']['default'], 'random') - print doc['index'] - assert_equal(len(doc['index']), 2) - assert_equal(len(doc['index']['refguide']), 2) - -def non_blank_line_by_line_compare(a,b): - a = [l for l in a.split('\n') if l.strip()] - b = [l for l in b.split('\n') if l.strip()] - for n,line in enumerate(a): - if not line == b[n]: - raise AssertionError("Lines %s of a and b differ: " - "\n>>> %s\n<<< %s\n" % - (n,line,b[n])) -def test_str(): - non_blank_line_by_line_compare(str(doc), 
-"""numpy.multivariate_normal(mean, cov, shape=None, spam=None) - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -Parameters ----------- -mean : (N,) ndarray - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - -cov : (N,N) ndarray - Covariance matrix of the distribution. -shape : tuple of ints - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -Returns -------- -out : ndarray - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). - - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -Other Parameters ----------------- -spam : parrot - A parrot off its mortal coil. - -Raises ------- -RuntimeError : - Some error - -Warns ------ -RuntimeWarning : - Some warning - -Warnings --------- -Certain warnings apply. - -See Also --------- -`some`_, `other`_, `funcs`_ - -`otherfunc`_ - relationship - -Notes ------ -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -References ----------- -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -Examples --------- ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] - -.. index:: random - :refguide: random;distributions, random;gauss""") - - -def test_sphinx_str(): - sphinx_doc = SphinxDocString(doc_txt) - non_blank_line_by_line_compare(str(sphinx_doc), -""" -.. index:: random - single: random;distributions, random;gauss - -Draw values from a multivariate normal distribution with specified -mean and covariance. - -The multivariate normal or Gaussian distribution is a generalisation -of the one-dimensional normal distribution to higher dimensions. - -:Parameters: - - **mean** : (N,) ndarray - - Mean of the N-dimensional distribution. - - .. math:: - - (1+2+3)/3 - - **cov** : (N,N) ndarray - - Covariance matrix of the distribution. - - **shape** : tuple of ints - - Given a shape of, for example, (m,n,k), m*n*k samples are - generated, and packed in an m-by-n-by-k arrangement. Because - each sample is N-dimensional, the output shape is (m,n,k,N). - -:Returns: - - **out** : ndarray - - The drawn samples, arranged according to `shape`. If the - shape given is (m,n,...), then the shape of `out` is is - (m,n,...,N). 
- - In other words, each entry ``out[i,j,...,:]`` is an N-dimensional - value drawn from the distribution. - -:Other Parameters: - - **spam** : parrot - - A parrot off its mortal coil. - -:Raises: - - **RuntimeError** : - - Some error - -:Warns: - - **RuntimeWarning** : - - Some warning - -.. warning:: - - Certain warnings apply. - -.. seealso:: - - :obj:`some`, :obj:`other`, :obj:`funcs` - - :obj:`otherfunc` - relationship - -.. rubric:: Notes - -Instead of specifying the full covariance matrix, popular -approximations include: - - - Spherical covariance (`cov` is a multiple of the identity matrix) - - Diagonal covariance (`cov` has non-negative elements only on the diagonal) - -This geometrical property can be seen in two dimensions by plotting -generated data-points: - ->>> mean = [0,0] ->>> cov = [[1,0],[0,100]] # diagonal covariance, points lie on x or y-axis - ->>> x,y = multivariate_normal(mean,cov,5000).T ->>> plt.plot(x,y,'x'); plt.axis('equal'); plt.show() - -Note that the covariance matrix must be symmetric and non-negative -definite. - -.. rubric:: References - -.. [1] A. Papoulis, "Probability, Random Variables, and Stochastic - Processes," 3rd ed., McGraw-Hill Companies, 1991 -.. [2] R.O. Duda, P.E. Hart, and D.G. Stork, "Pattern Classification," - 2nd ed., Wiley, 2001. - -.. only:: latex - - [1]_, [2]_ - -.. rubric:: Examples - ->>> mean = (1,2) ->>> cov = [[1,0],[1,0]] ->>> x = multivariate_normal(mean,cov,(3,3)) ->>> print x.shape -(3, 3, 2) - -The following is probably true, given that 0.6 is roughly twice the -standard deviation: - ->>> print list( (x[0,0,:] - mean) < 0.6 ) -[True, True] -""") - - -doc2 = NumpyDocString(""" - Returns array of indices of the maximum values of along the given axis. - - Parameters - ---------- - a : {array_like} - Array to look in. - axis : {None, integer} - If None, the index is into the flattened array, otherwise along - the specified axis""") - -def test_parameters_without_extended_description(): - assert_equal(len(doc2['Parameters']), 2) - -doc3 = NumpyDocString(""" - my_signature(*params, **kwds) - - Return this and that. - """) - -def test_escape_stars(): - signature = str(doc3).split('\n')[0] - assert_equal(signature, 'my_signature(\*params, \*\*kwds)') - -doc4 = NumpyDocString( - """a.conj() - - Return an array with all complex-valued elements conjugated.""") - -def test_empty_extended_summary(): - assert_equal(doc4['Extended Summary'], []) - -doc5 = NumpyDocString( - """ - a.something() - - Raises - ------ - LinAlgException - If array is singular. 
- - Warns - ----- - SomeWarning - If needed - """) - -def test_raises(): - assert_equal(len(doc5['Raises']), 1) - name,_,desc = doc5['Raises'][0] - assert_equal(name,'LinAlgException') - assert_equal(desc,['If array is singular.']) - -def test_warns(): - assert_equal(len(doc5['Warns']), 1) - name,_,desc = doc5['Warns'][0] - assert_equal(name,'SomeWarning') - assert_equal(desc,['If needed']) - -def test_see_also(): - doc6 = NumpyDocString( - """ - z(x,theta) - - See Also - -------- - func_a, func_b, func_c - func_d : some equivalent func - foo.func_e : some other func over - multiple lines - func_f, func_g, :meth:`func_h`, func_j, - func_k - :obj:`baz.obj_q` - :class:`class_j`: fubar - foobar - """) - - assert len(doc6['See Also']) == 12 - for func, desc, role in doc6['See Also']: - if func in ('func_a', 'func_b', 'func_c', 'func_f', - 'func_g', 'func_h', 'func_j', 'func_k', 'baz.obj_q'): - assert(not desc) - else: - assert(desc) - - if func == 'func_h': - assert role == 'meth' - elif func == 'baz.obj_q': - assert role == 'obj' - elif func == 'class_j': - assert role == 'class' - else: - assert role is None - - if func == 'func_d': - assert desc == ['some equivalent func'] - elif func == 'foo.func_e': - assert desc == ['some other func over', 'multiple lines'] - elif func == 'class_j': - assert desc == ['fubar', 'foobar'] - -def test_see_also_print(): - class Dummy(object): - """ - See Also - -------- - func_a, func_b - func_c : some relationship - goes here - func_d - """ - pass - - obj = Dummy() - s = str(FunctionDoc(obj, role='func')) - assert(':func:`func_a`, :func:`func_b`' in s) - assert(' some relationship' in s) - assert(':func:`func_d`' in s) - -doc7 = NumpyDocString(""" - - Doc starts on second line. - - """) - -def test_empty_first_line(): - assert doc7['Summary'][0].startswith('Doc starts') - - -def test_no_summary(): - str(SphinxDocString(""" - Parameters - ----------""")) - - -def test_unicode(): - doc = SphinxDocString(""" - öäöäöäöäöåååå - - öäöäöäööäååå - - Parameters - ---------- - ååå : äää - ööö - - Returns - ------- - ååå : ööö - äää - - """) - assert doc['Summary'][0] == u'öäöäöäöäöåååå'.encode('utf-8') - -def test_plot_examples(): - cfg = dict(use_plots=True) - - doc = SphinxDocString(""" - Examples - -------- - >>> import matplotlib.pyplot as plt - >>> plt.plot([1,2,3],[4,5,6]) - >>> plt.show() - """, config=cfg) - assert 'plot::' in str(doc), str(doc) - - doc = SphinxDocString(""" - Examples - -------- - .. plot:: - - import matplotlib.pyplot as plt - plt.plot([1,2,3],[4,5,6]) - plt.show() - """, config=cfg) - assert str(doc).count('plot::') == 1, str(doc) - -def test_class_members(): - - class Dummy(object): - """ - Dummy class. - - """ - def spam(self, a, b): - """Spam\n\nSpam spam.""" - pass - def ham(self, c, d): - """Cheese\n\nNo cheese.""" - pass - - for cls in (ClassDoc, SphinxClassDoc): - doc = cls(Dummy, config=dict(show_class_members=False)) - assert 'Methods' not in str(doc), (cls, str(doc)) - assert 'spam' not in str(doc), (cls, str(doc)) - assert 'ham' not in str(doc), (cls, str(doc)) - - doc = cls(Dummy, config=dict(show_class_members=True)) - assert 'Methods' in str(doc), (cls, str(doc)) - assert 'spam' in str(doc), (cls, str(doc)) - assert 'ham' in str(doc), (cls, str(doc)) - - if cls is SphinxClassDoc: - assert '.. 
autosummary::' in str(doc), str(doc) - -if __name__ == "__main__": - import nose - nose.run() - diff -Nru numpydoc-0.4/traitsdoc.py numpydoc-0.5/traitsdoc.py --- numpydoc-0.4/traitsdoc.py 2010-09-15 19:35:53.000000000 +0000 +++ numpydoc-0.5/traitsdoc.py 1970-01-01 00:00:00.000000000 +0000 @@ -1,140 +0,0 @@ -""" -========= -traitsdoc -========= - -Sphinx extension that handles docstrings in the Numpy standard format, [1] -and support Traits [2]. - -This extension can be used as a replacement for ``numpydoc`` when support -for Traits is required. - -.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard -.. [2] http://code.enthought.com/projects/traits/ - -""" - -import inspect -import os -import pydoc - -import docscrape -import docscrape_sphinx -from docscrape_sphinx import SphinxClassDoc, SphinxFunctionDoc, SphinxDocString - -import numpydoc - -import comment_eater - -class SphinxTraitsDoc(SphinxClassDoc): - def __init__(self, cls, modulename='', func_doc=SphinxFunctionDoc): - if not inspect.isclass(cls): - raise ValueError("Initialise using a class. Got %r" % cls) - self._cls = cls - - if modulename and not modulename.endswith('.'): - modulename += '.' - self._mod = modulename - self._name = cls.__name__ - self._func_doc = func_doc - - docstring = pydoc.getdoc(cls) - docstring = docstring.split('\n') - - # De-indent paragraph - try: - indent = min(len(s) - len(s.lstrip()) for s in docstring - if s.strip()) - except ValueError: - indent = 0 - - for n,line in enumerate(docstring): - docstring[n] = docstring[n][indent:] - - self._doc = docscrape.Reader(docstring) - self._parsed_data = { - 'Signature': '', - 'Summary': '', - 'Description': [], - 'Extended Summary': [], - 'Parameters': [], - 'Returns': [], - 'Raises': [], - 'Warns': [], - 'Other Parameters': [], - 'Traits': [], - 'Methods': [], - 'See Also': [], - 'Notes': [], - 'References': '', - 'Example': '', - 'Examples': '', - 'index': {} - } - - self._parse() - - def _str_summary(self): - return self['Summary'] + [''] - - def _str_extended_summary(self): - return self['Description'] + self['Extended Summary'] + [''] - - def __str__(self, indent=0, func_role="func"): - out = [] - out += self._str_signature() - out += self._str_index() + [''] - out += self._str_summary() - out += self._str_extended_summary() - for param_list in ('Parameters', 'Traits', 'Methods', - 'Returns','Raises'): - out += self._str_param_list(param_list) - out += self._str_see_also("obj") - out += self._str_section('Notes') - out += self._str_references() - out += self._str_section('Example') - out += self._str_section('Examples') - out = self._str_indent(out,indent) - return '\n'.join(out) - -def looks_like_issubclass(obj, classname): - """ Return True if the object has a class or superclass with the given class - name. - - Ignores old-style classes. - """ - t = obj - if t.__name__ == classname: - return True - for klass in t.__mro__: - if klass.__name__ == classname: - return True - return False - -def get_doc_object(obj, what=None, config=None): - if what is None: - if inspect.isclass(obj): - what = 'class' - elif inspect.ismodule(obj): - what = 'module' - elif callable(obj): - what = 'function' - else: - what = 'object' - if what == 'class': - doc = SphinxTraitsDoc(obj, '', func_doc=SphinxFunctionDoc, config=config) - if looks_like_issubclass(obj, 'HasTraits'): - for name, trait, comment in comment_eater.get_class_traits(obj): - # Exclude private traits. 
- if not name.startswith('_'): - doc['Traits'].append((name, trait, comment.splitlines())) - return doc - elif what in ('function', 'method'): - return SphinxFunctionDoc(obj, '', config=config) - else: - return SphinxDocString(pydoc.getdoc(obj), config=config) - -def setup(app): - # init numpydoc - numpydoc.setup(app, get_doc_object) -
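For context, the removed traitsdoc module swapped numpydoc's document factory for the get_doc_object shown above, and for classes that look like HasTraits subclasses it filled the extra Traits section from the comment block preceding each trait assignment. A hedged sketch of the kind of class this targeted (assumes the Traits package is installed; class and trait names are illustrative only)::

    # A project would have enabled the extension in conf.py with:
    #     extensions = ['numpydoc.traitsdoc']
    from traits.api import HasTraits, Float, Str

    class Oscillator(HasTraits):
        # Natural frequency of the oscillator, in Hz.
        frequency = Float(1.0)

        # Label used when the oscillator is plotted.
        label = Str('untitled')

Each (name, trait, comment) triple collected from such a class is appended to doc['Traits'] and rendered as an extra parameter-style section by SphinxTraitsDoc.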