gentoo/dev-python/epydoc/files/epydoc-python-2.6.patch
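Python 2.6 changed the stdlib tokenizer: a COMMENT token no longer includes its trailing newline, which is instead emitted as a separate NL token. epydoc 3.0.1's comment-docstring parsing assumes the old (<= 2.5) behaviour, so on 2.6 every comment appears to be followed by a blank line and is discarded with a warning. The patch below detects the tokenizer's behaviour once at import time and then swallows the extra NL token after each comment. A minimal standalone sketch of that detection, runnable on Python 2 (the name probe_comment_tokens is illustrative, not part of epydoc):

    # Tokenize a blank line, a bare comment line, and a blank line, then
    # inspect whether the comment's newline shows up as a separate NL token.
    import token
    import tokenize

    def probe_comment_tokens():
        readline = iter(u'\n#\n\n'.splitlines(True)).next
        return [token.tok_name[tup[0]]
                for tup in tokenize.generate_tokens(readline)]

    names = probe_comment_tokens()
    if names == ['NL', 'COMMENT', 'NL', 'ENDMARKER']:
        print 'COMMENT includes its newline (Python <= 2.5)'
    elif names == ['NL', 'COMMENT', 'NL', 'NL', 'ENDMARKER']:
        print 'COMMENT excludes its newline (Python >= 2.6)'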

diff -Nur epydoc-3.0.1/epydoc/docparser.py epydoc-3.0.1/epydoc/docparser.py
--- epydoc-3.0.1/epydoc/docparser.py
+++ epydoc-3.0.1/epydoc/docparser.py
@@ -72,6 +72,26 @@
 from epydoc.compat import *
 
 ######################################################################
+## Tokenizer change in 2.6
+######################################################################
+
+def comment_includes_nl():
+    """ Determine whether comments are parsed as one or two tokens... """
+    readline = iter(u'\n#\n\n'.splitlines(True)).next
+    tokens = [
+        token.tok_name[tup[0]] for tup in tokenize.generate_tokens(readline)
+    ]
+    if tokens == ['NL', 'COMMENT', 'NL', 'ENDMARKER']:
+        return True
+    elif tokens == ['NL', 'COMMENT', 'NL', 'NL', 'ENDMARKER']:
+        return False
+    raise AssertionError(
+        "Tokenizer returns unexpected tokens: %r" % tokens
+    )
+
+comment_includes_nl = comment_includes_nl()
+
+######################################################################
 ## Doc Parser
 ######################################################################
 
@@ -520,6 +540,10 @@
     # inside that block, not outside it.
     start_group = None
 
+    # If the comment tokens do not include the NL, every comment token
+    # sets this to True in order to swallow the next NL token unprocessed.
+    comment_nl_waiting = False
+
     # Check if the source file declares an encoding.
     encoding = get_module_encoding(module_doc.filename)
 
@@ -570,7 +594,9 @@
         # then discard them: blank lines are not allowed between a
         # comment block and the thing it describes.
         elif toktype == tokenize.NL:
-            if comments and not line_toks:
+            if comment_nl_waiting:
+                comment_nl_waiting = False
+            elif comments and not line_toks:
                 log.warning('Ignoring docstring comment block followed by '
                             'a blank line in %r on line %r' %
                             (module_doc.filename, srow-1))
@@ -578,6 +604,7 @@
 
         # Comment token: add to comments if appropriate.
         elif toktype == tokenize.COMMENT:
+            comment_nl_waiting = not comment_includes_nl
             if toktext.startswith(COMMENT_DOCSTRING_MARKER):
                 comment_line = toktext[len(COMMENT_DOCSTRING_MARKER):].rstrip()
                 if comment_line.startswith(" "):
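
For context, the runtime half of the patch threads a one-shot flag through epydoc's token loop: each COMMENT token arms it when the tokenizer emits the newline separately, and the next NL token is then consumed without being treated as a blank line. A condensed sketch of that pattern (collect_comment_lines is illustrative, not the real epydoc code, which also tracks line tokens and groups; the patch itself decides once at import time via comment_includes_nl rather than per token as done here):

    # Gather consecutive comment lines, discarding any block that is
    # followed by a genuine blank line, as epydoc's parser does.
    import tokenize
    from StringIO import StringIO

    def collect_comment_lines(source):
        comments = []
        comment_nl_waiting = False
        for tup in tokenize.generate_tokens(StringIO(source).readline):
            toktype, toktext = tup[0], tup[1]
            if toktype == tokenize.NL:
                if comment_nl_waiting:
                    # On Python >= 2.6 this NL is the newline that used
                    # to be part of the COMMENT token: consume it silently.
                    comment_nl_waiting = False
                else:
                    # A genuine blank line ends any pending comment block.
                    comments = []
            elif toktype == tokenize.COMMENT:
                # Arm the flag only when the tokenizer emits a separate NL.
                comment_nl_waiting = not toktext.endswith('\n')
                comments.append(toktext.rstrip())
        return comments

The patch applies from inside the unpacked epydoc-3.0.1/ directory with, e.g., patch -p1 < epydoc-python-2.6.patch.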