diff -Nru zzzeeksphinx-1.4.0/.github/workflows/publish.yaml zzzeeksphinx-1.5.0/.github/workflows/publish.yaml --- zzzeeksphinx-1.4.0/.github/workflows/publish.yaml 1970-01-01 00:00:00.000000000 +0000 +++ zzzeeksphinx-1.5.0/.github/workflows/publish.yaml 2021-07-15 20:16:13.000000000 +0000 @@ -0,0 +1,36 @@ +name: Publish tag + +on: + # run when a pushing a tag + push: + tags: + - '1.*' + +jobs: + create-dist: + name: create-dist + runs-on: ubuntu-latest + steps: + - name: Checkout repo + uses: actions/checkout@v2 + + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.9' + + - name: Create dist + run: | + python -m pip install --upgrade pip + pip --version + pip install setuptools wheel + python setup.py sdist --dist-dir dist + + - name: Publish wheel + uses: pypa/gh-action-pypi-publish@v1.4.2 + with: + user: __token__ + password: ${{ secrets.pypi_token }} + # comment repository_url to use the real pypi + # repository_url: https://test.pypi.org/legacy/ + packages_dir: dist/ diff -Nru zzzeeksphinx-1.4.0/.gitignore zzzeeksphinx-1.5.0/.gitignore --- zzzeeksphinx-1.4.0/.gitignore 1970-01-01 00:00:00.000000000 +0000 +++ zzzeeksphinx-1.5.0/.gitignore 2019-11-01 01:46:41.000000000 +0000 @@ -0,0 +1,8 @@ +*.pyc +*.pyo +/build/ +dist/ +*.orig +.venv +*.egg-info +.tox diff -Nru zzzeeksphinx-1.4.0/.pre-commit-config.yaml zzzeeksphinx-1.5.0/.pre-commit-config.yaml --- zzzeeksphinx-1.4.0/.pre-commit-config.yaml 1970-01-01 00:00:00.000000000 +0000 +++ zzzeeksphinx-1.5.0/.pre-commit-config.yaml 2023-05-18 15:48:37.000000000 +0000 @@ -0,0 +1,28 @@ +# See https://pre-commit.com for more information +# See https://pre-commit.com/hooks.html for more hooks +repos: +- repo: https://github.com/python/black + rev: 22.8.0 + hooks: + - id: black + +- repo: https://github.com/sqlalchemyorg/zimports + rev: v0.6.0 + hooks: + - id: zimports + +- repo: https://github.com/pycqa/flake8 + rev: 5.0.0 + hooks: + - id: flake8 + additional_dependencies: + - flake8-import-order + - flake8-builtins + - flake8-docstrings + - flake8-rst-docstrings + - pydocstyle<4.0.0 + - pygments + + + + diff -Nru zzzeeksphinx-1.4.0/PKG-INFO zzzeeksphinx-1.5.0/PKG-INFO --- zzzeeksphinx-1.4.0/PKG-INFO 2023-04-25 23:30:03.421863600 +0000 +++ zzzeeksphinx-1.5.0/PKG-INFO 2024-02-29 21:35:00.533564600 +0000 @@ -1,19 +1,23 @@ Metadata-Version: 2.1 Name: zzzeeksphinx -Version: 1.4.0 +Version: 1.5.0 Summary: Zzzeek's Sphinx Layout and Utilities. 
Home-page: https://github.com/sqlalchemyorg/zzzeeksphinx Author: Mike Bayer Author-email: mike@zzzcomputing.com License: MIT Keywords: Sphinx -Platform: UNKNOWN Classifier: Development Status :: 3 - Alpha Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Documentation License-File: LICENSE +Requires-Dist: libsass +Requires-Dist: mako +Requires-Dist: requests +Requires-Dist: sphinx<7.3,>=7.2 +Requires-Dist: sphinxcontrib-jquery ============= zzzeeksphinx @@ -112,7 +116,5 @@ - - diff -Nru zzzeeksphinx-1.4.0/debian/changelog zzzeeksphinx-1.5.0/debian/changelog --- zzzeeksphinx-1.4.0/debian/changelog 2024-01-05 13:26:34.000000000 +0000 +++ zzzeeksphinx-1.5.0/debian/changelog 2024-03-04 15:25:39.000000000 +0000 @@ -1,3 +1,10 @@ +zzzeeksphinx (1.5.0-1) unstable; urgency=medium + + * New upstream release + * Drop patches - no longer needed + + -- Piotr Ożarowski Mon, 04 Mar 2024 16:25:39 +0100 + zzzeeksphinx (1.4.0-2) unstable; urgency=medium [ Thomas Goirand ] diff -Nru zzzeeksphinx-1.4.0/debian/patches/fix-data-url_root-into-data-content_root.patch zzzeeksphinx-1.5.0/debian/patches/fix-data-url_root-into-data-content_root.patch --- zzzeeksphinx-1.4.0/debian/patches/fix-data-url_root-into-data-content_root.patch 2024-01-05 13:26:34.000000000 +0000 +++ zzzeeksphinx-1.5.0/debian/patches/fix-data-url_root-into-data-content_root.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,44 +0,0 @@ -Description: Fix data-url_root into data-content_root - This makes the package Sphinx 7.2 compliant. -Author: Thomas Goirand -Forwarded: no -Last-Update: 2024-01-05 - ---- zzzeeksphinx-1.4.0.orig/zzzeeksphinx/themes/zsbase/layout.mako -+++ zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsbase/layout.mako -@@ -84,6 +84,8 @@ withsidebar = bool(toc) and ( - - % endif - -+ -+ - - - -@@ -325,8 +327,6 @@ withsidebar = bool(toc) and ( - }; - - -- -- - - % for scriptfile in script_files + self.attr.local_script_files: - ---- zzzeeksphinx-1.4.0.orig/zzzeeksphinx/themes/zsmako/layout.mako -+++ zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsmako/layout.mako -@@ -66,6 +66,7 @@ withsidebar = bool(toc) and current_page - - % endif - -+ - - - -@@ -208,7 +209,6 @@ withsidebar = bool(toc) and current_page - }; - - -- - - - % for scriptfile in script_files + self.attr.local_script_files: diff -Nru zzzeeksphinx-1.4.0/debian/patches/remove_imp.patch zzzeeksphinx-1.5.0/debian/patches/remove_imp.patch --- zzzeeksphinx-1.4.0/debian/patches/remove_imp.patch 2024-01-05 13:26:15.000000000 +0000 +++ zzzeeksphinx-1.5.0/debian/patches/remove_imp.patch 1970-01-01 00:00:00.000000000 +0000 @@ -1,65 +0,0 @@ -Author: Mike Bayer -Date: Wed, 8 Nov 2023 15:18:00 -0500 -Subject: remove imp - Python 3.12 removed this entirely. 
replace with importlib thing -Origin: upstream, https://github.com/sqlalchemyorg/zzzeeksphinx/commit/8ae5b47850df4a02bb256d53e69f740c49167c09.patch -Last-Update: 2023-01-05 - -diff --git a/zzzeeksphinx/viewsource.py b/zzzeeksphinx/viewsource.py -index bea7f2e..4264bca 100644 ---- a/zzzeeksphinx/viewsource.py -+++ b/zzzeeksphinx/viewsource.py -@@ -1,5 +1,5 @@ - import ast --import imp -+import importlib - import os - import re - import warnings -@@ -17,7 +17,6 @@ - - - def view_source(name, rawtext, text, lineno, inliner, options={}, content=[]): -- - env = inliner.document.settings.env - - node = _view_source_node(env, text, None) -@@ -211,16 +210,24 @@ def _view_source_node(env, text, state): - - for tok in modname.split("."): - try: -- file_, pathname, desc = imp.find_module( -- tok, [pathname] if pathname else None -+ thing = importlib.machinery.PathFinder().find_spec( -+ tok, -+ [pathname] if pathname else None, - ) - except ImportError as ie: - raise ImportError("Error trying to import %s: %s" % (modname, ie)) - else: -- if file_: -- if state: -- module_docstring = _get_module_docstring(file_) -- file_.close() -+ pathname = ( -+ thing.submodule_search_locations[0] -+ if thing.submodule_search_locations -+ else thing.origin -+ ) -+ if thing.origin: -+ assert thing.origin -+ -+ with open(thing.origin, "r") as tfile: -+ if state: -+ module_docstring = _get_module_docstring(tfile) - - # unlike viewcode which silently traps exceptions, - # I want this to totally barf if the file can't be loaded. -@@ -345,7 +352,6 @@ def run(self): - - - def setup(app): -- - app.add_role("viewsource", view_source) - - app.add_directive("autosource", AutoSourceDirective) diff -Nru zzzeeksphinx-1.4.0/debian/patches/series zzzeeksphinx-1.5.0/debian/patches/series --- zzzeeksphinx-1.4.0/debian/patches/series 2024-01-05 13:26:15.000000000 +0000 +++ zzzeeksphinx-1.5.0/debian/patches/series 1970-01-01 00:00:00.000000000 +0000 @@ -1,2 +0,0 @@ -remove_imp.patch -fix-data-url_root-into-data-content_root.patch diff -Nru zzzeeksphinx-1.4.0/setup.py zzzeeksphinx-1.5.0/setup.py --- zzzeeksphinx-1.4.0/setup.py 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/setup.py 2024-01-06 16:51:51.000000000 +0000 @@ -39,7 +39,7 @@ "libsass", "mako", "requests", - "sphinx>=5.3.0,<6.3", + "sphinx>=7.2,<7.3", "sphinxcontrib-jquery", ], include_package_data=True, Binary files /tmp/tmpyrllckwh/I4avEudbud/zzzeeksphinx-1.4.0/tools/annotated.xcf and /tmp/tmpyrllckwh/ncLjDo_6nM/zzzeeksphinx-1.5.0/tools/annotated.xcf differ diff -Nru zzzeeksphinx-1.4.0/tools/dev_anno_detection.py zzzeeksphinx-1.5.0/tools/dev_anno_detection.py --- zzzeeksphinx-1.4.0/tools/dev_anno_detection.py 1970-01-01 00:00:00.000000000 +0000 +++ zzzeeksphinx-1.5.0/tools/dev_anno_detection.py 2023-01-14 19:06:15.000000000 +0000 @@ -0,0 +1,220 @@ +from __future__ import absolute_import + +import re + +import pygments +from pygments import highlight +from pygments.filter import apply_filters +from pygments.filter import Filter +from pygments.formatters import HtmlFormatter +from pygments.lexers import PythonLexer +from pygments.lexers import SqlLexer +from pygments.token import Token + + +def _strip_trailing_whitespace(iter_): + buf = list(iter_) + if buf: + buf[-1] = (buf[-1][0], buf[-1][1].rstrip()) + for t, v in buf: + yield t, v + + +class StripDocTestFilter(Filter): + def filter(self, lexer, stream): + for ttype, value in stream: + if ( + ttype is Token.Comment or ttype.parent is Token.Comment + ) and re.match(r"#\s*doctest:", value): + continue + yield ttype, 
value + + +ARROW_ANNOTATION = ( + (Token.Operator, "-"), + (Token.Operator, ">"), +) +COLON_ANNOTATION = ( + (Token.Name,), + (Token.Punctuation, ":"), +) + + +NEWLINE = (Token.Text, "\n") + + +class DetectAnnotationsFilter(Filter): + def filter(self, lexer, stream): + first, second = None, None + found_colon = False + should_report = False + annotated = None + found_sql = False + + for ttype, value in stream: + + # any encounting of SQL blocks, stop immediately, we would + # have detected annotations by now if they applied. + # don't misinterpret SQL tokens + if ttype is Token.Name and value in ( + "execsql", + "printsql", + "opensql", + "sqlpopup", + ): + found_sql = True + + if found_sql: + yield ttype, value + continue + + if ttype is Token.Name.Builtin: + ttype = Token.Name + + if ttype is Token.Keyword and value == "class": + should_report = True + + first = second + second = ttype, value + + yield ttype, value + + if annotated: + continue + elif annotated is None and ttype is not Token.Text: + annotated = False + + if (first, second) == ARROW_ANNOTATION: + annotated = True + elif found_colon: + if (ttype, value) == NEWLINE: + found_colon = False + elif ttype == Token.Name: + found_colon = False + annotated = True + elif first and ((first[0:1], second) == COLON_ANNOTATION): + found_colon = True + should_report = True + + # report only on examples that have class defs + if annotated is not None and should_report: + yield Token.Other, f"pep484 annotations detected: {annotated}" + + +class DetectAnnotationsFormatterMixin: + annotated = None + + def _format_lines(self, tokensource): + + self.annotated = None + + def go(tokensource): + for ttype, value in tokensource: + if ttype is Token.Other and value.startswith( + "pep484 annotations detected:" + ): + self.annotated = ( + value == "pep484 annotations detected: True" + ) + continue + + yield ttype, value + + for level, tag in super()._format_lines(go(tokensource)): + yield level, tag + + def _wrap_pre(self, inner): + for level, tag in super()._wrap_pre(inner): + yield level, tag + + if level == 0 and self.annotated is not None and tag == "": + yield ( + 1, + '
' + if self.annotated + else '
', + ) + + def _wrap_code(self, inner): + + for level, tag in super()._wrap_code(inner): + yield level, tag + + if level == 0 and self.annotated is not None and tag == "": + yield ( + 1, + '
' + if self.annotated + else '
', + ) + + +class PopupSQLFormatter(HtmlFormatter): + def _format_lines(self, tokensource): + sql_lexer = SqlLexer() + formatter = HtmlFormatter(nowrap=True) + buf = [] + for ttype, value in apply_filters(tokensource, [StripDocTestFilter()]): + if ttype in Token.Sql: + + for t, v in HtmlFormatter._format_lines(self, iter(buf)): + yield t, v + buf = [] + + if ttype is Token.Sql: + yield ( + 1, + "
%s
" + % pygments.highlight( + re.sub(r"(?:{stop}|\n+)\s*$", "", value), + sql_lexer, + formatter, + ), + ) + elif ttype is Token.Sql.Link: + yield 1, "sql" + elif ttype is Token.Sql.Popup: + yield ( + 1, + "" + % pygments.highlight( + re.sub(r"(?:{stop}|\n+)$", "", value), + sql_lexer, + formatter, + ), + ) + else: + buf.append((ttype, value)) + + for t, v in _strip_trailing_whitespace( + HtmlFormatter._format_lines(self, iter(buf)) + ): + yield t, v + + +class AnnoPopupSQLFormatter( + DetectAnnotationsFormatterMixin, PopupSQLFormatter +): + pass + + +code1 = """ + +>>> from sqlalchemy import text + +>>> with engine.connect() as conn: +... result = conn.execute(text("select 'hello world'")) +... print(result.all()) +{execsql}BEGIN (implicit) +select 'hello world' +[...] () +{stop}[('hello world',)] +{execsql}ROLLBACK{stop} + + +""" + +opt = {} +lex = PythonLexer() + +print(highlight(code1, lex, AnnoPopupSQLFormatter())) diff -Nru zzzeeksphinx-1.4.0/tools/fix_xrefs.py zzzeeksphinx-1.5.0/tools/fix_xrefs.py --- zzzeeksphinx-1.4.0/tools/fix_xrefs.py 1970-01-01 00:00:00.000000000 +0000 +++ zzzeeksphinx-1.5.0/tools/fix_xrefs.py 2020-09-09 14:11:22.000000000 +0000 @@ -0,0 +1,565 @@ +#!/usr/bin/env python + +import argparse +import os +import re +import sys + +import readchar + +BOLD = "\033[1m" +NORMAL = "\033[0m" +UNDERLINE = "\033[4m" +PURPLE = "\033[95m" +CYAN = "\033[96m" +DARKCYAN = "\033[36m" +BLUE = "\033[94m" +GREEN = "\033[92m" + + +def _token_to_str(token): + if isinstance(token, str): + return token + else: + return token.group(0) + + +def _matched_portion(match): + a, b, c = match.group(2, 3, 4) + return a or b or c + + +def color(text, color_code): + return "%s%s%s" % (color_code, text, NORMAL) + + +def highlighted(line_tokens, match_idx, group): + + match = line_tokens[match_idx] + display_of_match = ( + BOLD + + color( + match.group(0).replace( + _matched_portion(match), color(_matched_portion(match), CYAN) + ), + PURPLE, + ) + + NORMAL + ) + + return ( + "".join(_token_to_str(tok) for tok in line_tokens[0:match_idx]) + + display_of_match + + "".join(_token_to_str(tok) for tok in line_tokens[match_idx + 1 :]) + ) + + +def prompt( + fname, + state, + lines, + linenum, + line_tokens, + token_idx, + rec, + replacements, + app_state, +): + """Present the prompt screen for a single token in a line in a file and + receive user input. + + handle_line() calls this function repeated times for a given token + in a loop until the user command indicates we are done working with + this particular token. 
+ + """ + + if app_state.get("do_prompt", True) and rec.get("do_prompt", True): + context_lines = 12 + print("\033c") + print( + "-----------------------------------------------------------------" + ) + print(UNDERLINE + fname + NORMAL) + print("") + for index in range( + linenum - context_lines // 2, linenum + context_lines // 2 + ): + if index >= 0 and index <= len(lines): + if index == linenum: + content = highlighted(line_tokens, token_idx, 0).rstrip() + else: + content = lines[index].rstrip() + print("%4.d: %s" % (index + 1, content)) + print( + "-----------------------------------------------------------------" + ) + + print( + "EXISTING SYMBOL TO REPLACE: %s" + % color(_matched_portion(line_tokens[token_idx]), CYAN) + ) + print( + "REPLACEMENTS: %s" + % " ".join( + "[%d] %s" % (num, text) + for num, text in enumerate(replacements, 1) + ) + ) + print( + "-----------------------------------------------------------------" + ) + + sys.stdout.write( + "[s]kip skip [a]ll of these skip [f]ile " + "[w]rite file and go to next \n" + "[A]pply all current non-ambiguous replacements from state\n" + "[F]inish all files with current " + "instructions " + "[e]nter new replacement \n" + "[u]se numbered replacement for all " + "future occurrences [p]db [q]uit [s]? " + ) + sys.stdout.flush() + cmd = readchar.readchar() + print("") + if ord(cmd) in (10, 13): + cmd = "s" + else: + if "apply_all" in rec: + return rec["apply_all"] + else: + return "s" + + if cmd == "q": + sys.exit() + elif cmd == "A": + for rec in state.values(): + if len(rec["replacements"]) == 1 and "apply_all" not in rec: + rec["apply_all"] = 0 + rec["do_prompt"] = False + return True + elif cmd == "F": + app_state["do_prompt"] = False + return "s" + elif cmd in ("s", "f", "a", "w"): + return cmd + elif cmd == "p": + import pdb + + pdb.set_trace() + elif cmd == "e": + replacement_text = input( + "Enter replacement text for the portion in " + + CYAN + + "CYAN" + + NORMAL + + ": " + ).strip() + replacements.append(replacement_text) + return True + elif cmd == "u" or re.match(r"\d+", cmd): + + if cmd == "u": + num = input("Enter number of replacement: ") + if re.match(r"\d+", num): + replacement_index = int(num) + else: + input("not a number: %s, press enter" % num) + else: + replacement_index = int(cmd) + + try: + replacements[replacement_index - 1] + except IndexError: + input( + "no such replacement %d; press enter to continue" + % replacement_index + ) + else: + if cmd == "u": + rec["apply_all"] = replacement_index - 1 + rec["do_prompt"] = False + return replacement_index - 1 + + +# reg = re.compile(r"\:(class|attr|func|meth|obj|paramref)\:`~?(\.\w+).*?`") + +# reg = re.compile(r"\:(class|attr|func|meth|obj|paramref)\:`~?([\.a-z]+)`") + +# one token alone, to the end, we want the whole thing +# :func:`.foo` +# :class:`.Foo` +# +# two tokens, we want the first +# +# :func:`.foo.bar` +# :meth:`.Foo.bar` +# +# three tokens, we want the first two. +# +# :func:`.sql.expression.select` +# +# + +reg = re.compile( + r"\:(class|attr|func|meth|obj|paramref)\:" + r"`~?(?:(\.\w+)|(?:(\.\w+)\.\w+)|(?:(\.[a-z]+\.[a-z]+)\.\w+))(?:\(\))?`" +) + + +def tokenize_line(app_state, line): + """Search a line for py references. + + Return the line as as list of tokens, with non matched portions + and matched portions together, using Match objects for the matches + and plain strings for the non matched. + + max_missing_package_tokens indicates which xrefs we will prompt + for, based on how many package tokens are not present. 
zero means + only xrefs that are straight class, class + method, function etc. + with no package qualification at all. + + """ + + search_reg = app_state["opts"].search + tokens = [] + start = -1 + mend = 0 + has_tokens = False + for match in reg.finditer(line): + + if search_reg and not re.match(search_reg, match.group(0)): + continue + + has_tokens = True + mstart, mend = match.span(0) + if start == -1: + start = 0 + + # extend w/ non match tokens. keep commas and periods + # as separate tokens so that they don't get linebroken. + tokens.extend(re.split(r"([.,])", line[start:mstart])) + + tokens.append(match) + + start = mend + tokens.extend(re.split(r"([.,])", line[mend:])) + + if has_tokens: + return tokens + else: + return None + + +def process(fname, state, app_state): + """Parse, process, and write a single file. + + Creates a list of lines and then passes each one off to handle_line(). + handle_line() then has the option to replace that line in the list + of lines. The list of lines is then rejoined to write out the new file. + + """ + + write = False + with open(fname) as file_: + lines = list(file_) + + result = None + for linenum, line in enumerate(lines): + result = handle_line(fname, state, lines, linenum, line, app_state) + if result == "f": # skipfile + return + elif result == "w": # write and finish + write = True + break + elif result == "c": # has changes but keep going + write = True + if write: + sys.stdout.write("Writing %s..\n" % fname) + sys.stdout.flush() + with open(fname, "w") as file_: + file_.write("".join(lines)) + + return result + + +def handle_line(fname, state, lines, linenum, line, app_state): + """Parse, process and replace a single line in a list of lines.""" + + # skip comments + if re.match(r"^ *#", line): + return "n" + + # skip function decorators that are inline, these are too hard + elif re.match(r"^ *@", line): + return "n" + + # skip keyword arguments that are also too hard to format + elif re.match(r"^ *\w+ *= *", line): + return "n" + + line_tokens = tokenize_line(app_state, line) + + if not line_tokens: + return "n" + + has_replacements = False + + for idx, token in enumerate(line_tokens): + if isinstance(token, str): + continue + + if _matched_portion(token) not in state: + rec = state[_matched_portion(token)] = { + "replacements": [], + "cmd": None, + } + else: + rec = state[_matched_portion(token)] + + if rec.get("cmd") == "a": + # skip all of these, don't do prompt + result = "s" + else: + # do prompt + local_replacements = list(rec["replacements"]) + + while True: + result = prompt( + fname, + state, + lines, + linenum, + line_tokens, + idx, + rec, + local_replacements, + app_state, + ) + if result is True: + continue + else: + break + + if result in ("f", "w"): + return result # skipfile + elif result == "s": + continue # skip this token + elif result == "a": + rec["cmd"] = "a" # skip all of these + continue + elif result == "F": + # continue without prompting + continue + + elif isinstance(result, int): + replacement_text = local_replacements[result] + if replacement_text not in rec["replacements"]: + rec["replacements"].append(replacement_text) + write_replacement_rec( + app_state["fixes_file"], + _matched_portion(token), + replacement_text, + ) + has_replacements = True + line_tokens[idx] = ( + token.group(0) + .replace(_matched_portion(token), replacement_text) + .replace("~", "") + ) + + if has_replacements: + + if fname.endswith(".py"): + + # reformatting lines for max line length is VERY hard. 
Give + # ourselves a break by not enforcing it for .rst files right now. + # python is hard enough. still rst docstrings in the py files + # though + reformat_py_line(lines, linenum, line_tokens) + else: + reformat_rst_line(lines, linenum, line_tokens) + return "c" + + +def reformat_rst_line(lines, linenum, line_tokens): + newline = "".join(_token_to_str(token) for token in line_tokens) + + if linenum == len(lines) - 1: + subsequent_line = None + else: + subsequent_line = lines[linenum + 1] + + # find an rst underline and rewrite it to be the same length + # as the new line + if subsequent_line and subsequent_line[0] in ("=-~^"): + underline_token = subsequent_line[0] + if subsequent_line.strip() == underline_token * len( + subsequent_line.strip() + ): + lines[linenum + 1] = underline_token * len(newline) + + lines[linenum] = newline + + +def reformat_py_line(lines, linenum, line_tokens, length=79): + """Given line tokens where one or more of the tokens has been replaced, + write out a new line, while ensuring that the max length is maintained. + + When the resulting line would be longer than the length, the line is + split at that point. Heuristics are used to determine what the + left-leading indentation should be, as well as if individual lines + have quotes on both sides. + + """ + if linenum == len(lines) - 1: + subsequent_line = None + else: + subsequent_line = lines[linenum + 1] + + line_tokens = [_token_to_str(token) for token in line_tokens] + printed_line = "".join(line_tokens) + if len(printed_line) <= length: + lines[linenum] = printed_line + return + + quote_char = "" + stripped = printed_line.strip() + lq = re.match(r'^("""|"|\'|\'' ")", stripped) + rq = re.match(r'.*?("""|"|\'|\'' ")(,)?$", stripped) + + if lq and rq and lq.group(1) == rq.group(1): + quote_char = lq.group(1) + else: + quote_char = "" + + # figure out current line whitespace. looking for zero or more whitespace, + # then optional markup that suggests the next line needs to be futher + # indented, such as numbering, bullet, :return:, :param: or ".. + # rstdirective" of some kind + whitespace_match = re.match( + r"^( *)(\d+[\:\.] |\* |\:param .+?\:|\:return\:|\.\. \w+)?", + printed_line, + ) + if whitespace_match: + whitespace = whitespace_match.group(1) + ( + " " * len(whitespace_match.group(2) or "") + ) + else: + whitespace = "" + + # if it looks like we're a bullet or a paramref or something, see if + # we can get the indentation from the next line. + if ( + whitespace_match + and not quote_char + and subsequent_line.strip() + and whitespace_match.group(2) + ): + # make sure we match :param : and not + # :paramref: that happens to be inline :) + subsq_whitespace_match = re.match( + r"^( +)(\d+\: |\* |\:param .+?\:)?", subsequent_line + ) + + # if we have the next line, anb it does not look like it is + # itself a bullet or param, and it has indentation that is the + # same or greater than ours, we will use that indentation. 
+ if ( + subsq_whitespace_match + and not subsq_whitespace_match.group(2) + and len(subsq_whitespace_match.group(0)) + >= len(whitespace_match.group(1)) + ): + whitespace = subsq_whitespace_match.group(1) + + len_ = 0 + + for idx, token in enumerate(line_tokens): + + len_ += len(token) + if len_ >= length: + len_ = 0 + + if idx > 0: + line_tokens[idx - 1] = ( + line_tokens[idx - 1].rstrip() + + (" " if quote_char else "") + + quote_char + ) + if token.strip() != quote_char: + token = "\n" + whitespace + quote_char + token.lstrip() + else: + token = "\n" + line_tokens[idx] = token + + newline = "".join(line_tokens) + lines[linenum] = newline + + +def restore_state_file(state_file_name): + """Read the state file if any and restore existing replacement tokens + that were established from a previous run. + + """ + state = {} + if not os.path.exists(state_file_name): + return state + + with open(state_file_name, "r") as file_: + for line in file_: + old, new = line.strip().split(" ", 1) + if old not in state: + state[old] = rec = { + "replacements": [], + "cmd": None, + } + else: + rec = state[old] + + if new not in rec["replacements"]: + rec["replacements"].append(new) + + return state + + +def write_replacement_rec(state_file_name, old, new): + """Write a single replacement token to the state file.""" + with open(state_file_name, "a") as file_: + file_.write("%s %s\n" % (old, new)) + + +def main(argv=None): + parser = argparse.ArgumentParser() + parser.add_argument("filespec", help="file or directory", nargs="+") + parser.add_argument( + "-f", + "--fixes-file", + help="path to the current list of fixes", + default="fix_xref_state.txt", + ) + parser.add_argument( + "--search", help="only work with symbols matching this regexp" + ) + args = parser.parse_args() + + state = restore_state_file(args.fixes_file) + app_state = {"opts": args, "symbols": state, "fixes_file": args.fixes_file} + + for filespec in args.filespec: + file_ = os.path.abspath(filespec) + if os.path.isdir(file_): + for root, dirs, files in os.walk(file_): + for fname in files: + if not fname.endswith(".py") and not fname.endswith( + ".rst" + ): + continue + process(os.path.join(root, fname), state, app_state) + else: + process(file_, state, app_state) + + +if __name__ == "__main__": + main() diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx/__init__.py zzzeeksphinx-1.5.0/zzzeeksphinx/__init__.py --- zzzeeksphinx-1.4.0/zzzeeksphinx/__init__.py 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx/__init__.py 2024-01-06 16:35:10.000000000 +0000 @@ -1,4 +1,4 @@ -__version__ = "1.4.0" +__version__ = "1.5.0" def setup(app): diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsbase/layout.mako zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsbase/layout.mako --- zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsbase/layout.mako 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsbase/layout.mako 2024-02-29 21:23:44.000000000 +0000 @@ -1,7 +1,19 @@ ## coding: utf-8 <%! - import datetime + import os + import time + from datetime import datetime, timezone + + if "SOURCE_DATE_EPOCH" in os.environ: + generated_at = datetime.fromtimestamp( + timestamp=float(os.environ['SOURCE_DATE_EPOCH']), + tz=timezone.utc + ) + else: + generated_at = datetime.fromtimestamp( + timestamp=time.time(), + ).astimezone() local_script_files = [] @@ -307,7 +319,7 @@ Created using Sphinx ${sphinx_version|h}. 
% endif - Documentation last generated: ${datetime.datetime.now().strftime("%c")} + Documentation last generated: ${generated_at.strftime("%c %Z")} @@ -317,15 +329,10 @@ <%block name="lower_scripts"> + ## see https://github.com/sphinx-doc/sphinx/commit/8e730ae303ae686705ea12f44ef11da926a87cf5 + document.documentElement.dataset.content_root = '${content_root}'; - + % for scriptfile in script_files + self.attr.local_script_files: diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsbase/static_base.mako zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsbase/static_base.mako --- zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsbase/static_base.mako 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsbase/static_base.mako 2024-01-06 16:51:51.000000000 +0000 @@ -1,5 +1,4 @@ - + <%def name="bannerad()"> diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsmako/layout.mako zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsmako/layout.mako --- zzzeeksphinx-1.4.0/zzzeeksphinx/themes/zsmako/layout.mako 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx/themes/zsmako/layout.mako 2024-01-06 16:51:51.000000000 +0000 @@ -200,15 +200,10 @@ <%block name="lower_scripts"> + ## see https://github.com/sphinx-doc/sphinx/commit/8e730ae303ae686705ea12f44ef11da926a87cf5 + document.documentElement.dataset.content_root = '${content_root}'; - + % for scriptfile in script_files + self.attr.local_script_files: diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx/viewsource.py zzzeeksphinx-1.5.0/zzzeeksphinx/viewsource.py --- zzzeeksphinx-1.4.0/zzzeeksphinx/viewsource.py 2023-04-25 23:29:57.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx/viewsource.py 2023-11-08 20:14:08.000000000 +0000 @@ -1,5 +1,5 @@ import ast -import imp +import importlib import os import re import warnings @@ -11,13 +11,12 @@ from sphinx.locale import _ from sphinx.locale import __ from sphinx.pycode import ModuleAnalyzer -from sphinx.util import status_iterator +from sphinx.util.display import status_iterator from . import util def view_source(name, rawtext, text, lineno, inliner, options={}, content=[]): - env = inliner.document.settings.env node = _view_source_node(env, text, None) @@ -211,16 +210,24 @@ for tok in modname.split("."): try: - file_, pathname, desc = imp.find_module( - tok, [pathname] if pathname else None + thing = importlib.machinery.PathFinder().find_spec( + tok, + [pathname] if pathname else None, ) except ImportError as ie: raise ImportError("Error trying to import %s: %s" % (modname, ie)) else: - if file_: - if state: - module_docstring = _get_module_docstring(file_) - file_.close() + pathname = ( + thing.submodule_search_locations[0] + if thing.submodule_search_locations + else thing.origin + ) + if thing.origin: + assert thing.origin + + with open(thing.origin, "r") as tfile: + if state: + module_docstring = _get_module_docstring(tfile) # unlike viewcode which silently traps exceptions, # I want this to totally barf if the file can't be loaded. @@ -345,7 +352,6 @@ def setup(app): - app.add_role("viewsource", view_source) app.add_directive("autosource", AutoSourceDirective) diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/PKG-INFO zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/PKG-INFO --- zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/PKG-INFO 2023-04-25 23:30:03.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/PKG-INFO 2024-02-29 21:35:00.000000000 +0000 @@ -1,19 +1,23 @@ Metadata-Version: 2.1 Name: zzzeeksphinx -Version: 1.4.0 +Version: 1.5.0 Summary: Zzzeek's Sphinx Layout and Utilities. 
Home-page: https://github.com/sqlalchemyorg/zzzeeksphinx Author: Mike Bayer Author-email: mike@zzzcomputing.com License: MIT Keywords: Sphinx -Platform: UNKNOWN Classifier: Development Status :: 3 - Alpha Classifier: Environment :: Console Classifier: Intended Audience :: Developers Classifier: Programming Language :: Python :: 3 Classifier: Topic :: Documentation License-File: LICENSE +Requires-Dist: libsass +Requires-Dist: mako +Requires-Dist: requests +Requires-Dist: sphinx<7.3,>=7.2 +Requires-Dist: sphinxcontrib-jquery ============= zzzeeksphinx @@ -112,7 +116,5 @@ - - diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/SOURCES.txt zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/SOURCES.txt --- zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/SOURCES.txt 2023-04-25 23:30:03.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/SOURCES.txt 2024-02-29 21:35:00.000000000 +0000 @@ -1,8 +1,14 @@ +.gitignore +.pre-commit-config.yaml LICENSE MANIFEST.in README.rst setup.cfg setup.py +.github/workflows/publish.yaml +tools/annotated.xcf +tools/dev_anno_detection.py +tools/fix_xrefs.py zzzeeksphinx/__init__.py zzzeeksphinx/autodoc_mods.py zzzeeksphinx/dialect_info.py diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/entry_points.txt zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/entry_points.txt --- zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/entry_points.txt 2023-04-25 23:30:03.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/entry_points.txt 2024-02-29 21:35:00.000000000 +0000 @@ -6,4 +6,3 @@ zsbase = zzzeeksphinx.theme zsmako = zzzeeksphinx.theme zzzeeksphinx = zzzeeksphinx.theme - diff -Nru zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/requires.txt zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/requires.txt --- zzzeeksphinx-1.4.0/zzzeeksphinx.egg-info/requires.txt 2023-04-25 23:30:03.000000000 +0000 +++ zzzeeksphinx-1.5.0/zzzeeksphinx.egg-info/requires.txt 2024-02-29 21:35:00.000000000 +0000 @@ -1,5 +1,5 @@ libsass mako requests -sphinx<6.3,>=5.3.0 +sphinx<7.3,>=7.2 sphinxcontrib-jquery
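The zzzeeksphinx/viewsource.py hunk above replaces imp.find_module() with importlib's PathFinder, since the imp module was removed in Python 3.12. Below is a minimal standalone sketch of that per-dotted-component lookup loop, outside of the Sphinx role machinery; the function name and the example module are illustrative only and are not part of zzzeeksphinx's API.

    import importlib.machinery

    def locate_module_source(modname, search_path=None):
        # walk each dotted component with PathFinder, mirroring the
        # replacement for imp.find_module() shown in the hunk above
        pathname = search_path
        origin = None
        for tok in modname.split("."):
            spec = importlib.machinery.PathFinder().find_spec(
                tok, [pathname] if pathname else None
            )
            if spec is None:
                raise ImportError("Error trying to import %s" % modname)
            # a package exposes its directory via submodule_search_locations;
            # a plain module only has an origin (its source file)
            pathname = (
                spec.submodule_search_locations[0]
                if spec.submodule_search_locations
                else spec.origin
            )
            origin = spec.origin
        return origin

    # e.g. prints something like ".../email/mime/text.py"
    print(locate_module_source("email.mime.text"))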
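The zsbase layout.mako hunk makes the "Documentation last generated" footer honor SOURCE_DATE_EPOCH so builds are reproducible, falling back to the local wall clock otherwise. A rough Python equivalent of that template logic follows, assuming the same environment-variable convention; the helper name is made up for illustration.

    import os
    import time
    from datetime import datetime, timezone

    def generated_at():
        # reproducible builds pin the footer timestamp via SOURCE_DATE_EPOCH
        # (seconds since the epoch, interpreted as UTC)
        if "SOURCE_DATE_EPOCH" in os.environ:
            return datetime.fromtimestamp(
                float(os.environ["SOURCE_DATE_EPOCH"]), tz=timezone.utc
            )
        # otherwise use "now" in the local timezone
        return datetime.fromtimestamp(time.time()).astimezone()

    print(generated_at().strftime("%c %Z"))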