diff -Nru seqan-1.4.2+dfsg/debian/changelog seqan-1.4.2+dfsg/debian/changelog --- seqan-1.4.2+dfsg/debian/changelog 2018-07-16 10:21:54.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/changelog 2020-03-02 13:48:29.000000000 +0000 @@ -1,3 +1,26 @@ +seqan (1.4.2+dfsg-4) unstable; urgency=medium + + * Team upload. + + [ Andreas Tille ] + * Use 2to3 to port from Python2 to Python3 + Closes: #943260 + * debhelper-compat 12 + * Respect DEB_BUILD_OPTIONS in override_dh_auto_test target + * Remove trailing whitespace in debian/changelog + * Remove trailing whitespace in debian/copyright + * Set upstream metadata fields: Bug-Database. + * Remove obsolete fields Contact, Name from debian/upstream/metadata. + * Rely on pre-initialized dpkg-architecture variables. + + [ Michael R. Crusoe ] + * Skip some broken tests (Closes: #952203) + * Finished the Python3 conversion for demo_checker.py + * Mark seqan-dev as Multi-Arch: foreign + * Standards-Version: 4.5.0 + + -- Michael R. Crusoe Mon, 02 Mar 2020 14:48:29 +0100 + seqan (1.4.2+dfsg-3) unstable; urgency=medium * debhelper 11 @@ -17,7 +40,7 @@ seqan (1.4.2+dfsg-1) unstable; urgency=medium - [Andreas Tille] + [Andreas Tille] * New upstream version Closes: #766741 * Moved packaging to Git @@ -33,9 +56,9 @@ Closes: #816988, #809058 * Install cmake files Closes: #818088 - + [Gert Wollny] - * d/rules: Add -DNDEBUG to CXXFLAGS, Closes: #811841 + * d/rules: Add -DNDEBUG to CXXFLAGS, Closes: #811841 * d/rules: enable parallel build -- Andreas Tille Tue, 02 Aug 2016 15:58:06 +0200 diff -Nru seqan-1.4.2+dfsg/debian/compat seqan-1.4.2+dfsg/debian/compat --- seqan-1.4.2+dfsg/debian/compat 2018-07-16 10:21:47.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/compat 1970-01-01 00:00:00.000000000 +0000 @@ -1 +0,0 @@ -11 diff -Nru seqan-1.4.2+dfsg/debian/control seqan-1.4.2+dfsg/debian/control --- seqan-1.4.2+dfsg/debian/control 2018-07-16 10:21:51.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/control 2020-03-02 13:48:29.000000000 +0000 @@ -5,21 +5,22 @@ 
Kevin Murray Section: science Priority: optional -Build-Depends: debhelper (>= 11~), +Build-Depends: debhelper-compat (= 12), cmake, zlib1g-dev, libbam-dev, libboost-dev, dh-exec, help2man, - python -Standards-Version: 4.1.5 + python3 +Standards-Version: 4.5.0 Vcs-Browser: https://salsa.debian.org/med-team/seqan Vcs-Git: https://salsa.debian.org/med-team/seqan.git Homepage: http://www.seqan.de/ Package: seqan-dev Architecture: all +Multi-Arch: foreign Section: libdevel Depends: ${shlibs:Depends}, ${misc:Depends} diff -Nru seqan-1.4.2+dfsg/debian/copyright seqan-1.4.2+dfsg/debian/copyright --- seqan-1.4.2+dfsg/debian/copyright 2016-08-06 15:05:16.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/copyright 2020-02-25 10:28:15.000000000 +0000 @@ -144,7 +144,7 @@ modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. - . + . This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU diff -Nru seqan-1.4.2+dfsg/debian/patches/2to3.patch seqan-1.4.2+dfsg/debian/patches/2to3.patch --- seqan-1.4.2+dfsg/debian/patches/2to3.patch 1970-01-01 00:00:00.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/patches/2to3.patch 2020-03-02 13:47:14.000000000 +0000 @@ -0,0 +1,3098 @@ +Description: Use 2to3 to port from Python2 to Python3 +Bug-Debian: https://bugs.debian.org/943260 +Author: Andreas Tille +Last-Update: Mon, 16 Dec 2019 17:19:24 +0100 + +--- seqan.orig/misc/add_include.py ++++ seqan/misc/add_include.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python2.5 ++#!/usr/bin/python3 + """DDDoc Helper Script -- add includes. + + Called the path to the SeqAn checkout, this tool will search for all +@@ -12,7 +12,7 @@ + first line containing "*/" to find the end of a comment. 
+ """ + +-from __future__ import with_statement ++ + + import os + import os.path +@@ -28,7 +28,7 @@ + def processHeaderFile(base_path, relative_path): + """Process the header and insert the .include line if it is not already there.""" + full_path = os.path.join(base_path, relative_path) +- print full_path ++ print(full_path) + # Read file. + with open(full_path, 'r') as f: + contents = f.readlines() +@@ -91,8 +91,8 @@ + + def main(): + if len(sys.argv) != 2: +- print >>sys.stderr, "ERROR: Wrong argument count.""" +- print >>sys.stderr, USAGE_STR ++ print("ERROR: Wrong argument count.""", file=sys.stderr) ++ print(USAGE_STR, file=sys.stderr) + return 1 + + base_path = sys.argv[1] +@@ -101,7 +101,7 @@ + header_rel_paths = collectHeaderFilenames(base_path) + + for header_rel_path in header_rel_paths: +- print header_rel_path ++ print(header_rel_path) + processHeaderFile(base_path, header_rel_path) + + if __name__ == '__main__': +--- seqan.orig/misc/adjust_linebreaks.py ++++ seqan/misc/adjust_linebreaks.py +@@ -15,7 +15,7 @@ + if parseFile(path): counter += 1 + if 'CVS' in dirs: + dirs.remove('CVS') +- print counter, "lf corrupted files corrected" ++ print(counter, "lf corrupted files corrected") + + def testFileType(filename): + pos = filename.rfind(".") +@@ -66,9 +66,9 @@ + f = open(path, "wb") + f.write(s) + f.close() +- print '\n', path, ++ print('\n', path, end=' ') + else: +- print '.', ++ print('.', end=' ') + + return corrupt + +--- seqan.orig/misc/iorev_tagger.py ++++ seqan/misc/iorev_tagger.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python2.5 ++#!/usr/bin/env3 + """SeqAn IO-Revision code tagger step1 + + Usage: build_forwards.py BASE_PATH [all] +@@ -274,7 +274,7 @@ + elif len(namespaces) > 0: + namespaces = namespaces[:len(namespaces)-1] + else: +- print "ERROR in" , filename , "(", lineNumber, "): Too many }" ++ print("ERROR in" , filename , "(", lineNumber, "): Too many }") + + elif (pos6 >= 0): + if curlyCount == 0: +@@ -497,7 +497,7 @@ + + + def 
printOutFilesAndLines(): +- print FILES_LINES["projects/library/seqan/statistics/statistics_markov_model.h"] ++ print(FILES_LINES["projects/library/seqan/statistics/statistics_markov_model.h"]) + #TODO further processing + + +@@ -516,7 +516,7 @@ + if (lines_typedef != ""): + cmd += " -v lines_t=" + lines_typedef + cmd += " " + lastfilename + " > " + lastfilename + ".new" +- print cmd ++ print(cmd) + #if (lastfilename.find("file") >= 0): + os.system(cmd) + shutil.move(lastfilename, lastfilename + ".old") +@@ -536,7 +536,7 @@ + if (lines_typedef != ""): + cmd += " -v lines_t=" + lines_typedef + cmd += " " + lastfilename + " > " + lastfilename + ".new" +- print cmd ++ print(cmd) + #if (lastfilename.find("file") >= 0): + os.system(cmd) + shutil.move(lastfilename, lastfilename + ".old") +@@ -548,19 +548,19 @@ + + if order == "fltn": # file line type name + for i in sorted(FILTERED_SIGS, key=lambda x:(x[0],x[1],x[2],x[3])): +- print i[0], "\t", i[1], "\t", i[2], "\t", i[3] ++ print(i[0], "\t", i[1], "\t", i[2], "\t", i[3]) + elif order == "ntfl": # name type file line + for i in sorted(FILTERED_SIGS, key=lambda x:(x[3],x[2],x[0],x[1])): +- print i[3], "\t", i[2], "\t", i[0], "\t", i[1] ++ print(i[3], "\t", i[2], "\t", i[0], "\t", i[1]) + elif order == "fl": # file and lines in one line + lastfilename = "" + for i in sorted(FILTERED_SIGS, key=lambda x:(x[0],x[1])): + if i[0] != lastfilename: + lastfilename = i[0] +- print "\n", i[0], +- print " ", i[1], ++ print("\n", i[0], end=' ') ++ print(" ", i[1], end=' ') + +- print ++ print() + + + +@@ -614,8 +614,8 @@ + def main(): + """Main entry point for the forwards generator.""" + if len(sys.argv) < 2: +- print >>sys.stderr, 'ERROR: Too few arguments.' 
+- print >>sys.stderr, PROGRAM_USAGE ++ print('ERROR: Too few arguments.', file=sys.stderr) ++ print(PROGRAM_USAGE, file=sys.stderr) + return 1 + + force_rebuild = 1 +--- seqan.orig/misc/mat2cpp.py ++++ seqan/misc/mat2cpp.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python2.5 ++#!/usr/bin/python3 + """Simple converter for matrix files to C++ fragments. + + A matrix file is read from stdin and appropriate C++ code for +@@ -30,10 +30,10 @@ + data_lines.append(INDENT + ', '.join(formatted_data) + ',') + + +- print '\n'.join([INDENT_LEVEL * INDENT + l for l in comments]) +- print INDENT_LEVEL * INDENT + 'static int const _data[TAB_SIZE] = {' +- print '\n'.join([INDENT_LEVEL * INDENT + l for l in data_lines]) +- print INDENT_LEVEL * INDENT + '};' ++ print('\n'.join([INDENT_LEVEL * INDENT + l for l in comments])) ++ print(INDENT_LEVEL * INDENT + 'static int const _data[TAB_SIZE] = {') ++ print('\n'.join([INDENT_LEVEL * INDENT + l for l in data_lines])) ++ print(INDENT_LEVEL * INDENT + '};') + return 0 + + +--- seqan.orig/misc/profile2pdf.py ++++ seqan/misc/profile2pdf.py +@@ -1,10 +1,10 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Convert SeqAn profiling information into PDF graphic. 
+ + USAGE: profile2pdf.py + """ + +-from __future__ import with_statement ++ + + __author__ = 'Manuel Holtgrewe ' + +@@ -38,11 +38,11 @@ + colorstring = colorstring.strip() + if colorstring[0] == '#': colorstring = colorstring[1:] + if len(colorstring) != 6: +- raise ValueError, "input #%s is not in #RRGGBB format" % colorstring ++ raise ValueError("input #%s is not in #RRGGBB format" % colorstring) + r, g, b = colorstring[:2], colorstring[2:4], colorstring[4:] + r, g, b = [int(n, 16) for n in (r, g, b)] + return (r / 255.0, g / 255.0, b / 255.0) +-COLORS = map(htmlColorToRgb, COLORS) ++COLORS = list(map(htmlColorToRgb, COLORS)) + + class Meta(object): + def __init__(self, beginTimestamp, endTimestamp): +@@ -61,7 +61,7 @@ + def fromString(klass, s): + columns = s.split('\t') + if columns[0] != '@EVENT': +- print >>sys.stderr, 'First column\'s value was not "@EVENT@' ++ print('First column\'s value was not "@EVENT@', file=sys.stderr) + sys.exit(1) + identifier = int(columns[1]) + shortName = columns[2] +@@ -88,7 +88,7 @@ + columns = s.split('\t') + threadId = int(columns[0]) + if columns[1] not in ['BEGIN', 'END']: +- print >>sys.stderr, 'Second column\'s value was not BEGIN or END' ++ print('Second column\'s value was not BEGIN or END', file=sys.stderr) + sys.exit(1) + isBegin = columns[1] == 'BEGIN' + jobType = int(columns[2]) +@@ -130,7 +130,7 @@ + + def printSection(section, jobTypes, offset, level=0): + span = section.endTime - section.beginTime +- print '%s%s %f (%f to %f)' % ('\t' * level, jobTypes[section.jobType].shortName, span, section.beginTime - offset, section.endTime - offset) ++ print('%s%s %f (%f to %f)' % ('\t' * level, jobTypes[section.jobType].shortName, span, section.beginTime - offset, section.endTime - offset)) + for s in section.children: + printSection(s, jobTypes, offset, level+1) + +@@ -138,13 +138,13 @@ + with open(path, 'r') as f: + line = f.readline() + if line.strip() != '@SQN:PROFILE': +- print >>sys.stderr, 'Invalid file, does not 
start with "@SQN:PROFILE"' ++ print('Invalid file, does not start with "@SQN:PROFILE"', file=sys.stderr) + sys.exit(1) + line = f.readline() + if not line.startswith('@TIME'): +- print >>sys.stderr, 'Invalid file, second line does not start with "@TIME"' ++ print('Invalid file, second line does not start with "@TIME"', file=sys.stderr) + sys.exit(1) +- meta = Meta(*map(float, line.strip().split('\t')[1:])) ++ meta = Meta(*list(map(float, line.strip().split('\t')[1:]))) + # Load job types. + jobTypes = [] + while True: +@@ -248,12 +248,12 @@ + + def breakDownTimes(jobTypes, forests): + for threadId in sorted(forests.keys()): +- print 'Breakdown for thread #%d' % threadId ++ print('Breakdown for thread #%d' % threadId) + counter = {} + for section in forests[threadId]: + breakDownTimesHelper(counter, section) + for jobType in jobTypes: +- print ' %20s %10.5f' % (jobType.shortName, counter.get(jobType.identifier, 0)) ++ print(' %20s %10.5f' % (jobType.shortName, counter.get(jobType.identifier, 0))) + + def createDiagram(meta, jobTypes, forests, path): + totalBegin = meta.beginTimestamp +@@ -266,7 +266,7 @@ + cs = cairo.PDFSurface(path, width, height) + cr = cairo.Context(cs) + +- for threadId, forest in forests.iteritems(): ++ for threadId, forest in forests.items(): + for section in forest: + drawBoxesForSection(cr, jobTypes, section, totalBegin, threadId) + drawKey(cr, jobTypes, len(forests)) +@@ -276,21 +276,21 @@ + + def main(args): + if len(args) != 3: +- print >>sys.stderr, 'Invalid number of arguments!' +- print >>sys.stderr, 'USAGE: profile2pdf.py ' ++ print('Invalid number of arguments!', file=sys.stderr) ++ print('USAGE: profile2pdf.py ', file=sys.stderr) + return 1 + + # Load input file. +- print >>sys.stderr, 'Loading file', args[1] ++ print('Loading file', args[1], file=sys.stderr) + meta, jobTypes, events = loadFile(args[1]) + # Partition events by thread id. 
+- print >>sys.stderr, 'Partition events' ++ print('Partition events', file=sys.stderr) + eventsForThread = {} + for e in events: + eventsForThread.setdefault(e.threadId, []).append(e) + + # Build sections list and forest for each thread. +- print >>sys.stderr, 'Build sections' ++ print('Build sections', file=sys.stderr) + forests = {} + sections = {} + for threadId in sorted(eventsForThread.keys()): +@@ -303,12 +303,12 @@ + # printSection(x, jobTypes, s[0].beginTime) + + # Build diagram. +- print >>sys.stderr, 'Create diagram' ++ print('Create diagram', file=sys.stderr) + createDiagram(meta, jobTypes, forests, args[2]) + + # Show how much time each thread spent in each job type. + breakDownTimes(jobTypes, forests) +- print 'TOTAL TIME: %f s' % (meta.endTimestamp - meta.beginTimestamp) ++ print('TOTAL TIME: %f s' % (meta.endTimestamp - meta.beginTimestamp)) + + return 0 + +--- seqan.orig/misc/record_resources.py ++++ seqan/misc/record_resources.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import os.path + import sys +@@ -14,16 +14,16 @@ + + def main(args): + if len(args) <= 2: +- print >>sys.stderr, 'Wrong number of parameters!' 
+- print >>sys.stderr, USAGE ++ print('Wrong number of parameters!', file=sys.stderr) ++ print(USAGE, file=sys.stderr) + return 1 + + target_file = args[1] + binary = args[2] + arguments = args[3:] + +- print 'Executing', binary, ' '.join(arguments) +- print 'target file:', target_file ++ print('Executing', binary, ' '.join(arguments)) ++ print('target file:', target_file) + + c.Task(commands=[c.Command(binary, arguments)], result=target_file).execute() + +--- seqan.orig/misc/renaming/invalid_identifiers.py ++++ seqan/misc/renaming/invalid_identifiers.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + import sys + import re + +@@ -20,9 +20,7 @@ + REPLACEMENT_ID = re.compile(r'\b(__?)(\w*)\b') + # The following IDs are exempted from replacement since they are either defined + # by some compiler (-specific library) or are solely used within a string. +-VALID_IDENTIFIERS = map( +- lambda rx: re.compile(rx), +- [ '___+', ++VALID_IDENTIFIERS = [re.compile(rx) for rx in [ '___+', + '^__$', + '_N', + '_L', +@@ -144,7 +142,7 @@ + '__SUNPRO_C', + '__DECC', + '__IBMC__', +- '_MSC_VER' ]) ++ '_MSC_VER' ]] + + def valid(id): + """ +@@ -183,8 +181,8 @@ + + def main(): + if len(sys.argv) != 2: +- print >>sys.stderr, 'ERROR: Invalid number of arguments.' 
+- print >>sys.stderr, PROGRAM_USAGE ++ print('ERROR: Invalid number of arguments.', file=sys.stderr) ++ print(PROGRAM_USAGE, file=sys.stderr) + return 1 + + results = {} +@@ -194,13 +192,13 @@ + results[file] = set(find_all(file)) + + all_ids = set() +- for ids in results.values(): ++ for ids in list(results.values()): + all_ids |= ids + + replacements = generate_replacements(all_ids) + + for id in sorted(all_ids): +- print '%s: %s' % (id, replacements[id]) ++ print('%s: %s' % (id, replacements[id])) + + #for file in sorted(results.keys()): + # for id in results[file]: +--- seqan.orig/misc/renaming/replace_identifiers.py ++++ seqan/misc/renaming/replace_identifiers.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import sys + import re +@@ -22,7 +22,7 @@ + """ + Perform the substitutions given by the dictionary ``subst`` on ``text``. + """ +- for old in subst.keys(): ++ for old in list(subst.keys()): + text = old.sub(subst[old], text) + + return text +@@ -67,12 +67,12 @@ + # Windows, just assume good faith if the file name isn't given. + use_stdin = len(sys.argv) == 2 + if not (len(sys.argv) == 3 or use_stdin): +- print >>sys.stderr, 'ERROR: Invalid number of arguments.' +- print >>sys.stderr, PROGRAM_USAGE ++ print('ERROR: Invalid number of arguments.', file=sys.stderr) ++ print(PROGRAM_USAGE, file=sys.stderr) + return 1 + + if use_stdin: +- print >>sys.stderr, "Attempting to read from stdin ..." 
++ print("Attempting to read from stdin ...", file=sys.stderr) + + project_path = sys.argv[1] + replacements_file = sys.stdin if use_stdin else open(sys.argv[2], 'r') +--- seqan.orig/misc/swig/example.py ++++ seqan/misc/swig/example.py +@@ -6,9 +6,9 @@ + print(alignScore); + + dnaList=align.printDnaAlignment(alignObject); +-print(dnaList[0]); +-print(dnaList[1]); +-print(dnaList[2]); ++print((dnaList[0])); ++print((dnaList[1])); ++print((dnaList[2])); + + + scoreMatrix=align.getAminoAcidScoreMatrix("Blosum30"); +@@ -17,8 +17,8 @@ + + aminoList=align.printAminoAcidAlignment(alignObject); + print(alignScore) +-print(aminoList[0]); +-print(aminoList[1]); +-print(aminoList[2]); ++print((aminoList[0])); ++print((aminoList[1])); ++print((aminoList[2])); + + +--- seqan.orig/misc/trac_plugins/DocLinks/doc_links/macro.py ++++ seqan/misc/trac_plugins/DocLinks/doc_links/macro.py +@@ -19,7 +19,7 @@ + * {{{seqan:"Concept.Simple Type"}}} seqan:"Concept.Simple Type" + * {{{seqan:"Spec.Chunk Pool Allocator}}} seqan:"Spec.Chunk Pool Allocator" + """ +-import urllib ++import urllib.request, urllib.parse, urllib.error + import sys + + from trac.core import * +--- seqan.orig/misc/trac_plugins/FoldOut/FoldOutMacro.py ++++ seqan/misc/trac_plugins/FoldOut/FoldOutMacro.py +@@ -23,10 +23,10 @@ + from genshi.builder import tag + import genshi.core + import uuid +-import StringIO ++import io + +-ARROW_RIGHT = u'\u25B6' +-ARROW_DOWN = u'\u25BC' ++ARROW_RIGHT = '\u25B6' ++ARROW_DOWN = '\u25BC' + + class FoldOutMacro(trac.wiki.macros.WikiMacroBase): + def expand_macro(self, formatter, name, args): +@@ -48,13 +48,13 @@ + body_html = self.format_wiki(formatter, '\n'.join(body)) + hidden = tag.div(genshi.core.Markup(body_html), id=hidden_id, style='display:none;') + toggle_class = uuid.uuid4() +- toggle_js = genshi.core.Markup(u'$(\'#%s\').toggle();$(\'.%s\').toggle();return false;') % (hidden_id, toggle_class) ++ toggle_js = genshi.core.Markup('$(\'#%s\').toggle();$(\'.%s\').toggle();return 
false;') % (hidden_id, toggle_class) + toggle_link = tag.a(tag.span(ARROW_RIGHT + ' more...', class_=toggle_class) + tag.span(ARROW_DOWN + ' less...', class_=toggle_class, style='display:none;'), onclick=toggle_js, href='#') + summary_html = self.format_wiki(formatter, '\n'.join(summary)) + return genshi.core.Markup(summary_html) + toggle_link + genshi.core.Markup(hidden) + + def format_wiki(self, formatter, wiki_string): + """Format the given string wiki_string to HTML.""" +- out = StringIO.StringIO() ++ out = io.StringIO() + trac.wiki.Formatter(self.env, formatter.context).format(wiki_string, out) + return out.getvalue() +--- seqan.orig/misc/trac_plugins/IncludeMacro/includemacro/macros.py ++++ seqan/misc/trac_plugins/IncludeMacro/includemacro/macros.py +@@ -1,7 +1,7 @@ + # TracIncludeMacro macros + import re +-import urllib2 +-from StringIO import StringIO ++import urllib.request, urllib.error, urllib.parse ++from io import StringIO + + from trac.core import * + from trac.wiki.macros import WikiMacroBase +@@ -74,11 +74,11 @@ + self.log.info('IncludeMacro: Blocking attempt by %s to include URL %s on page %s', req.authname, source, req.path_info) + return '' + try: +- urlf = urllib2.urlopen(source) ++ urlf = urllib.request.urlopen(source) + out = urlf.read() +- except urllib2.URLError, e: ++ except urllib.error.URLError as e: + return system_message('Error while retrieving file', str(e)) +- except TracError, e: ++ except TracError as e: + return system_message('Error while previewing', str(e)) + ctxt = Context.from_request(req) + elif source_format == 'wiki': +--- seqan.orig/misc/trac_plugins/IncludeMacro/setup.py ++++ seqan/misc/trac_plugins/IncludeMacro/setup.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + # -*- coding: iso-8859-1 -*- + import os.path + +--- seqan.orig/misc/trac_plugins/TextBoxes/text_boxes/macro.py ++++ seqan/misc/trac_plugins/TextBoxes/text_boxes/macro.py +@@ -21,7 +21,7 @@ + + import itertools + import operator +-import 
StringIO ++import io + + from pkg_resources import resource_filename + +@@ -84,7 +84,7 @@ + return None + result = tag.span(args[0][0], class_='menu_item') + for text in args[0][1:]: +- result += tag.span(u' \u25B8 ', class_='arrow') ++ result += tag.span(' \u25B8 ', class_='arrow') + result += tag.span(text, class_='menu_item') + return tag.span(result, class_='menu_trace') + elif name in ['WarningBox', 'InfoBox', 'ImportantBox', 'AssignmentBox']: +@@ -95,7 +95,7 @@ + + def format_wiki(self, formatter, wiki_string): + """Format the given string wiki_string to HTML.""" +- out = StringIO.StringIO() ++ out = io.StringIO() + trac.wiki.Formatter(self.env, formatter.context).format(wiki_string, out) + return out.getvalue() + +--- seqan.orig/util/bin/auto_build.py ++++ seqan/util/bin/auto_build.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn Automatic Build System.""" + + import os.path +--- seqan.orig/util/bin/build_forwards.py ++++ seqan/util/bin/build_forwards.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python2.5 ++#!/usr/bin/python3 + """SeqAn Automatic Forwards Generator. 
+ + Usage: build_forwards.py BASE_PATH [TARGET_PATH] [all] +@@ -42,7 +42,7 @@ + if not os.path.exists(project_path): + return + +- print "create forwards for", project_path ++ print("create forwards for", project_path) + + global FUNCS + FUNCS = {} +@@ -79,7 +79,7 @@ + # if FUNCS != {}: + outAll(target_path, project) + +- print ++ print() + + + def forwardFilename(module): +@@ -102,10 +102,10 @@ + + for line in lines: + if (line.find("SEQAN_NO_GENERATED_FORWARDS") >= 0): +- print "-", ++ print("-", end=' ') + return + +- print ".", ++ print(".", end=' ') + + sigs = preprocess(lines, filename); + +@@ -220,7 +220,7 @@ + elif len(namespaces) > 0: + namespaces = namespaces[:len(namespaces)-1] + else: +- print "ERROR in" , filename , "(", lineNumber, "): Too many }" ++ print("ERROR in" , filename , "(", lineNumber, "): Too many }") + + elif (pos6 >= 0): + if curlyCount == 0: +@@ -350,7 +350,7 @@ + if pos2 >= 0: + str = str[pos2:] + else: +- print "ERROR while deleting default arguments" ++ print("ERROR while deleting default arguments") + break + + return ret +@@ -366,8 +366,8 @@ + edge_count = 0 + while pos < len(str): + if verbose: +- print pos, edge_count +- print str[pos:] ++ print(pos, edge_count) ++ print(str[pos:]) + + p1 = str.find(char, pos) + p2 = str.find("<", pos) +@@ -414,7 +414,7 @@ + def addEntry(arr, name, entry, namespaces): + """Adds a signature to FUNCS or CLASSES.""" + key = getSortKey(name, namespaces) +- if not arr.has_key(key): ++ if key not in arr: + arr[key] = [] + arr[key] += [[name, entry, namespaces]] + +@@ -530,7 +530,7 @@ + fl.close() + + def outList(lst): +- keys = lst.keys() ++ keys = list(lst.keys()) + keys.sort() + + namespaces = [] +@@ -626,8 +626,8 @@ + def main(): + """Main entry point for the forwards generator.""" + if len(sys.argv) < 2: +- print >>sys.stderr, 'ERROR: Too few arguments.' 
+- print >>sys.stderr, PROGRAM_USAGE ++ print('ERROR: Too few arguments.', file=sys.stderr) ++ print(PROGRAM_USAGE, file=sys.stderr) + return 1 + + force_rebuild = sys.argv[-1] == 'all' +--- seqan.orig/util/bin/dddoc.py ++++ seqan/util/bin/dddoc.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn Documentation System DDDoc.""" + + import os.path +--- seqan.orig/util/bin/demo_checker.py ++++ seqan/util/bin/demo_checker.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Demo checker script. + + Given a demo .cpp file PATH.cpp we can make it a small test if there is a file +@@ -23,7 +23,7 @@ + + def t(s): + """Force Windows line endings to Unix line endings.""" +- return s.replace("\r\n", "\n") ++ return s #.replace("\r\n", "\n") + + + def loadExpected(args): +@@ -40,7 +40,7 @@ + + def runDemo(args): + cmd = [args.binary_path] +- p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) ++ p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + p.wait() + return t(p.stdout.read()), t(p.stderr.read()), p.returncode + +@@ -58,32 +58,32 @@ + default=None) + args = parser.parse_args() + +- print >>sys.stderr, 'Loading files "%s", "%s".' % (args.stdout_path, args.stderr_path) ++ print('Loading files "%s", "%s".' % (args.stdout_path, args.stderr_path), file=sys.stderr) + expected_out, expected_err = loadExpected(args) +- print >>sys.stderr, 'Running %s.' % args.binary_path ++ print('Running %s.' % args.binary_path, file=sys.stderr) + actual_out, actual_err, ret = runDemo(args) + + if ret != 0: +- print >>sys.stderr, 'ERROR: Return code of %s was %s.' % (args.binary_path, ret) ++ print('ERROR: Return code of %s was %s.' % (args.binary_path, ret), file=sys.stderr) + return 1 + else: +- print >>sys.stderr, 'Return code was %s.' % ret ++ print('Return code was %s.' 
% ret, file=sys.stderr) + + if expected_out != actual_out: +- print >>sys.stderr, 'The standard output was not as expected!' ++ print('The standard output was not as expected!', file=sys.stderr) + l = difflib.context_diff(expected_out, actual_out, + fromfile='expected', tofile='actual') +- print >>sys.stderr, '\n'.join(l) ++ print('\n'.join(l), file=sys.stderr) + else: +- print >>sys.stderr, 'Standard output was as expected.' ++ print('Standard output was as expected.', file=sys.stderr) + + if expected_err != actual_err: +- print >>sys.stderr, 'The standard errput was not as expected!' ++ print('The standard errput was not as expected!', file=sys.stderr) + l = difflib.context_diff(expected_err, actual_err, + fromfile='expected', tofile='actual') +- print >>sys.stderr, '\n'.join(l) ++ print('\n'.join(l), file=sys.stderr) + else: +- print >>sys.stderr, 'Standard error was as expected.' ++ print('Standard error was as expected.', file=sys.stderr) + + return not (expected_out == expected_out and expected_err == actual_err) + +--- seqan.orig/util/bin/dox.py ++++ seqan/util/bin/dox.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn doxygen-style documentation system.""" + + import os.path +--- seqan.orig/util/bin/fixgcov.py ++++ seqan/util/bin/fixgcov.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Fix gcov output. + + Fix gcov output with templates. 
This is done by first parsing in the .cpp files +--- seqan.orig/util/bin/pyclangcheck.py ++++ seqan/util/bin/pyclangcheck.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """llvm-clang based style checker.""" + + __author__ = 'Manuel Holtgrewe ' +--- seqan.orig/util/bin/release_page.py ++++ seqan/util/bin/release_page.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Build SeqAn Release Page.""" + + import os.path +--- seqan.orig/util/bin/skel.py ++++ seqan/util/bin/skel.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn Skelleton Creation.""" + + import os.path +--- seqan.orig/util/ctd2galaxy.py ++++ seqan/util/ctd2galaxy.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import argparse + import sys +@@ -182,10 +182,10 @@ + + ctd_parser = CTDParser() + tool = ctd_parser.parse(args.in_file) +- print tool ++ print(tool) + for cli in tool.cli_elements: +- print ' %s' % cli +- print tool.parameters ++ print(' %s' % cli) ++ print(tool.parameters) + + + if __name__ == '__main__': +--- seqan.orig/util/py_lib/clang/cindex.py ++++ seqan/util/py_lib/clang/cindex.py +@@ -276,7 +276,7 @@ + if value >= len(CursorKind._kinds): + CursorKind._kinds += [None] * (value - len(CursorKind._kinds) + 1) + if CursorKind._kinds[value] is not None: +- raise ValueError,'CursorKind already loaded' ++ raise ValueError('CursorKind already loaded') + self.value = value + CursorKind._kinds[value] = self + CursorKind._name_map = None +@@ -289,7 +289,7 @@ + """Get the enumeration name of this cursor kind.""" + if self._name_map is None: + self._name_map = {} +- for key,value in CursorKind.__dict__.items(): ++ for key,value in list(CursorKind.__dict__.items()): + if isinstance(value,CursorKind): + self._name_map[value] = key + return self._name_map[self] +@@ -297,13 +297,13 @@ + @staticmethod + def from_id(id): + if id >= len(CursorKind._kinds) or CursorKind._kinds[id] is None: +- raise ValueError,'Unknown cursor 
kind' ++ raise ValueError('Unknown cursor kind') + return CursorKind._kinds[id] + + @staticmethod + def get_all_kinds(): + """Return all CursorKind enumeration instances.""" +- return filter(None, CursorKind._kinds) ++ return [_f for _f in CursorKind._kinds if _f] + + def is_declaration(self): + """Test if this is a declaration kind.""" +@@ -948,7 +948,7 @@ + if value >= len(TypeKind._kinds): + TypeKind._kinds += [None] * (value - len(TypeKind._kinds) + 1) + if TypeKind._kinds[value] is not None: +- raise ValueError,'TypeKind already loaded' ++ raise ValueError('TypeKind already loaded') + self.value = value + TypeKind._kinds[value] = self + TypeKind._name_map = None +@@ -961,7 +961,7 @@ + """Get the enumeration name of this cursor kind.""" + if self._name_map is None: + self._name_map = {} +- for key,value in TypeKind.__dict__.items(): ++ for key,value in list(TypeKind.__dict__.items()): + if isinstance(value,TypeKind): + self._name_map[value] = key + return self._name_map[self] +@@ -969,7 +969,7 @@ + @staticmethod + def from_id(id): + if id >= len(TypeKind._kinds) or TypeKind._kinds[id] is None: +- raise ValueError,'Unknown cursor kind' ++ raise ValueError('Unknown cursor kind') + return TypeKind._kinds[id] + + def __repr__(self): +@@ -1374,9 +1374,9 @@ + # FIXME: It would be great to support an efficient version + # of this, one day. + value = value.read() +- print value ++ print(value) + if not isinstance(value, str): +- raise TypeError,'Unexpected unsaved file contents.' ++ raise TypeError('Unexpected unsaved file contents.') + unsaved_files_array[i].name = name + unsaved_files_array[i].contents = value + unsaved_files_array[i].length = len(value) +@@ -1465,9 +1465,9 @@ + # FIXME: It would be great to support an efficient version + # of this, one day. + value = value.read() +- print value ++ print(value) + if not isinstance(value, str): +- raise TypeError,'Unexpected unsaved file contents.' 
++ raise TypeError('Unexpected unsaved file contents.') + unsaved_files_array[i].name = name + unsaved_files_array[i].contents = value + unsaved_files_array[i].length = len(value) +@@ -1491,9 +1491,9 @@ + # FIXME: It would be great to support an efficient version + # of this, one day. + value = value.read() +- print value ++ print(value) + if not isinstance(value, str): +- raise TypeError,'Unexpected unsaved file contents.' ++ raise TypeError('Unexpected unsaved file contents.') + unsaved_files_array[i].name = name + unsaved_files_array[i].contents = value + unsaved_files_array[i].length = len(value) +--- seqan.orig/util/py_lib/pyratemp.py ++++ seqan/util/py_lib/pyratemp.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + # -*- coding: utf-8 -*- + """ + Small, simple and powerful template-engine for python. +@@ -179,7 +179,7 @@ + + #========================================= + +-import __builtin__, os ++import builtins, os + import re + + #========================================= +@@ -224,7 +224,7 @@ + """Convert all keys of the dict `d` to strings. + """ + new_d = {} +- for k, v in d.iteritems(): ++ for k, v in d.items(): + new_d[str(k)] = v + return new_d + +@@ -247,7 +247,7 @@ + #========================================= + # escaping + +-(NONE, HTML, LATEX) = range(0, 3) ++(NONE, HTML, LATEX) = list(range(0, 3)) + ESCAPE_SUPPORTED = {"NONE":None, "HTML":HTML, "LATEX":LATEX} #for error-/parameter-checking + + def escape(s, format=HTML): +@@ -277,25 +277,25 @@ + if format is None or format == NONE: + pass + elif format == HTML: +- s = s.replace(u"&", u"&") # must be done first! +- s = s.replace(u"<", u"<") +- s = s.replace(u">", u">") +- s = s.replace(u'"', u""") +- s = s.replace(u"'", u"'") ++ s = s.replace("&", "&") # must be done first! ++ s = s.replace("<", "<") ++ s = s.replace(">", ">") ++ s = s.replace('"', """) ++ s = s.replace("'", "'") + elif format == LATEX: + #TODO: which are the "reserved" characters for LaTeX? 
+ # are there more than these? +- s = s.replace("\\", u"\\backslash{}") #must be done first! +- s = s.replace("#", u"\\#") +- s = s.replace("$", u"\\$") +- s = s.replace("%", u"\\%") +- s = s.replace("&", u"\\&") +- s = s.replace("_", u"\\_") +- s = s.replace("{", u"\\{") +- s = s.replace("}", u"\\}") ++ s = s.replace("\\", "\\backslash{}") #must be done first! ++ s = s.replace("#", "\\#") ++ s = s.replace("$", "\\$") ++ s = s.replace("%", "\\%") ++ s = s.replace("&", "\\&") ++ s = s.replace("_", "\\_") ++ s = s.replace("{", "\\{") ++ s = s.replace("}", "\\}") + else: + raise ValueError('Invalid format (only None, HTML and LATEX are supported).') +- return unicode(s) ++ return str(s) + + #========================================= + +@@ -349,10 +349,10 @@ + def load(self, string): + """Return template-string as unicode. + """ +- if isinstance(string, unicode): ++ if isinstance(string, str): + u = string + else: +- u = unicode(string, self.encoding) ++ u = str(string, self.encoding) + return u + + class LoaderFile: +@@ -397,7 +397,7 @@ + string = f.read() + f.close() + +- u = unicode(string, self.encoding) ++ u = str(string, self.encoding) + + return u + +@@ -504,11 +504,11 @@ + else: + try: # test if testexpr() works + testexpr("i==1") +- except Exception,err: ++ except Exception as err: + raise ValueError("Invalid 'testexpr' (%s)." %(err)) + self._testexprfunc = testexpr + +- if escape not in ESCAPE_SUPPORTED.values(): ++ if escape not in list(ESCAPE_SUPPORTED.values()): + raise ValueError("Unsupported 'escape' (%s)." 
%(escape)) + self.escape = escape + self._includestack = [] +@@ -536,7 +536,7 @@ + """Test a template-expression to detect errors.""" + try: + self._testexprfunc(expr) +- except SyntaxError,err: ++ except SyntaxError as err: + raise TemplateSyntaxError(err, self._errpos(fpos)) + + def _parse_sub(self, parsetree, text, fpos=0): +@@ -707,7 +707,7 @@ + block_type = 'include' + try: + u = self._load(content.strip()) +- except Exception,err: ++ except Exception as err: + raise TemplateIncludeError(err, self._errpos(pos__)) + self._includestack.append((content.strip(), u)) # current filename/template for error-msg. + p = self._parse(u) +@@ -773,42 +773,42 @@ + """ + + safe_builtins = { +- "True" : __builtin__.True, +- "False" : __builtin__.False, +- "None" : __builtin__.None, +- +- "abs" : __builtin__.abs, +- "chr" : __builtin__.chr, +- "cmp" : __builtin__.cmp, +- "divmod" : __builtin__.divmod, +- "hash" : __builtin__.hash, +- "hex" : __builtin__.hex, +- "len" : __builtin__.len, +- "max" : __builtin__.max, +- "min" : __builtin__.min, +- "oct" : __builtin__.oct, +- "ord" : __builtin__.ord, +- "pow" : __builtin__.pow, +- "range" : __builtin__.range, +- "round" : __builtin__.round, +- "sorted" : __builtin__.sorted, +- "sum" : __builtin__.sum, +- "unichr" : __builtin__.unichr, +- "zip" : __builtin__.zip, +- +- "bool" : __builtin__.bool, +- "complex" : __builtin__.complex, +- "dict" : __builtin__.dict, +- "enumerate" : __builtin__.enumerate, +- "float" : __builtin__.float, +- "int" : __builtin__.int, +- "list" : __builtin__.list, +- "long" : __builtin__.long, +- "reversed" : __builtin__.reversed, +- "str" : __builtin__.str, +- "tuple" : __builtin__.tuple, +- "unicode" : __builtin__.unicode, +- "xrange" : __builtin__.xrange, ++ "True" : True, ++ "False" : False, ++ "None" : None, ++ ++ "abs" : builtins.abs, ++ "chr" : builtins.chr, ++ "cmp" : (lambda a, b: (a > b) - (a < b)), ++ "divmod" : builtins.divmod, ++ "hash" : builtins.hash, ++ "hex" : builtins.hex, ++ "len" : 
builtins.len, ++ "max" : builtins.max, ++ "min" : builtins.min, ++ "oct" : builtins.oct, ++ "ord" : builtins.ord, ++ "pow" : builtins.pow, ++ "range" : builtins.range, ++ "round" : builtins.round, ++ "sorted" : builtins.sorted, ++ "sum" : builtins.sum, ++ "unichr" : builtins.chr, ++ "zip" : builtins.zip, ++ ++ "bool" : builtins.bool, ++ "complex" : builtins.complex, ++ "dict" : builtins.dict, ++ "enumerate" : builtins.enumerate, ++ "float" : builtins.float, ++ "int" : builtins.int, ++ "list" : builtins.list, ++ "long" : builtins.int, ++ "reversed" : builtins.reversed, ++ "str" : builtins.str, ++ "tuple" : builtins.tuple, ++ "unicode" : builtins.str, ++ "xrange" : builtins.range, + } + + def __init__(self): +@@ -991,7 +991,7 @@ + """ + self.current_data = self.data.copy() + self.current_data.update(override) +- u = u"".join(self._render(self.parsetree, self.current_data)) ++ u = "".join(self._render(self.parsetree, self.current_data)) + self.current_data = self.data # restore current_data + return _dontescape(u) # (see class _dontescape) + +@@ -1005,7 +1005,7 @@ + #----------------------------------------- + # Renderer + +-class _dontescape(unicode): ++class _dontescape(str): + """Unicode-string which should not be escaped. + + If ``isinstance(object,_dontescape)``, then don't escape the object in +@@ -1042,7 +1042,7 @@ + try: + return self.evalfunc(expr, data) + #TODO: any other errors to catch here? +- except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError), err: ++ except (TypeError,NameError,IndexError,KeyError,AttributeError, SyntaxError) as err: + raise TemplateRenderError("Cannot eval expression '%s'. 
(%s: %s)" %(expr, err.__class__.__name__, err)) + + def render(self, parsetree, data): +@@ -1065,31 +1065,31 @@ + if "str" == elem[0]: + output.append(elem[1]) + elif "sub" == elem[0]: +- output.append(unicode(_eval(elem[1], data))) ++ output.append(str(_eval(elem[1], data))) + elif "esc" == elem[0]: + obj = _eval(elem[2], data) + #prevent double-escape + if isinstance(obj, _dontescape) or isinstance(obj, TemplateBase): +- output.append(unicode(obj)) ++ output.append(str(obj)) + else: +- output.append(self.escapefunc(unicode(obj), elem[1])) ++ output.append(self.escapefunc(str(obj), elem[1])) + elif "for" == elem[0]: + do_else = True + (names, iterable) = elem[1:3] + try: + loop_iter = iter(_eval(iterable, data)) +- except TypeError, e: +- print data +- print iterable +- print type(_eval(iterable, data)) +- print e ++ except TypeError as e: ++ print(data) ++ print(iterable) ++ print(type(_eval(iterable, data))) ++ print(e) + raise TemplateRenderError("Cannot loop over '%s'." % iterable) + for i in loop_iter: + do_else = False + if len(names) == 1: + data[names[0]] = i + else: +- data.update(zip(names, i)) #"for a,b,.. in list" ++ data.update(list(zip(names, i))) #"for a,b,.. in list" + output.extend(self.render(elem[3], data)) + elif "if" == elem[0]: + do_else = True +--- seqan.orig/util/py_lib/seqan/app_tests.py ++++ seqan/util/py_lib/seqan/app_tests.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Helper code for app tests. + + This module contains helper functions and classes for making app tests easy. +@@ -18,7 +18,7 @@ + directory. 
+ """ + +-from __future__ import with_statement ++ + + __author__ = 'Manuel Holtgrewe ' + +@@ -165,7 +165,7 @@ + + def deleteTempDir(self): + """Remove the temporary directory created earlier and all files below.""" +- print >>sys.stderr, 'DELETING TEMP DIR', self.temp_dir ++ print('DELETING TEMP DIR', self.temp_dir, file=sys.stderr) + if self.temp_dir: + shutil.rmtree(self.temp_dir) + +@@ -242,20 +242,20 @@ + logging.debug(' return code is %d', retcode) + if retcode != 0: + fmt = 'Return code of command "%s" was %d.' +- print >>sys.stderr, '--- stdout begin --' +- print >>sys.stderr, fmt % (' '.join(test_conf.commandLineArgs()), retcode) +- print >>sys.stderr, stdout_file.read() +- print >>sys.stderr, '--- stdout end --' ++ print('--- stdout begin --', file=sys.stderr) ++ print(fmt % (' '.join(test_conf.commandLineArgs()), retcode), file=sys.stderr) ++ print(stdout_file.read(), file=sys.stderr) ++ print('--- stdout end --', file=sys.stderr) + stdout_file.close() + if process.stderr: + stderr_contents = process.stderr.read() + else: + stderr_contents = '' +- print >>sys.stderr, '-- stderr begin --' +- print >>sys.stderr, stderr_contents +- print >>sys.stderr, '-- stderr end --' ++ print('-- stderr begin --', file=sys.stderr) ++ print(stderr_contents, file=sys.stderr) ++ print('-- stderr end --', file=sys.stderr) + return False +- except Exception, e: ++ except Exception as e: + # Print traceback. + import traceback + exc_type, exc_value, exc_traceback = sys.exc_info() +@@ -263,28 +263,28 @@ + fmt = 'ERROR (when executing "%s"): %s' + if stdout_file is not subprocess.PIPE: + stdout_file.close() +- print >>sys.stderr, fmt % (' '.join(test_conf.commandLineArgs()), e) ++ print(fmt % (' '.join(test_conf.commandLineArgs()), e), file=sys.stderr) + return False + # Handle error of program, indicated by return code != 0. + if retcode != 0: +- print >>sys.stderr, 'Error when executing "%s".' 
% ' '.join(test_conf.commandLineArgs()) +- print >>sys.stderr, 'Return code is %d' % retcode ++ print('Error when executing "%s".' % ' '.join(test_conf.commandLineArgs()), file=sys.stderr) ++ print('Return code is %d' % retcode, file=sys.stderr) + if stdout_file is not subprocess.PIPE: + stdout_file.seek(0) + stdout_contents = process.stdout.read() + if stdout_contents: +- print >>sys.stderr, '-- stdout begin --' +- print >>sys.stderr, stdout_contents +- print >>sys.stderr, '-- stdout end --' ++ print('-- stdout begin --', file=sys.stderr) ++ print(stdout_contents, file=sys.stderr) ++ print('-- stdout end --', file=sys.stderr) + else: +- print >>sys.stderr, '-- stdout is empty --' ++ print('-- stdout is empty --', file=sys.stderr) + stderr_contents = process.stderr.read() + if stderr_contents: +- print >>sys.stderr, '-- stderr begin --' +- print >>sys.stderr, stderr_contents +- print >>sys.stderr, '-- stderr end --' ++ print('-- stderr begin --', file=sys.stderr) ++ print(stderr_contents, file=sys.stderr) ++ print('-- stderr end --', file=sys.stderr) + else: +- print >>sys.stderr, '-- stderr is empty --' ++ print('-- stderr is empty --', file=sys.stderr) + # Close standard out file if necessary. 
+ if stdout_file is not subprocess.PIPE: + stdout_file.close() +@@ -310,7 +310,7 @@ + continue + else: + tpl = (expected_path, expected_md5, result_md5, result_path) +- print >>sys.stderr, 'md5(%s) == %s != %s == md5(%s)' % tpl ++ print('md5(%s) == %s != %s == md5(%s)' % tpl, file=sys.stderr) + result = False + else: + with open(expected_path, 'rb') as f: +@@ -324,25 +324,25 @@ + if expected_str == result_str: + continue + fmt = 'Comparing %s against %s' +- print >>sys.stderr, fmt % (expected_path, result_path) ++ print(fmt % (expected_path, result_path), file=sys.stderr) + diff = difflib.unified_diff(expected_str.splitlines(), + result_str.splitlines()) + for line in diff: +- print >>sys.stderr, line ++ print(line, file=sys.stderr) + result = False +- except Exception, e: ++ except Exception as e: + fmt = 'Error when trying to compare %s to %s: %s ' + str(type(e)) +- print >>sys.stderr, fmt % (expected_path, result_path, e) ++ print(fmt % (expected_path, result_path, e), file=sys.stderr) + result = False + # Call check callable. + if test_conf.check_callback: + try: + test_conf.check_callback() +- except BadResultException, e: +- print >>sys.stderr, 'Bad result: ' + str(e) ++ except BadResultException as e: ++ print('Bad result: ' + str(e), file=sys.stderr) + result = False +- except Exception, e: +- print >>sys.stderr, 'Error in checker: ' + str(type(e)) + ' ' + str(e) ++ except Exception as e: ++ print('Error in checker: ' + str(type(e)) + ' ' + str(e), file=sys.stderr) + result = False + return result + +--- seqan.orig/util/py_lib/seqan/auto_build.py ++++ seqan/util/py_lib/seqan/auto_build.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """ + Automatic building of SeqAn apps and releases. 
+ """ +@@ -29,12 +29,12 @@ + + def ls(self, url): + """Execute 'svn ls ${url}'.""" +- print >>sys.stderr, 'Executing "%s %s %s"' % (SVN_BINARY, 'ls -v', url) ++ print('Executing "%s %s %s"' % (SVN_BINARY, 'ls -v', url), file=sys.stderr) + popen = subprocess.Popen([SVN_BINARY, 'ls', '-v', url], + stdout=subprocess.PIPE) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during SVN call.' ++ print('ERROR during SVN call.', file=sys.stderr) + return 1 + lines = out_data.splitlines() + revs_tags = [(int(line.split()[0]), line.split()[-1]) for line in lines] +@@ -48,7 +48,7 @@ + + def co(self, url, dest_dir): + """Execute 'svn co ${url} ${dest_dir}'.""" +- print >>sys.stderr, 'Executing "%s %s %s %s"' % (SVN_BINARY, 'co', url, dest_dir) ++ print('Executing "%s %s %s %s"' % (SVN_BINARY, 'co', url, dest_dir), file=sys.stderr) + popen = subprocess.Popen([SVN_BINARY, 'co', url, dest_dir]) + popen.wait() + return popen.returncode +@@ -117,10 +117,10 @@ + package_path = package_path.replace('Darwin', 'Mac') + if not os.path.exists(package_path): + if self.options.verbosity >= 1: +- print >>sys.stderr, 'File %s does not exist yet.' % package_path ++ print('File %s does not exist yet.' % package_path, file=sys.stderr) + return True + elif self.options.verbosity >= 1: +- print >>sys.stderr, 'File %s exists.' % package_path ++ print('File %s exists.' % package_path, file=sys.stderr) + return False + + def copyArchives(self, build_dir): +@@ -131,7 +131,7 @@ + to = os.path.join(self.base_path, p.name, os.path.basename(from_)) + if not os.path.exists(os.path.dirname(to)): # Create directory if necessary. 
+ os.makedirs(os.path.dirname(to)) +- print >>sys.stderr, "Copying %s => %s" % (from_, to) ++ print("Copying %s => %s" % (from_, to), file=sys.stderr) + if 'x86' in to and 'x86_64' not in to: # fix processor name + to = to.replace('x86', 'x86_64') + if 'win32' in to or 'win64' in to: # fix OS name +@@ -140,7 +140,7 @@ + to = to.replace('Darwin', 'Mac') + shutil.copyfile(from_, to) + else: +- print >>sys.stderr, '%s does not exist (not fatal)' % from_ ++ print('%s does not exist (not fatal)' % from_, file=sys.stderr) + + def buildSeqAnRelease(self, checkout_dir, build_dir): + """Build SeqAn release: Apps and library build.""" +@@ -148,7 +148,7 @@ + # + # Create build directory. + if not os.path.exists(build_dir): +- print >>sys.stderr, 'Creating build directory %s' % (build_dir,) ++ print('Creating build directory %s' % (build_dir,), file=sys.stderr) + os.mkdir(build_dir) + # Execute CMake. + cmake_args = [CMAKE_BINARY, checkout_dir, +@@ -165,76 +165,76 @@ + cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=x86_64') + if self.os == 'Windows': + cmake_args += ['-G', 'Visual Studio 10 Win64'] +- print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),) ++ print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' +- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + # Execute Make. 
+ cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--config', 'Release'] + self.make_args +- print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),) ++ print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' +- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + # Copy over the archives. + self.copyArchives(build_dir) + # Remove build directory. + if not self.options.keep_build_dir: +- print >>sys.stderr, 'Removing build directory %s' % build_dir ++ print('Removing build directory %s' % build_dir, file=sys.stderr) + shutil.rmtree(build_dir) + # Build seqan-library. + # + # Create build directory. + if not os.path.exists(build_dir): +- print >>sys.stderr, "Creating build directory %s" % (build_dir,) ++ print("Creating build directory %s" % (build_dir,), file=sys.stderr) + os.mkdir(build_dir) + # Execute CMake. + cmake_args = [CMAKE_BINARY, checkout_dir, + "-DSEQAN_BUILD_SYSTEM=SEQAN_RELEASE_LIBRARY"] +- print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),) ++ print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' 
+- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + # Build Docs + cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'docs'] + self.make_args +- print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),) ++ print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make docs call.' +- print out_data +- print err_data ++ print('ERROR during make docs call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + # Execute Make. + cmake_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package'] + self.make_args +- print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(cmake_args),) ++ print('Building with CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' +- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + self.copyArchives(build_dir) + # Remove build directory. + if not self.options.keep_build_dir: +- print >>sys.stderr, 'Removing build directory %s' % build_dir ++ print('Removing build directory %s' % build_dir, file=sys.stderr) + shutil.rmtree(build_dir) + + def buildApp(self, checkout_dir, build_dir): + """Build an application.""" + # Create build directory. +- print >>sys.stderr, "Creating build directory %s" % (build_dir,) ++ print("Creating build directory %s" % (build_dir,), file=sys.stderr) + if not os.path.exists(build_dir): + os.mkdir(build_dir) + # Execute CMake. 
+@@ -254,37 +254,37 @@ + cmake_args.append('-DSEQAN_SYSTEM_PROCESSOR=x86_64') + if self.os == 'Windows': + cmake_args += ['-G', 'Visual Studio 10 Win64'] +- print >>sys.stderr, 'Executing CMake: "%s"' % (' '.join(cmake_args),) ++ print('Executing CMake: "%s"' % (' '.join(cmake_args),), file=sys.stderr) + #for key in sorted(os.environ.keys()): + # print key, ': ', os.environ[key] + popen = subprocess.Popen(cmake_args, cwd=build_dir, env=os.environ.copy()) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' +- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + # Build and package project. + make_args = [CMAKE_BINARY, '--build', build_dir, '--target', 'package', '--config', 'Release'] + if self.options.verbosity > 1: + make_args.insert(1, 'VERBOSE=1') +- print >>sys.stderr, 'Building with CMake: "%s"' % (' '.join(make_args),) ++ print('Building with CMake: "%s"' % (' '.join(make_args),), file=sys.stderr) + popen = subprocess.Popen(make_args, cwd=build_dir) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during make call.' +- print out_data +- print err_data ++ print('ERROR during make call.', file=sys.stderr) ++ print(out_data) ++ print(err_data) + return 1 + # Copy out archives. + self.copyArchives(build_dir) + # Remove build directory. + if not self.options.keep_co_dir: +- print >>sys.stderr, 'Removing build directory %s' % build_dir ++ print('Removing build directory %s' % build_dir, file=sys.stderr) + shutil.rmtree(build_dir) + + def tmpDir(self): +- print 'self.tmp_dir = %s' % self.tmp_dir ++ print('self.tmp_dir = %s' % self.tmp_dir) + if self.tmp_dir: + if not os.path.exists(self.tmp_dir): + os.makedirs(self.tmp_dir) +@@ -296,17 +296,17 @@ + """Execute build step.""" + # Create temporary directory. 
+ tmp_dir = self.tmpDir() +- print >>sys.stderr, 'Temporary directory is %s' % (tmp_dir,) ++ print('Temporary directory is %s' % (tmp_dir,), file=sys.stderr) + # Create SVN checkout in temporary directory. + checkout_dir = os.path.join(tmp_dir, os.path.basename(self.svn_url)) +- print >>sys.stderr, 'Creating checkout in %s' % checkout_dir ++ print('Creating checkout in %s' % checkout_dir, file=sys.stderr) + svn = MinisculeSvnWrapper() + svn.co(self.svn_url, checkout_dir) + # Create build directory. + suffix = '-build-%s-%s' % (self.os, self.word_size) + build_dir = os.path.join(tmp_dir, os.path.basename(self.svn_url) + suffix) + if os.path.exists(build_dir) and not self.options.keep_build_dir: +- print >>sys.stderr, 'Removing build directory %s' % (build_dir,) ++ print('Removing build directory %s' % (build_dir,), file=sys.stderr) + shutil.rmtree(build_dir) + # Perform the build. We have to separate between app and whole SeqAn releases. + if self.name == 'seqan': +@@ -314,12 +314,12 @@ + else: + self.buildApp(checkout_dir, build_dir) + if not self.options.keep_co_dir: +- print >>sys.stderr, 'Removing checkout directory %s' % (checkout_dir,) ++ print('Removing checkout directory %s' % (checkout_dir,), file=sys.stderr) + shutil.rmtree(checkout_dir) + # Remove temporary directory again. + if self.tmp_dir and not self.options.keep_tmp_dir: + # Only remove if not explicitely given and not forced to keep. +- print >>sys.stderr, 'Removing temporary directory %s' % (tmp_dir,) ++ print('Removing temporary directory %s' % (tmp_dir,), file=sys.stderr) + shutil.rmtree(tmp_dir) + + +@@ -331,8 +331,8 @@ + if rev >= options.start_revision and + '-' in tag] + # Enumerate all package names that we could enumerate. 
+- print 'revs_tags = %s' % revs_tags +- print 'word_sizes = %s' % options.word_sizes ++ print('revs_tags = %s' % revs_tags) ++ print('word_sizes = %s' % options.word_sizes) + for rev, tag in revs_tags: + name, version = tag.rsplit('-', 1) + for word_size in options.word_sizes.split(','): +@@ -355,8 +355,8 @@ + # Get the revisions and tag names. + svn = MinisculeSvnWrapper() + # Enumerate all package names that we could enumerate. +- print 'fake tag = %s' % options.build_trunk_as +- print 'word_sizes = %s' % options.word_sizes ++ print('fake tag = %s' % options.build_trunk_as) ++ print('word_sizes = %s' % options.word_sizes) + name, version = options.build_trunk_as.rsplit('-', 1) + for word_size in options.word_sizes.split(','): + # Create build step for this package name. +@@ -393,7 +393,7 @@ + dest = os.path.join(options.package_db, '%s-src' % name, file_name) + # Check whether we need to rebuild. + if os.path.exists(dest): +- print >>sys.stderr, 'Skipping %s; already exists.' % dest ++ print('Skipping %s; already exists.' % dest, file=sys.stderr) + continue + # Create temporary directory. + if options.tmp_dir: +@@ -402,31 +402,31 @@ + tmp_dir = options.tmp_dir + else: + tmp_dir = tempfile.mkdtemp() +- print >>sys.stderr, 'Temporary directory is %s' % (tmp_dir,) ++ print('Temporary directory is %s' % (tmp_dir,), file=sys.stderr) + # Create SVN checkout in temporary directory. + checkout_dir = os.path.join(tmp_dir, os.path.basename(svn_url)) +- print >>sys.stderr, 'Creating checkout in %s' % checkout_dir ++ print('Creating checkout in %s' % checkout_dir, file=sys.stderr) + from_ = os.path.join(tmp_dir, file_name) + args = ['tar', '--exclude=.svn', '-z', '-c', '-f', from_, os.path.basename(svn_url)] +- print ' '.join(args) ++ print(' '.join(args)) + svn = MinisculeSvnWrapper() + svn.co(svn_url, checkout_dir) + # Create tarball. 
+ popen = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=tmp_dir) + out_data, err_data = popen.communicate() + if popen.returncode != 0: +- print >>sys.stderr, 'ERROR during SVN call.' ++ print('ERROR during SVN call.', file=sys.stderr) + return 1 + # Create target directory if it does not exist yet. + if not os.path.exists(os.path.dirname(dest)): # Create directory if necessary. + os.makedirs(os.path.dirname(dest)) + # Create tarball in target directory. +- print >>sys.stderr, 'Copying %s => %s' % (from_, dest) ++ print('Copying %s => %s' % (from_, dest), file=sys.stderr) + shutil.copyfile(from_, dest) + # Remove temporary directory again. + if tmp_dir and not options.keep_tmp_dir: + # Only remove if not explicitely given and not forced to keep. +- print >>sys.stderr, 'Removing temporary directory %s' % (tmp_dir,) ++ print('Removing temporary directory %s' % (tmp_dir,), file=sys.stderr) + shutil.rmtree(tmp_dir) + return 0 + +@@ -489,5 +489,5 @@ + return 1 + + # Fire up work. +- print >>sys.stderr, 'Running SeqAn Auto Builder' ++ print('Running SeqAn Auto Builder', file=sys.stderr) + return work(options) +--- seqan.orig/util/py_lib/seqan/dddoc/__init__.py ++++ seqan/util/py_lib/seqan/dddoc/__init__.py +@@ -1,6 +1,6 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + +-import main as m ++from . import main as m + import sys + + def main(): +--- seqan.orig/util/py_lib/seqan/dddoc/core.py ++++ seqan/util/py_lib/seqan/dddoc/core.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import copy + import operator +@@ -7,6 +7,7 @@ + import pickle + import string + import sys ++from functools import reduce + + # Constant for C++ files. 
FILETYPE_CPP = 2 +@@ -178,9 +179,9 @@ + with open(self.path, 'rb') as f: + self.content = pickle.load(f) + except: +- print >>sys.stderr, 'Could not load cache %s' % self.path ++ print('Could not load cache %s' % self.path, file=sys.stderr) + return False +- print >>sys.stderr, 'Successfully loaded cache %s' % self.path ++ print('Successfully loaded cache %s' % self.path, file=sys.stderr) + return True + + def flush(self): +@@ -189,14 +190,14 @@ + with open(self.path, 'wb') as f: + pickle.dump(self.content, f) + except: +- print >>sys.stderr, 'Could not store cache %s' % self.path ++ print('Could not store cache %s' % self.path, file=sys.stderr) + return False +- print >>sys.stderr, 'Successfully stored cache %s' % self.path ++ print('Successfully stored cache %s' % self.path, file=sys.stderr) + return True + + def has_key(self, key): + """Returns True if the cache has data for this key.""" +- return self.content.has_key(key) ++ return key in self.content + + def isFresh(self, filename): + """Returns True if the cache is fresh. +@@ -204,7 +205,7 @@ + The cache is fresh if the file at the given path is not newer than the + data in the cache. + """ +- if not self.has_key(filename): ++ if filename not in self.content: + return False + mtime = os.stat(filename).st_mtime + return mtime >= self.content[filename][0] +@@ -256,9 +257,9 @@ + >>> splitKeys('.Adaption.\'std::string\'.summary') + ['', 'Adaption', '\'std::string\'', 'summary'] + """ +- if '\u0001' in text: +- text = text.split('\u0001', 1)[0] # Remove optional label, used in inheritance. +- if _cache.has_key((text, delimiters)): ++ if '\\u0001' in text: ++ text = text.split('\\u0001', 1)[0] # Remove optional label, used in inheritance. 
++ if (text, delimiters) in _cache: + return _cache[(text, delimiters)] + count = 0 + current = [] +@@ -299,7 +300,7 @@ + """ + def _cleanPathElement(x): + return x.strip().replace('\'', '').replace('"', '') +- return map(_cleanPathElement, path_arr) ++ return list(map(_cleanPathElement, path_arr)) + + + class FileLoader(object): +@@ -522,7 +523,7 @@ + ## print ' empty_count', empty_count + if empty_count <= len(stack): + stack = stack[:empty_count] +- stack_len_sum = reduce(operator.add, map(len, stack), 0) ++ stack_len_sum = reduce(operator.add, list(map(len, stack)), 0) + stack.append(path[empty_count:]) + stack_len_sum += len(stack[-1]) + path = reduce(operator.add, stack, []) +@@ -600,14 +601,14 @@ + if prefix: + prefix = prefix + ' --> ' + res = '%s %sDddocTreeNode(key=%s, texts=%s)' % (space, prefix, repr(node.key), repr(node.texts)) +- for k, child in node.children.iteritems(): ++ for k, child in node.children.items(): + res += '\n' + _str(child, level + 1, k) + return res + return _str(self) + + def dump(self, stream=sys.stdout): + """Debug recursive dumping of a tree node.""" +- print >>stream, self ++ print(self, file=stream) + + + class DddocTree(object): +@@ -642,12 +643,12 @@ + Enables caching and builds some indices. 
+ """ + self._enableFindCache() +- print >>sys.stderr, 'Indexing Glossary Pages' ++ print('Indexing Glossary Pages', file=sys.stderr) + if 'Page' in self.root.children: +- for key, node in self.root.children['Page'].children.iteritems(): ++ for key, node in self.root.children['Page'].children.items(): + if 'glossary' in node.children: + self.glossary_nodes.append(node.children['glossary']) +- print >>sys.stderr, ' Found Page.%s' % node.key ++ print(' Found Page.%s' % node.key, file=sys.stderr) + + def _buildSubtree(self, path, begin_index, end_index, level): + # First, identify the entries belonging to each node (entry.path[i] are +@@ -705,7 +706,7 @@ + key = '.'.join(path) + else: + key = path +- if self.cache.has_key(key): ++ if key in self.cache: + return self.cache[key] + # Split path if is string, ignore leading dot if any. + if type(path) is str: +@@ -717,7 +718,7 @@ + """Helper function that searches for the node with given path.""" + if not path: + return node +- if not node.children.has_key(path[0]): ++ if path[0] not in node.children: + return None + return findRecurse(node.children[path[0]], path[1:]) + res = findRecurse(self.root, path) +@@ -754,10 +755,10 @@ + ## print ' ' * level, '_matchTreesInNode(tree', node.path, path, func, level, ')' + if path: + if path[0] == '*': +- for child in node.children.itervalues(): ++ for child in node.children.values(): + _matchTreesInNode(tree, child, path[1:], func, block_paths, level+1) + else: +- if node.children.has_key(path[0]): ++ if path[0] in node.children: + _matchTreesInNode(tree, node.children[path[0]], path[1:], func, block_paths, level+1) + else: + for block_path in block_paths: +@@ -800,7 +801,7 @@ + + def generateAutomaticReferences(tree): + """Interpret the globals.relations entries.""" +- print >>sys.stderr, 'Generating Automatic References' ++ print('Generating Automatic References', file=sys.stderr) + relations_node = tree.find('globals.relations') + if not relations_node: + return # Empty, do 
nothing. +@@ -816,7 +817,7 @@ + if not res: + continue # Not found, Skip # TODO(holtgrew): Warning? + additions.append((res.path + [key], '.'.join(node.path[:2]))) +- for key, node in relations_node.children.iteritems(): ++ for key, node in relations_node.children.items(): + ## print 'GENERATE', key, node + for txt in node.texts: + path = splitKeys(txt, '.') +@@ -837,7 +838,7 @@ + + def generateInheritedElements(tree): + """Push through inheritances.""" +- print >>sys.stderr, 'Linking Inherited Entities' ++ print('Linking Inherited Entities', file=sys.stderr) + inherit_node = tree.find('globals.inherit') + # Contains children: $TARGET_FIELD:$THROUGH_FIELD.$SOURCE_FIELD + +@@ -846,7 +847,7 @@ + inheritance_rules = [] + + # First build a dependency graph. +- for target_field, child in inherit_node.children.items(): ++ for target_field, child in list(inherit_node.children.items()): + for txt in child.texts: + arr = splitKeys(txt, '.') + through_field = arr[0] +@@ -885,8 +886,8 @@ + if not source_field in source_node.children: + continue # Skip if no source field. + for path in source_node.children[source_field].texts: +- if not '\u0001' in path: # We use this ugly hack to add the inheritance source here. +- path = path + '\u0001' + '.'.join(source_node.path) ++ if not '\\u0001' in path: # We use this ugly hack to add the inheritance source here. ++ path = path + '\\u0001' + '.'.join(source_node.path) + # If necessary then create child in target node. + if not target_field in target_node.children: + target_node.children[target_field] = DddocTreeNode(tree, target_field, target_node.path + [target_field], source_node.children[source_field].entry) +@@ -909,7 +910,7 @@ + def removeDuplicateTexts(tree): + """Remove duplicates from texts members. + +- Suffixes starting with '\u0001' are ignored for the comparisons ++ Suffixes starting with '\\u0001' are ignored for the comparisons + and strings with these suffixes are preferred. 
+ """ + ##print 'remove duplicates' +@@ -918,20 +919,20 @@ + cleaned = [] + for txt in node.texts: + clean = txt +- pos = txt.find('\u0001') ++ pos = txt.find('\\u0001') + if pos != -1: + clean = txt[:pos] + ##print cleaned, repr(clean) + if clean in in_cleaned: +- if '\u0001' in clean and not '\u0001' in cleaned[in_cleaned[clean]]: ++ if '\\u0001' in clean and not '\\u0001' in cleaned[in_cleaned[clean]]: + cleaned[in_cleaned[clean]] = txt + else: + in_cleaned[clean] = len(cleaned) + cleaned.append(txt) + node.texts = cleaned +- for child in node.children.itervalues(): ++ for child in node.children.values(): + recurse(child) +- for child in tree.root.children.itervalues(): ++ for child in tree.root.children.values(): + recurse(child) + + +@@ -946,15 +947,15 @@ + def recurse(result, path, node): + ## print path, node.path + if len(path) == 2: +- if node.children.has_key('cat'): ++ if 'cat' in node.children: + for cat in node.children['cat'].texts: + result.setdefault(path[0], {}).setdefault(cat, []).append(node) + else: + result.setdefault(path[0], {})[path[1]] = node + if len(path) < 2: +- for key, child in node.children.iteritems(): ++ for key, child in node.children.items(): + recurse(result, path + [key], child) +- for key, child in tree.root.children.iteritems(): ++ for key, child in tree.root.children.items(): + recurse(result, [key], child) + ## for k1, v1 in result.iteritems(): + ## for k2, v2 in v1.iteritems(): +@@ -968,11 +969,11 @@ + def invalidReference(self, txt, locations): + self.error_count += 1 + if not locations: +- print >>sys.stderr, 'ERROR: Invalid Reference %s in unknown location (sorry).' % txt ++ print('ERROR: Invalid Reference %s in unknown location (sorry).' 
% txt, file=sys.stderr)
+ else:
+- print >>sys.stderr, 'ERROR: Invalid Reference %s in one of the following locations:' % txt
++ print('ERROR: Invalid Reference %s in one of the following locations:' % txt, file=sys.stderr)
+ for filename, line in locations:
+- print >>sys.stderr, ' %s:%s' % (filename, line)
++ print(' %s:%s' % (filename, line), file=sys.stderr)
+
+
+ class App(object):
+--- seqan.orig/util/py_lib/seqan/dddoc/dddoc.py
++++ seqan/util/py_lib/seqan/dddoc/dddoc.py
+@@ -1,4 +1,4 @@
+-#!/usr/bin/env python
++#!/usr/bin/python3
+
+ import os
+ import os.path
+@@ -37,9 +37,9 @@
+ with open(self.path, 'rb') as f:
+ self.content = pickle.load(f)
+ except:
+- print >>sys.stderr, 'Could not load cache %s' % self.path
++ print('Could not load cache %s' % self.path, file=sys.stderr)
+ return False
+- print >>sys.stderr, 'Successfully loaded cache %s' % self.path
++ print('Successfully loaded cache %s' % self.path, file=sys.stderr)
+ return True
+
+ def flush(self):
+@@ -47,16 +47,16 @@
+ with open(self.path, 'wb') as f:
+ pickle.dump(self.content, f)
+ except:
+- print >>sys.stderr, 'Could not store cache %s' % self.path
++ print('Could not store cache %s' % self.path, file=sys.stderr)
+ return False
+- print >>sys.stderr, 'Successfully stored cache %s' % self.path
++ print('Successfully stored cache %s' % self.path, file=sys.stderr)
+ return True
+
+ def has_key(self, key):
+- return self.content.has_key(key)
++ return key in self.content
+
+ def isFresh(self, filename):
+- if not self.has_key(filename):
++ if filename not in self.content:
+ return False
+ mtime = os.stat(filename).st_mtime
+ return mtime >= self.content[filename][0]
+@@ -173,7 +173,7 @@
+ Data object with the lines below the given path.
+ """
+ # If possible, return from cache.
+- if self.cache.has_key(str): ++ if str in self.cache: + return self.cache[str] + + arr = splitName(str) +@@ -274,7 +274,7 @@ + if len(line.nodes) > self.level + level: + dict[line.nodes[self.level + level]] = 1 + +- arr = dict.keys() ++ arr = list(dict.keys()) + arr.sort() + return arr + +@@ -283,14 +283,14 @@ + for line in self.lines: + if len(line.nodes) > self.level + level: + key = line.nodes[self.level + level] +- if not dict.has_key(key) or (dict[key] > line.id): ++ if key not in dict or (dict[key] > line.id): + dict[key] = line.id + + dict2 = {} + for key in dict: + dict2[dict[key]] = key + +- arr2 = dict2.keys() ++ arr2 = list(dict2.keys()) + arr2.sort() + + arr = [] +--- seqan.orig/util/py_lib/seqan/dddoc/html.py ++++ seqan/util/py_lib/seqan/dddoc/html.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import sys + import os +@@ -6,7 +6,7 @@ + import shutil + import json + import string +-import StringIO ++import io + import re + import datetime + import time +@@ -14,7 +14,7 @@ + import pyratemp + import json + +-import core ++from . import core + + # TODO(holtgrew): Take this from dddoc tree ;) + TPL_FILES = [ +@@ -258,7 +258,7 @@ + + The files in TPL_FILE are copied verbatimly. + """ +- print >>sys.stderr, 'Copying Template Files' ++ print('Copying Template Files', file=sys.stderr) + for path in TPL_FILES: + # Get target path name. + targetPath = os.path.join(self.out_path, os.path.dirname(path)) +@@ -267,7 +267,7 @@ + # Copy file. + srcPath = os.path.join(self.tpl_path, path) + destPath = os.path.join(self.out_path, path) +- print >>sys.stderr, ' %s => %s' % (srcPath, destPath) ++ print(' %s => %s' % (srcPath, destPath), file=sys.stderr) + shutil.copyfile(srcPath, destPath) + + def createRootIndexPage(self): +@@ -276,7 +276,7 @@ + The file is basically copied from tpl, but some small replacements are + done using Python string templates. 
+ """ +- print >>sys.stderr, 'Creating Index Page' ++ print('Creating Index Page', file=sys.stderr) + srcPath = os.path.join(self.tpl_path, 'index.html') + destPath = os.path.join(self.out_path, 'index.html') + with open(srcPath, 'rb') as f: +@@ -301,11 +301,11 @@ + else: + type_num = TYPE_CLASS + # Index functions. +- for name, node in sorted(self.tree.find([cat_type]).children.iteritems()): ++ for name, node in sorted(self.tree.find([cat_type]).children.items()): + ## if node.children.has_key('hidefromindex'): + ## continue # Skip those that are not to appear in search. + summary = '' +- if node.children.has_key('summary') and node.children['summary'].texts: ++ if 'summary' in node.children and node.children['summary'].texts: + if not ':' in node.children['summary'].texts[0]: + continue + summary = self.html_helper.translateMarkup(node.children['summary'].texts[0].split(':', 1)[1]) +@@ -320,7 +320,7 @@ + filename = getPagePath(cat_type, name, 'files') + # Build list of include headers. + includes = '' +- if node.children.has_key('include'): ++ if 'include' in node.children: + includes = ' ' + ', '.join(node.children['include'].texts) + if type_num == TYPE_FUNCTION: + name = name + '()' +@@ -339,14 +339,14 @@ + 'longSearchIndex, 'info' that are used by searchdoc.js for providing the + search index. + """ +- print >>sys.stderr, 'Creating Search Index' ++ print('Creating Search Index', file=sys.stderr) + destPath = os.path.join(self.out_path, 'panel', 'search_index.js') + search_index = [] + long_search_index = [] + info = [] + # Get category types to index from DddocTree tree and build the index + # data for this. 
+- items = self.tree.find('globals.indexes').children.items() ++ items = list(self.tree.find('globals.indexes').children.items()) + key_entries = [(x[0], x[1].entry) for x in items if x[1].entry] + key_linenos = [(x[0], self.tree.entries[x[1][0]].line_no_begin) for x in key_entries] + cats = [x[0] for x in sorted(key_linenos, key=lambda a:a[1])] +@@ -362,10 +362,10 @@ + This file contains the JavaScript information for building the naviation + tree in the left frame. + """ +- print >>sys.stderr, 'Creating Panel Tree' ++ print('Creating Panel Tree', file=sys.stderr) + # Translate cat/element type from internal to user-readable. + node = self.tree.find('globals.indexes') +- cats = [(k, v.text(), v) for k, v in node.children.iteritems() if not v.children.has_key('hidefromindex')] ++ cats = [(k, v.text(), v) for k, v in node.children.items() if 'hidefromindex' not in v.children] + def getLocation(x): + return (self.tree.entries[x[2].entry[0]].filename, + self.tree.entries[x[2].entry[0]].line_no_begin) +@@ -407,7 +407,7 @@ + subcat_nodes.append(subcat_node) + filename = getIndexPagePath(self.tree, cat, 'files') + trees_data.append([cat_title, filename, '', subcat_nodes]) +- if cat_node.children.has_key('hidefromindex'): ++ if 'hidefromindex' in cat_node.children: + continue + # Write out tree as JavaScript/JSON. + ## print 'trees_data =', trees_data +@@ -512,7 +512,7 @@ + if not parent_key in node.children: + continue + for path in node.children[parent_key].texts: +- if '\u0001' in path: ++ if '\\u0001' in path: + continue # Skip automatically generated upwards links. 
+ parent = self.tree.find(path) + if not parent: +@@ -790,7 +790,7 @@ + + class DocsCreator(object): + def __init__(self, error_logger, tree, tpl_path, out_path, include_dirs): +- print >>sys.stderr, 'Setting up Docs Creator' ++ print('Setting up Docs Creator', file=sys.stderr) + self.tree = tree + self.error_logger = error_logger + self.tpl_path = tpl_path +@@ -811,10 +811,10 @@ + def createIndexPages(self): + index = core.buildByTypeAndCatIndex(self.tree) + cat_nodes = self.tree.find('globals.indexes').children +- for cat, node in cat_nodes.iteritems(): +- if node.children.has_key('hidefromindex'): ++ for cat, node in cat_nodes.items(): ++ if 'hidefromindex' in node.children: + continue +- print >>sys.stderr, 'Indexes for ' + node.text() ++ print('Indexes for ' + node.text(), file=sys.stderr) + filename = getIndexPagePath(self.tree, cat, self.out_path) + with open(filename, 'wb') as f: + title_node = self.tree.find(['Indexpage', cat, 'title']) +@@ -836,7 +836,7 @@ + + def copyFiles(self): + """Copy files in FILE_DIRS.""" +- print >>sys.stderr, 'Copying Documentation Files' ++ print('Copying Documentation Files', file=sys.stderr) + for path in FILE_DIRS: + entries = os.listdir(path) + if not os.path.exists(os.path.join(self.out_path, path)): # Make sure output path exists. +@@ -847,18 +847,18 @@ + source_path = os.path.join(path, entry) + target_path = os.path.join(self.out_path, path, entry) + # Copy file. +- print >>sys.stderr, ' %s => %s' % (source_path, target_path) ++ print(' %s => %s' % (source_path, target_path), file=sys.stderr) + shutil.copyfile(source_path, target_path) + + def createPages(self): + """Create the documentation pages.""" + cat_dict = self.tree.find('globals.categories').children +- for cat, cat_node in cat_dict.iteritems(): # cat=Function,... +- print >>sys.stderr, 'Pages for ' + cat ++ for cat, cat_node in cat_dict.items(): # cat=Function,... 
++ print('Pages for ' + cat, file=sys.stderr) + if self.tree.find(cat) is None: # Skip empty categories. +- print >>sys.stderr ++ print(file=sys.stderr) + continue +- for subcat, node in self.tree.find(cat).children.iteritems(): # subcat=length,... ++ for subcat, node in self.tree.find(cat).children.items(): # subcat=length,... + filename = getPagePath(cat, subcat, self.out_path) + ## print filename + with open(filename, 'wb') as f: +@@ -876,8 +876,8 @@ + html=HtmlHelper(self.error_logger, self.tree, os.path.dirname(filename), self.include_dirs), + json=json) + f.write(res.encode('utf-8')) +- print >>sys.stderr, '.', +- print >>sys.stderr ++ print('.', end=' ', file=sys.stderr) ++ print(file=sys.stderr) + + + def createDocs(error_logger, tree, tpl_path, out_path, include_dirs): +--- seqan.orig/util/py_lib/seqan/dddoc/main.py ++++ seqan/util/py_lib/seqan/dddoc/main.py +@@ -1,12 +1,12 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import datetime + import optparse + import os + import sys + +-import core +-import html ++from . import core ++from . import html + + + HEADER = """ +@@ -52,10 +52,10 @@ + Return code of the application. Is 0 for no problem, and 1 on + errors and warnings. + """ +- print 'Scanning modules...' ++ print('Scanning modules...') + app = core.App() + if self.cache_only: +- for fn in app.cache.content.iterkeys(): ++ for fn in app.cache.content.keys(): + core.parseFile(fn, app.cache) + else: + # Scan some/all modules. +@@ -65,25 +65,25 @@ + + # Scan doc directories. + for doc_dir in self.doc_dirs: +- print 'Scanning %s...' % doc_dir ++ print('Scanning %s...' % doc_dir) + app.loadFiles(doc_dir) + + app.loadingComplete() + + # Actually build the HTML files. +- print 'Creating HTML Documentation...' 
++ print('Creating HTML Documentation...') + tpl_path = os.path.abspath(os.path.join(os.path.dirname(__file__), 'tpl')) + res = html.createDocs(app.error_logger, app.dddoc_tree, tpl_path, self.out_dir, + self.include_dirs) + + # Done, print end message. +- print 'Documentation created/updated.' ++ print('Documentation created/updated.') + return res + + + def main(argv): + """Program entry point.""" +- print '%s\n' % HEADER ++ print('%s\n' % HEADER) + + start_time = datetime.datetime.now() + +@@ -106,12 +106,12 @@ + action='store_true', + help='Ignore files if cache file exists.') + options, args = parser.parse_args(argv) +- print 'doc dirs: %s' % ', '.join(options.doc_dirs) +- print ++ print('doc dirs: %s' % ', '.join(options.doc_dirs)) ++ print() + + # Show help if no arguments are given. + if len(args) < 2: +- print CMD_HELP % args[0] ++ print(CMD_HELP % args[0]) + return 1 + # Create application object and run documentation generation. + app = DDDocRunner(index_only=False, doc_dirs=options.doc_dirs, +@@ -122,7 +122,7 @@ + res = app.run(args) + + elapsed = datetime.datetime.now() - start_time +- print >>sys.stderr, 'Took %d s' % elapsed.seconds ++ print('Took %d s' % elapsed.seconds, file=sys.stderr) + + return res + +--- seqan.orig/util/py_lib/seqan/fixgcov/__init__.py ++++ seqan/util/py_lib/seqan/fixgcov/__init__.py +@@ -1,8 +1,8 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import sys + +-import app ++from . import app + + main = app.main + +--- seqan.orig/util/py_lib/seqan/fixgcov/app.py ++++ seqan/util/py_lib/seqan/fixgcov/app.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Small libclang based app to fix gcov output. + + Fix gcov output with templates. 
This is done by first parsing in the .cpp files +@@ -16,7 +16,7 @@ + License: 3-clause BSD (see LICENSE) + """ + +-from __future__ import with_statement ++ + + __author__ = 'Manuel Holtgrewe ' + +@@ -97,7 +97,7 @@ + if not _hasFileLocation(node): + return False + # Try to hit cache. +- if self.cache.has_key(node.location.file.name): ++ if node.location.file.name in self.cache: + return self.cache[node.location.file.name] + # Check whether node's location is below the include directories. It is + # only visited if this is the case. +@@ -137,7 +137,7 @@ + # print args + tu = index.parse(filename, args=args) + if self.options.verbosity >= 1: +- print 'Translation unit: %s.' % tu.spelling ++ print('Translation unit: %s.' % tu.spelling) + return self._recurse(tu.cursor) + + @classmethod +@@ -207,17 +207,17 @@ + # lines with compound statements in all included files are written to + # the location file. + if options.verbosity >= 1: +- print >>sys.stderr, 'Building Locations' ++ print('Building Locations', file=sys.stderr) + if options.verbosity >= 2: +- print >>sys.stderr, '==================' ++ print('==================', file=sys.stderr) + + # Fire off AST traversal. + if options.verbosity >= 1: +- print >>sys.stderr, 'AST Traversal' ++ print('AST Traversal', file=sys.stderr) + node_visitor = CollectCompoundStatementNodeVisitor(options) + for src in options.source_files: + if options.verbosity >= 2: +- print >>sys.stderr, ' Compilation Unit', src ++ print(' Compilation Unit', src, file=sys.stderr) + AstTraverser.visitFile(src, node_visitor, options) + + # Convert locations into points. +@@ -229,7 +229,7 @@ + + # Write out the source locations. 
+ if options.verbosity >= 1: +- print >>sys.stderr, 'Writing out locations to', options.location_file ++ print('Writing out locations to', options.location_file, file=sys.stderr) + with open(options.location_file, 'wb') as f: + pickle.dump(locations, f) + +@@ -240,20 +240,20 @@ + if options.gcov_files: + # If no source files and gcov files are given then + if options.verbosity >= 1: +- print >>sys.stderr, 'Updating gcov Results' ++ print('Updating gcov Results', file=sys.stderr) + if options.verbosity >= 2: +- print >>sys.stderr, '=====================' ++ print('=====================', file=sys.stderr) + + if not options.source_files: + if options.verbosity >= 1: +- print >>sys.stderr, 'Loading locations from', options.location_file ++ print('Loading locations from', options.location_file, file=sys.stderr) + with open(options.location_file, 'rb') as f: + locations = pickle.load(f) + + for filename in options.gcov_files: + filename = os.path.abspath(filename) + if options.verbosity >= 2: +- print >>sys.stderr, 'Processing', filename ++ print('Processing', filename, file=sys.stderr) + with open(filename, 'rb') as f: + lines = f.readlines() + pos0 = lines[0].find(':') +@@ -268,9 +268,9 @@ + txt = line[pos1 + 1:] + if txt.startswith('Source:'): + source = os.path.abspath(txt[len('Source:'):].strip()) +- if not locations.has_key(source): ++ if source not in locations: + if options.verbosity >= 2: +- print >>sys.stderr, ' Skipping.' ++ print(' Skipping.', file=sys.stderr) + skip = True + break + if not source or lineno == 0: +--- seqan.orig/util/py_lib/seqan/paths.py ++++ seqan/util/py_lib/seqan/paths.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn path utilities. 
+ + Code to get paths within the SeqAn repository; Useful for setting defaults in +@@ -56,10 +56,10 @@ + return os.path.join(repositoryRoot(), location, filename) + + def main(args): +- print 'SeqAn paths' +- print +- print 'repositoryRoot() ==', repositoryRoot() +- print 'pathToSkeletons() ==', pathToSkeletons() ++ print('SeqAn paths') ++ print() ++ print('repositoryRoot() ==', repositoryRoot()) ++ print('pathToSkeletons() ==', pathToSkeletons()) + + if __name__ == '__main__': + sys.exit(main(sys.argv)) +--- seqan.orig/util/py_lib/seqan/pyclangcheck/__init__.py ++++ seqan/util/py_lib/seqan/pyclangcheck/__init__.py +@@ -1,8 +1,8 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + import sys + +-import app ++from . import app + + main = app.main + +--- seqan.orig/util/py_lib/seqan/pyclangcheck/app.py ++++ seqan/util/py_lib/seqan/pyclangcheck/app.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """pyclangcheck driver code + + This code is the driver code for the pyclangcheck tool. +@@ -7,7 +7,7 @@ + License: 3-clause BSD (see LICENSE) + """ + +-from __future__ import with_statement ++ + + __author__ = 'Manuel Holtgrewe ' + +@@ -19,9 +19,9 @@ + + import clang.cindex as ci + +-import simple_checks +-import violations +-import rules ++from . import simple_checks ++from . import violations ++from . 
import rules + + def _hasFileLocation(node): + """Return True if node has a file lcoation.""" +@@ -51,7 +51,7 @@ + self.cache = {} + + def get(self, path): +- if self.cache.has_key(path): ++ if path in self.cache: + return self.cache[path] + with open(path, 'rb') as f: + fcontents = f.readlines() +@@ -106,7 +106,7 @@ + txt = '' + else: + txt = ''.join(lines).replace('\n', '\\n') +- print ' ' * len(self.stack), 'Entering', node.kind, node._kind_id, node.spelling, 'txt="%s"' % txt, "%s-%s" % (start, end) ++ print(' ' * len(self.stack), 'Entering', node.kind, node._kind_id, node.spelling, 'txt="%s"' % txt, "%s-%s" % (start, end)) + violations = [] + for rule in self.rules: + if rule.allowVisit(node): +@@ -155,7 +155,7 @@ + if not _hasFileLocation(node): + return False + # Try to hit cache. +- if self.cache.has_key(node.location.file.name): ++ if node.location.file.name in self.cache: + return self.cache[node.location.file.name] + # Check whether the file is blocked. + if node.location.file.name in self.blocked_files: +@@ -200,14 +200,14 @@ + # print args + tu = index.parse(filename, args=args) + if self.options.verbosity >= 1: +- print 'Translation unit: %s.' % tu.spelling ++ print('Translation unit: %s.' % tu.spelling) + return self._recurse(tu.cursor) + + @classmethod + def visitFile(klass, filename, node_visitor, options): + """Don't instantiate AstTraverser yourself, use this function.""" + if options.verbosity >= 1: +- print >>sys.stderr, 'Checking', filename ++ print('Checking', filename, file=sys.stderr) + traverser = AstTraverser(node_visitor, options) + res = traverser.run(filename) + return res != True +@@ -291,7 +291,7 @@ + res = AstTraverser.visitFile(filename, node_visitor, options) + node_visitor.seenToBlocked() + elapsed = datetime.datetime.now() - start +- print >>sys.stderr, ' took', elapsed.seconds, 's' ++ print(' took', elapsed.seconds, 's', file=sys.stderr) + if res: + break + +@@ -310,7 +310,7 @@ + # Print violations. 
+ # ======================================================================== + +- print 'VIOLATIONS' ++ print('VIOLATIONS') + vs.update(node_visitor.violations) + printer = violations.ViolationPrinter(options.ignore_nolint, + options.show_source) +--- seqan.orig/util/py_lib/seqan/pyclangcheck/rules.py ++++ seqan/util/py_lib/seqan/pyclangcheck/rules.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + + __author__ = 'Manuel Holtgrewe ' + +@@ -7,8 +7,8 @@ + + import clang.cindex as ci + +-import app +-import violations ++from . import app ++from . import violations + + RULE_NAMING_CONSTANT = 'naming.constant' + RULE_NAMING_STRUCT = 'naming.struct' +@@ -173,7 +173,7 @@ + return True + if not app._hasFileLocation(node): + return False +- if self.cache.has_key(node.location.file.name): ++ if node.location.file.name in self.cache: + return self.cache[node.location.file.name] + # Check whether node's location is below the include directories or one + # of the source files. +--- seqan.orig/util/py_lib/seqan/pyclangcheck/simple_checks.py ++++ seqan/util/py_lib/seqan/pyclangcheck/simple_checks.py +@@ -1,13 +1,13 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Simple source code checks, e.g. trailing whitespace.""" + +-from __future__ import with_statement ++ + + import bisect + import re + import sys + +-import violations ++from . import violations + + RULE_TRAILING_WHITESPACE = 'whitespace.trailing' + RULE_TEXT_TRAILING_WHITESPACE = 'Trailing whitespace is not allowed.' 
+@@ -98,7 +98,7 @@ + match = re.match(RE_TODO, comment) + if match: + if len(match.group(1)) > 1: +- print comment ++ print(comment) + v = violations.SimpleRuleViolation( + RULE_TODO_ONE_SPACE, filename, cstart.line, + cstart.column, RULE_TEXT_TODO_ONE_SPACE) +--- seqan.orig/util/py_lib/seqan/pyclangcheck/suppressions.py ++++ seqan/util/py_lib/seqan/pyclangcheck/suppressions.py +@@ -1 +1 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 +--- seqan.orig/util/py_lib/seqan/pyclangcheck/violations.py ++++ seqan/util/py_lib/seqan/pyclangcheck/violations.py +@@ -1,14 +1,14 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Code related to violations and suppressions.""" + +-from __future__ import with_statement ++ + + import os + import os.path + import sys + +-import app +-import rules ++from . import app ++from . import rules + + + class RuleViolation(object): +@@ -54,7 +54,7 @@ + def hasNolint(self, filename, lineno): + filename = os.path.abspath(filename) + # Ensure that the nolint lines are registered in self.locations[filename]. 
+- if not self.locations.has_key(filename): ++ if filename not in self.locations: + line_set = set() + with open(filename, 'rb') as f: + line_no = 0 +@@ -80,12 +80,12 @@ + for k in sorted(vs.keys()): + violation = vs[k] + if self.ignore_nolint or not self.nolints.hasNolint(violation.file, violation.line): +- print violation ++ print(violation) + line = self.file_cache.get(violation.file)[violation.line - 1] + if self.show_source: +- print line.rstrip() +- print '%s^' % (' ' * (violation.column - 1)) +- print ++ print(line.rstrip()) ++ print('%s^' % (' ' * (violation.column - 1))) ++ print() + previous = violation +- print 'Total: %d violations' % len(vs) ++ print('Total: %d violations' % len(vs)) + +--- seqan.orig/util/py_lib/seqan/release_page.py ++++ seqan/util/py_lib/seqan/release_page.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Build the SeqAn Releases Website.""" + + import operator +@@ -167,23 +167,23 @@ + + def generate(self): + """Create output RSS files.""" +- for sname, software in self.package_db.softwares.items(): ++ for sname, software in list(self.package_db.softwares.items()): + feed = RssFeed(sname, '', '') +- for vname, version in software.versions.items(): ++ for vname, version in list(software.versions.items()): + description = 'Version %s of %s.' % (vname, sname) + link = '%s/%s#%s' % (self.base_url, sname, vname) + item = RssItem('%s %s' % (sname, vname), description, link) + feed.items.append(item) + path = os.path.join(self.out_dir, sname, 'package.rss') +- print >>sys.stderr, 'Writing %s' % path ++ print('Writing %s' % path, file=sys.stderr) + with open(path, 'wb') as f: + f.write(feed.generate()) + + + def work(options): +- print >>sys.stderr, 'Generating Release Site.' 
+- print >>sys.stderr, 'Package Dir: %s' % (options.package_db,) +- print >>sys.stderr, 'Out file: %s' % (options.out_file,) ++ print('Generating Release Site.', file=sys.stderr) ++ print('Package Dir: %s' % (options.package_db,), file=sys.stderr) ++ print('Out file: %s' % (options.out_file,), file=sys.stderr) + db = PackageDatabase(options.package_db) + db.load() + # Load and render overview template. +@@ -197,9 +197,9 @@ + sorted=sorted)) + # Load and render package template. + tpl = pyratemp.Template(filename=PACKAGE_TPL_PATH) +- for sname, software in db.softwares.items(): ++ for sname, software in list(db.softwares.items()): + out_path = os.path.join(options.package_db, sname, 'index.html') +- print >>sys.stderr, 'Writing %s.' % out_path ++ print('Writing %s.' % out_path, file=sys.stderr) + with open(out_path, 'wb') as f: + f.write(tpl(FORMATS=FORMATS, + utc_time=time.strftime('%a, %d %b %Y %H:%M:%S UTC', time.gmtime()), +--- seqan.orig/util/py_lib/seqan/skel.py ++++ seqan/util/py_lib/seqan/skel.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """SeqAn code generation from templates / skeletons. + + This module contains code to help the creation of modules, tests, apps etc. +@@ -28,7 +28,7 @@ + License: 3-clause BSD (see LICENSE) + """ + +-from __future__ import with_statement ++ + + __author__ = 'Manuel Holtgrewe ' + +@@ -39,7 +39,7 @@ + import sys + import string + +-import paths ++from . import paths + + # Add os.path.relpath if it is not already there, so we can use Python 2.5, too. + # TODO(holtgrew): This could go into a "compatibility" module. +@@ -111,30 +111,30 @@ + #""".strip() + + def createDirectory(path, dry_run=False): +- print 'mkdir(%s)' % path +- print ++ print('mkdir(%s)' % path) ++ print() + if not dry_run: + if not os.path.exists(path): + os.mkdir(path) + + def configureFile(target_file, source_file, replacements, dry_run, options): +- print 'Configuring file.' 
+- print ' Source:', source_file +- print ' Target:', target_file +- print ++ print('Configuring file.') ++ print(' Source:', source_file) ++ print(' Target:', target_file) ++ print() + if os.path.exists(target_file) and not options.force: + msg = 'Target file already exists. Move it away and call the script again.' +- print >>sys.stderr, msg ++ print(msg, file=sys.stderr) + return 1 + + with open(source_file, 'rb') as f: + contents = f.read() + target_contents = contents % replacements + if dry_run: +- print 'The contents of the target file are:' +- print '-' * 78 +- print target_contents +- print '-' * 78 ++ print('The contents of the target file are:') ++ print('-' * 78) ++ print(target_contents) ++ print('-' * 78) + else: + with open(target_file, 'wb') as f: + f.write(target_contents) +@@ -177,13 +177,13 @@ + # Check that the given path does not exist yet. + if os.path.exists(target_path) and not options.force: + msg = 'The path %s already exists. Move it and call this script again.' +- print >>sys.stderr, msg % target_path ++ print(msg % target_path, file=sys.stderr) + return False + # Check that the parent path already exists. + if not os.path.exists(os.path.dirname(target_path)): + msg = 'The parent of the target path does not exist yet: %s' +- print >>sys.stderr, msg % os.path.dirname(target_path) +- print >>sys.stderr, 'Please create it and call this script again.' 
++ print(msg % os.path.dirname(target_path), file=sys.stderr) ++ print('Please create it and call this script again.', file=sys.stderr) + return False + return True + +@@ -192,14 +192,14 @@ + seqan_path = os.path.join(include_path, 'seqan') + module_path = os.path.join(seqan_path, name) + header_path = os.path.join(seqan_path, '%s.h' % name) +- print 'Creating module in %s' % module_path ++ print('Creating module in %s' % module_path) + if options.create_dirs and not _checkTargetPaths(module_path, options): + return 1 + if options.create_dirs and not _checkTargetPaths(header_path, options): + return 1 +- print ' Module path is: %s' % module_path +- print ' Module header path is: %s' % header_path +- print '' ++ print(' Module path is: %s' % module_path) ++ print(' Module header path is: %s' % header_path) ++ print('') + if options.create_dirs: + # Create directory. + createDirectory(module_path, options.dry_run) +@@ -227,11 +227,11 @@ + + def createTest(name, location, options): + target_path = paths.pathToTest(location, name) +- print 'Creating test in %s' % target_path ++ print('Creating test in %s' % target_path) + if options.create_dirs and not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + if options.create_dirs: + # Create directory. + createDirectory(target_path, options.dry_run) +@@ -259,11 +259,11 @@ + + def createApp(name, location, options): + target_path = paths.pathToApp(location, name) +- print 'Creating app in %s' % target_path ++ print('Creating app in %s' % target_path) + if options.create_dirs and not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + if options.create_programs: + # Create directory. 
+ createDirectory(target_path, options.dry_run) +@@ -291,11 +291,11 @@ + + def createDemo(name, location, options): + target_path = paths.pathToDemo(location, name) +- print 'Creating demo in %s' % target_path ++ print('Creating demo in %s' % target_path) + if options.create_dirs and not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + if options.create_programs: + # Copy over .cpp file for app and perform replacements. + source_file = paths.pathToTemplate('demo_template', 'demo.cpp') +@@ -307,27 +307,27 @@ + + def createHeader(name, location, options): + target_path = paths.pathToHeader(location, name) +- print 'Creating (non-library) header in %s' % target_path ++ print('Creating (non-library) header in %s' % target_path) + if not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + # Copy over .h file for app and perform replacements. + source_file = paths.pathToTemplate('header_template', 'header.h') + target_file = os.path.join(target_path) + replacements = buildReplacements('header', name, location, target_file, options) + res = configureFile(target_file, source_file, replacements, options.dry_run, options) + if res: return res +- print 'NOTE: Do not forget to add the header to the CMakeLists.txt file!' 
++ print('NOTE: Do not forget to add the header to the CMakeLists.txt file!') + return 0 + + def createLibraryHeader(name, location, options): + target_path = paths.pathToHeader(location, name) +- print 'Creating library header in %s' % target_path ++ print('Creating library header in %s' % target_path) + if not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + # Copy over .h file for app and perform replacements. + source_file = paths.pathToTemplate('header_template', 'library_header.h') + target_file = os.path.join(target_path) +@@ -337,12 +337,12 @@ + return 0 + + def createRepository(location, options): +- print 'Creating module %s' % location ++ print('Creating module %s' % location) + target_path = paths.pathToRepository(location) + if options.create_dirs and not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + if options.create_dirs: + # Create directories. + createDirectory(target_path, options.dry_run) +@@ -375,13 +375,13 @@ + return 0 + + def createAppTests(location, options): +- print 'Creating app tests at %s' % location ++ print('Creating app tests at %s' % location) + tests_location = os.path.join(location, 'tests') + target_path = paths.pathToRepository(tests_location) + if options.create_dirs and not _checkTargetPaths(target_path, options): + return 1 +- print ' Target path is: %s' % target_path +- print '' ++ print(' Target path is: %s' % target_path) ++ print('') + + # Create directories. 
+ if options.create_dirs: +@@ -398,16 +398,16 @@ + replacements = buildReplacements('app_tests', location, target_path, target_file, options) + configureFile(target_file, source_file, replacements, options.dry_run, options) + +- print '=' * 80 +- print 'Do not forget to add the tests in %s:' % os.path.join(location, 'CMakeLists.txt') +- print '' +- print '# Add app tests if Python interpreter could be found.' +- print 'if(PYTHONINTERP_FOUND)' +- print ' add_test(NAME app_test_%s COMMAND ${PYTHON_EXECUTABLE}' % os.path.split(location)[-1] +- print ' ${CMAKE_CURRENT_SOURCE_DIR}/tests/run_tests.py ${CMAKE_SOURCE_DIR}' +- print ' ${CMAKE_BINARY_DIR})' +- print 'endif(PYTHONINTERP_FOUND)' +- print '=' * 80 ++ print('=' * 80) ++ print('Do not forget to add the tests in %s:' % os.path.join(location, 'CMakeLists.txt')) ++ print('') ++ print('# Add app tests if Python interpreter could be found.') ++ print('if(PYTHONINTERP_FOUND)') ++ print(' add_test(NAME app_test_%s COMMAND ${PYTHON_EXECUTABLE}' % os.path.split(location)[-1]) ++ print(' ${CMAKE_CURRENT_SOURCE_DIR}/tests/run_tests.py ${CMAKE_SOURCE_DIR}') ++ print(' ${CMAKE_BINARY_DIR})') ++ print('endif(PYTHONINTERP_FOUND)') ++ print('=' * 80) + + return 0 + +@@ -441,7 +441,7 @@ + default=False) + options, args = parser.parse_args() + if options.infos_only and options.cmakelists_only: +- print >>sys.stderr, 'Only one of --info-only and --cmakelists-only can be given.' ++ print('Only one of --info-only and --cmakelists-only can be given.', file=sys.stderr) + return 1 + options.create_cmakelists = True + options.create_infos = True +@@ -457,15 +457,15 @@ + parser.print_help(file=sys.stderr) + return 1 + if len(args) < 2: +- print >>sys.stderr, 'Invalid argument count!' ++ print('Invalid argument count!', file=sys.stderr) + return 1 + if args[0] not in ['module', 'test', 'app', 'demo', 'repository', + 'header', 'lheader', 'app_tests']: +- print >>sys.stderr, 'Invalid template "%s".' 
% args[0] ++ print('Invalid template "%s".' % args[0], file=sys.stderr) + return 1 + if args[0] in['repository', 'app_tests']: + if len(args) != 2: +- print >>sys.stderr, 'Invalid argument count!' ++ print('Invalid argument count!', file=sys.stderr) + return 1 + + if args[0] == 'repository': +@@ -473,7 +473,7 @@ + elif args[0] == 'app_tests': + return createAppTests(args[1], options) + elif len(args) != 3: +- print >>sys.stderr, 'Invalid argument count!' ++ print('Invalid argument count!', file=sys.stderr) + return 1 + create_methods = { + 'module' : createModule, +--- seqan.orig/util/py_lib/termcolor.py ++++ seqan/util/py_lib/termcolor.py +@@ -23,7 +23,7 @@ + + """ANSII Color formatting for output in terminal.""" + +-from __future__ import print_function ++ + import os + + +--- seqan.orig/util/py_lib/threadpool/__init__.py ++++ seqan/util/py_lib/threadpool/__init__.py +@@ -52,7 +52,7 @@ + # standard library modules + import sys + import threading +-import Queue ++import queue + import traceback + + +@@ -145,7 +145,7 @@ + # the while loop again, to give the thread a chance to exit. + try: + request = self._requests_queue.get(True, self._poll_timeout) +- except Queue.Empty: ++ except queue.Empty: + continue + else: + if self._dismissed.isSet(): +@@ -247,8 +247,8 @@ + ``ThreadPool.putRequest()`` and catch ``Queue.Full`` exceptions. 
+ + """ +- self._requests_queue = Queue.Queue(q_size) +- self._results_queue = Queue.Queue(resq_size) ++ self._requests_queue = queue.Queue(q_size) ++ self._results_queue = queue.Queue(resq_size) + self.workers = [] + self.dismissedWorkers = [] + self.workRequests = {} +@@ -315,7 +315,7 @@ + (request.exception and request.exc_callback): + request.callback(request, result) + del self.workRequests[request.requestID] +- except Queue.Empty: ++ except queue.Empty: + break + + def wait(self): +@@ -346,18 +346,18 @@ + + # this will be called each time a result is available + def print_result(request, result): +- print "**** Result from request #%s: %r" % (request.requestID, result) ++ print("**** Result from request #%s: %r" % (request.requestID, result)) + + # this will be called when an exception occurs within a thread + # this example exception handler does little more than the default handler + def handle_exception(request, exc_info): + if not isinstance(exc_info, tuple): + # Something is seriously wrong... +- print request +- print exc_info ++ print(request) ++ print(exc_info) + raise SystemExit +- print "**** Exception occurred in request #%s: %s" % \ +- (request.requestID, exc_info) ++ print("**** Exception occurred in request #%s: %s" % \ ++ (request.requestID, exc_info)) + + # assemble the arguments for each job to a list... + data = [random.randint(1,10) for i in range(20)] +@@ -377,13 +377,13 @@ + ) + + # we create a pool of 3 worker threads +- print "Creating thread pool with 3 worker threads." ++ print("Creating thread pool with 3 worker threads.") + main = ThreadPool(3) + + # then we put the work requests in the queue... + for req in requests: + main.putRequest(req) +- print "Work request #%s added." % req.requestID ++ print("Work request #%s added." 
% req.requestID) + # or shorter: + # [main.putRequest(req) for req in requests] + +@@ -398,21 +398,21 @@ + try: + time.sleep(0.5) + main.poll() +- print "Main thread working...", +- print "(active worker threads: %i)" % (threading.activeCount()-1, ) ++ print("Main thread working...", end=' ') ++ print("(active worker threads: %i)" % (threading.activeCount()-1, )) + if i == 10: +- print "**** Adding 3 more worker threads..." ++ print("**** Adding 3 more worker threads...") + main.createWorkers(3) + if i == 20: +- print "**** Dismissing 2 worker threads..." ++ print("**** Dismissing 2 worker threads...") + main.dismissWorkers(2) + i += 1 + except KeyboardInterrupt: +- print "**** Interrupted!" ++ print("**** Interrupted!") + break + except NoResultsPending: +- print "**** No pending results." ++ print("**** No pending results.") + break + if main.dismissedWorkers: +- print "Joining all dismissed worker threads..." ++ print("Joining all dismissed worker threads...") + main.joinAllDismissedWorkers() +--- seqan.orig/util/skel/app_tests_template/run_tests.py ++++ seqan/util/skel/app_tests_template/run_tests.py +@@ -1,4 +1,4 @@ +-#!/usr/bin/env python ++#!/usr/bin/python3 + """Execute the tests for %(APP_NAME)s. + + The golden test outputs are generated by the script generate_outputs.sh. diff -Nru seqan-1.4.2+dfsg/debian/patches/series seqan-1.4.2+dfsg/debian/patches/series --- seqan-1.4.2+dfsg/debian/patches/series 2016-08-06 15:05:16.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/patches/series 2020-03-02 13:10:25.000000000 +0000 @@ -1,2 +1,4 @@ skip_extras_and_apps.patch skip_doc_creation.patch +2to3.patch +skip_broken_tests diff -Nru seqan-1.4.2+dfsg/debian/patches/skip_broken_tests seqan-1.4.2+dfsg/debian/patches/skip_broken_tests --- seqan-1.4.2+dfsg/debian/patches/skip_broken_tests 1970-01-01 00:00:00.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/patches/skip_broken_tests 2020-03-02 13:14:55.000000000 +0000 @@ -0,0 +1,16 @@ +Author: Michael R. 
Crusoe +Description: skip broken tests + +Upstream will not be fixing this +--- seqan.orig/core/tests/index/CMakeLists.txt ++++ seqan/core/tests/index/CMakeLists.txt +@@ -130,9 +130,5 @@ + add_test (NAME test_test_index_sa_bwtwalk COMMAND $) + add_test (NAME test_test_index_shapes COMMAND $) + add_test (NAME test_test_index_drawing COMMAND $) +-add_test (NAME test_test_index_fm COMMAND $) +-if (NOT CMAKE_COMPILER_IS_GNUCXX OR (450 LESS _GCC_VERSION)) +- add_test (NAME test_test_index_stree_iterators COMMAND $) +-endif () + add_test (NAME test_test_index_repeats COMMAND $) + diff -Nru seqan-1.4.2+dfsg/debian/rules seqan-1.4.2+dfsg/debian/rules --- seqan-1.4.2+dfsg/debian/rules 2018-07-16 10:21:47.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/rules 2020-02-25 10:28:15.000000000 +0000 @@ -8,9 +8,8 @@ export DEB_BUILD_MAINT_OPTIONS = hardening=-stackprotector # alternatively: export DEB_CXXFLAGS_STRIP='-fstack-protector' -DEB_HOST_ARCH ?= $(shell dpkg-architecture -qDEB_HOST_ARCH) +include /usr/share/dpkg/architecture.mk -DEB_BUILD_ARCH ?= $(shell dpkg-architecture -qDEB_BUILD_ARCH) ifneq (,$(filter $(DEB_BUILD_ARCH),mips mipsel)) export CXXFLAGS:=$(shell dpkg-buildflags --get CXXFLAGS | sed 's/-O[1-9]//') -O0 -mxgot endif @@ -41,12 +40,14 @@ rm -rf orig override_dh_auto_test: +ifeq (,$(filter nocheck,$(DEB_BUILD_OPTIONS))) # Run test suite only on powerfull architectures (amd64) at build time ifneq (,$(filter $(DEB_HOST_ARCH),amd64 kfreebsd-amd64)) dh_auto_test else echo "Do not run test suite when building on architecture $(DEB_HOST_ARCH)" endif +endif override_dh_fixperms: dh_fixperms diff -Nru seqan-1.4.2+dfsg/debian/upstream/metadata seqan-1.4.2+dfsg/debian/upstream/metadata --- seqan-1.4.2+dfsg/debian/upstream/metadata 2018-07-16 10:21:45.000000000 +0000 +++ seqan-1.4.2+dfsg/debian/upstream/metadata 2020-02-25 10:28:15.000000000 +0000 @@ -1,23 +1,24 @@ -Contact: SeqAn Team (http://www.seqan.de/contact-us.html) Donation: http://www.seqan.de/ -Name: SeqAn Reference: 
- AUTHOR: Doring, Andreas and Weese, David and Rausch, Tobias and Reinert, Knut - TITLE: SeqAn An efficient, generic C++ library for sequence analysis - JOURNAL: BMC Bioinformatics - VOLUME: 9 - YEAR: 2008 - NUMBER: 1 - PAGES: 11 - DOI: 10.1186/1471-2105-9-11 - PMID: 18184432 - URL: http://www.biomedcentral.com/1471-2105/9/11 - eprint: http://www.biomedcentral.com/content/pdf/1471-2105-9-11.pdf - ISSN: 1471-2105 + Author: Doring, Andreas and Weese, David and Rausch, Tobias and Reinert, Knut + Title: SeqAn An efficient, generic C++ library for sequence analysis + Journal: BMC Bioinformatics + Volume: 9 + Year: 2008 + Number: 1 + Pages: 11 + DOI: 10.1186/1471-2105-9-11 + PMID: 18184432 + URL: http://www.biomedcentral.com/1471-2105/9/11 + ePrint: http://www.biomedcentral.com/content/pdf/1471-2105-9-11.pdf + ISSN: 1471-2105 Registry: - - Name: OMICtools - Entry: OMICS_14825 - - Name: SciCrunch - Entry: NA - - Name: bio.tools - Entry: seqan \ No newline at end of file +- Name: OMICtools + Entry: OMICS_14825 +- Name: SciCrunch + Entry: NA +- Name: conda:bioconda + Entry: NA +- Name: bio.tools + Entry: seqan +Bug-Database: https://github.com/seqan/seqan/issues